Use rb_execution_context_t instead of rb_thread_t

to represent execution context [Feature #14038]

* vm_core.h (rb_thread_t): rb_thread_t::ec is now a pointer.
  There is a lot of code that uses `th` to reach execution context
  state (such as cfp, the VM stack and so on). Accessing that state
  now requires `th->ec->...` (one extra level of indirection), so
  such code should be converted to pass `ec` instead of `th`.

* vm_core.h (GET_EC()): introduced to access the current ec. Also
  removed the `ruby_current_thread` global variable.

* cont.c (rb_context_t): introduce rb_context_t::thread_ptr instead of
  rb_context_t::thread_value.

* cont.c (ec_set_vm_stack): added to update vm_stack explicitly.

* cont.c (ec_switch): added to switch ec explicitly.

* cont.c (rb_fiber_close): added to terminate fibers explicitly.
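
For readers skimming the diff below, the access-pattern change looks
roughly like this (an illustrative sketch, not code from this commit;
the real vm_core.h declarations carry many more fields):

    /* before: the execution context was embedded in the thread */
    GET_THREAD()->ec.cfp;

    /* after: rb_thread_t::ec is a pointer, normally aimed at the
     * running fiber's cont.saved_ec, so every th-> access pays one
     * extra indirection ... */
    GET_THREAD()->ec->cfp;

    /* ... which is why hot paths should take the ec directly */
    rb_execution_context_t *ec = GET_EC();
    ec->cfp->sp;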


git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@60440 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Author: ko1
Date:   2017-10-26 08:32:49 +0000
Parent: 07f04f468d
Commit: 837fd5e494

32 changed files with 743 additions and 576 deletions

.gdbinit

@@ -1119,8 +1119,8 @@ define rb_ps_thread
   set $ps_thread_th = (rb_thread_t*)$ps_thread->data
   printf "* #<Thread:%p rb_thread_t:%p native_thread:%p>\n", \
     $ps_thread, $ps_thread_th, $ps_thread_th->thread_id
-  set $cfp = $ps_thread_th->ec.cfp
-  set $cfpend = (rb_control_frame_t *)($ps_thread_th->ec.vm_stack + $ps_thread_th->ec.vm_stack_size)-1
+  set $cfp = $ps_thread_th->ec->cfp
+  set $cfpend = (rb_control_frame_t *)($ps_thread_th->ec->vm_stack + $ps_thread_th->ec->vm_stack_size)-1
   while $cfp < $cfpend
     if $cfp->iseq
       if !((VALUE)$cfp->iseq & RUBY_IMMEDIATE_MASK) && (((imemo_ifunc << RUBY_FL_USHIFT) | RUBY_T_IMEMO) == ($cfp->iseq->flags & ((RUBY_IMEMO_MASK << RUBY_FL_USHIFT) | RUBY_T_MASK)))


@@ -7604,7 +7604,7 @@ caller_location(VALUE *path, VALUE *realpath)
 {
     const rb_thread_t *const th = GET_THREAD();
     const rb_control_frame_t *const cfp =
-        rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
+        rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);

     if (cfp) {
         int line = rb_vm_get_sourceline(cfp);

cont.c

@@ -83,8 +83,8 @@ enum context_type {
 struct cont_saved_vm_stack {
     VALUE *ptr;
 #ifdef CAPTURE_JUST_VALID_VM_STACK
-    size_t slen;  /* length of stack (head of th->ec.vm_stack) */
-    size_t clen;  /* length of control frames (tail of th->ec.vm_stack) */
+    size_t slen;  /* length of stack (head of th->ec->vm_stack) */
+    size_t clen;  /* length of control frames (tail of th->ec->vm_stack) */
 #endif
 };
@@ -110,7 +110,7 @@ typedef struct rb_context_struct {
     rb_jmpbuf_t jmpbuf;
     rb_ensure_entry_t *ensure_array;
     rb_ensure_list_t *ensure_list;
-    VALUE thread_value;
+    rb_thread_t *thread_ptr;
 } rb_context_t;
@@ -189,15 +189,62 @@ fiber_status_name(enum fiber_status s)
     return NULL;
 }

+static void
+fiber_verify(const rb_fiber_t *fib)
+{
+#if VM_CHECK_MODE > 0
+    VM_ASSERT(fib->cont.saved_ec.fiber == fib);
+
+    switch (fib->status) {
+      case FIBER_RESUMED:
+        VM_ASSERT(fib->cont.saved_ec.vm_stack != NULL);
+        break;
+      case FIBER_SUSPENDED:
+        VM_ASSERT(fib->cont.saved_ec.vm_stack != NULL);
+        break;
+      case FIBER_CREATED:
+      case FIBER_TERMINATED:
+        /* TODO */
+        break;
+      default:
+        VM_UNREACHABLE(fiber_verify);
+    }
+#endif
+}
+
+#if VM_CHECK_MODE > 0
+void
+rb_ec_verify(const rb_execution_context_t *ec)
+{
+    /* TODO */
+}
+#endif
+
 static void
 fiber_status_set(const rb_fiber_t *fib, enum fiber_status s)
 {
     if (0) fprintf(stderr, "fib: %p, status: %s -> %s\n", fib, fiber_status_name(fib->status), fiber_status_name(s));
     VM_ASSERT(!FIBER_TERMINATED_P(fib));
     VM_ASSERT(fib->status != s);
+    fiber_verify(fib);
     *((enum fiber_status *)&fib->status) = s;
 }

+void
+ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
+{
+    *(VALUE **)(&ec->vm_stack) = stack;
+    *(size_t *)(&ec->vm_stack_size) = size;
+}
+
+static inline void
+ec_switch(rb_thread_t *th, rb_fiber_t *fib)
+{
+    rb_execution_context_t *ec = &fib->cont.saved_ec;
+    ruby_current_execution_context_ptr = th->ec = ec;
+    VM_ASSERT(ec->fiber->cont.self == 0 || ec->vm_stack != NULL);
+}
+
 static const rb_data_type_t cont_data_type, fiber_data_type;
 static VALUE rb_cContinuation;
 static VALUE rb_cFiber;
@@ -214,13 +261,13 @@ static VALUE rb_eFiberError;
 NOINLINE(static VALUE cont_capture(volatile int *volatile stat));

 #define THREAD_MUST_BE_RUNNING(th) do { \
-        if (!(th)->ec.tag) rb_raise(rb_eThreadError, "not running thread"); \
+        if (!(th)->ec->tag) rb_raise(rb_eThreadError, "not running thread"); \
   } while (0)

 static VALUE
 cont_thread_value(const rb_context_t *cont)
 {
-    return cont->thread_value;
+    return cont->thread_ptr->self;
 }

 static void
@@ -252,10 +299,9 @@ cont_mark(void *ptr)
     }
     else {
         /* fiber */
-        const rb_thread_t *th = rb_thread_ptr(cont_thread_value(cont));
         const rb_fiber_t *fib = (rb_fiber_t*)cont;

-        if ((th->ec.fiber != fib) && !FIBER_TERMINATED_P(fib)) {
+        if (!FIBER_TERMINATED_P(fib)) {
             rb_gc_mark_locations(cont->machine.stack,
                                  cont->machine.stack + cont->machine.stack_size);
         }
@@ -277,7 +323,8 @@ cont_free(void *ptr)
     rb_context_t *cont = ptr;

     RUBY_FREE_ENTER("cont");
-    RUBY_FREE_UNLESS_NULL(cont->saved_ec.vm_stack);
+    ruby_xfree(cont->saved_ec.vm_stack);
+
 #if FIBER_USE_NATIVE
     if (cont->type == CONTINUATION_CONTEXT) {
         /* cont */
@@ -287,22 +334,19 @@ cont_free(void *ptr)
     else {
         /* fiber */
         const rb_fiber_t *fib = (rb_fiber_t*)cont;
-        const rb_thread_t *const th = GET_THREAD();
 #ifdef _WIN32
-        if (th && th->ec.fiber != fib && cont->type != ROOT_FIBER_CONTEXT) {
+        if (cont->type != ROOT_FIBER_CONTEXT) {
             /* don't delete root fiber handle */
             if (fib->fib_handle) {
                 DeleteFiber(fib->fib_handle);
             }
         }
 #else /* not WIN32 */
-        if (th && th->ec.fiber != fib) {
-            if (fib->ss_sp) {
-                if (cont->type == ROOT_FIBER_CONTEXT) {
-                    rb_bug("Illegal root fiber parameter");
-                }
-                munmap((void*)fib->ss_sp, fib->ss_size);
+        if (fib->ss_sp != NULL) {
+            if (cont->type == ROOT_FIBER_CONTEXT) {
+                rb_bug("Illegal root fiber parameter");
             }
+            munmap((void*)fib->ss_sp, fib->ss_size);
         }
         else {
             /* It may reached here when finalize */
@@ -352,32 +396,21 @@ cont_memsize(const void *ptr)
     return size;
 }

-static void
-fiber_verify(const rb_fiber_t *fib)
+rb_thread_t *
+rb_fiberptr_thread_ptr(const rb_fiber_t *fib)
 {
-#if VM_CHECK_MODE > 0
-    switch (fib->status) {
-      case FIBER_RESUMED:
-        VM_ASSERT(fib->cont.saved_ec.vm_stack == NULL);
-        break;
-      case FIBER_SUSPENDED:
-        VM_ASSERT(fib->cont.saved_ec.vm_stack != NULL);
-        break;
-      case FIBER_CREATED:
-      case FIBER_TERMINATED:
-        /* TODO */
-        break;
-      default:
-        VM_UNREACHABLE(fiber_verify);
-    }
-#endif
+    return fib->cont.thread_ptr;
 }

 void
 rb_fiber_mark_self(const rb_fiber_t *fib)
 {
-    if (fib)
+    if (fib->cont.self) {
         rb_gc_mark(fib->cont.self);
+    }
+    else {
+        rb_execution_context_mark(&fib->cont.saved_ec);
+    }
 }

 static void
@@ -387,7 +420,17 @@ fiber_mark(void *ptr)
     RUBY_MARK_ENTER("cont");
     fiber_verify(fib);
     rb_gc_mark(fib->first_proc);
-    rb_fiber_mark_self(fib->prev);
+    if (fib->prev) rb_fiber_mark_self(fib->prev);
+
+#if !FIBER_USE_NATIVE
+    if (fib->status == FIBER_TERMINATED) {
+        /* FIBER_TERMINATED fiber should not mark machine stack */
+        if (fib->cont.saved_ec.machine.stack_end != NULL) {
+            fib->cont.saved_ec.machine.stack_end = NULL;
+        }
+    }
+#endif
+
     cont_mark(&fib->cont);
     RUBY_MARK_LEAVE("cont");
 }
@@ -397,8 +440,7 @@ fiber_free(void *ptr)
 {
     rb_fiber_t *fib = ptr;
     RUBY_FREE_ENTER("fiber");
-    if (fib->cont.type != ROOT_FIBER_CONTEXT &&
-        fib->cont.saved_ec.local_storage) {
+    if (fib->cont.saved_ec.local_storage) {
         st_free_table(fib->cont.saved_ec.local_storage);
     }
@@ -437,18 +479,18 @@ cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
 {
     size_t size;

-    SET_MACHINE_STACK_END(&th->ec.machine.stack_end);
+    SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
 #ifdef __ia64
     th->machine.register_stack_end = rb_ia64_bsp();
 #endif

-    if (th->ec.machine.stack_start > th->ec.machine.stack_end) {
-        size = cont->machine.stack_size = th->ec.machine.stack_start - th->ec.machine.stack_end;
-        cont->machine.stack_src = th->ec.machine.stack_end;
+    if (th->ec->machine.stack_start > th->ec->machine.stack_end) {
+        size = cont->machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
+        cont->machine.stack_src = th->ec->machine.stack_end;
     }
     else {
-        size = cont->machine.stack_size = th->ec.machine.stack_end - th->ec.machine.stack_start;
-        cont->machine.stack_src = th->ec.machine.stack_start;
+        size = cont->machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
+        cont->machine.stack_src = th->ec->machine.stack_start;
     }

     if (cont->machine.stack) {
@@ -490,7 +532,7 @@ cont_save_thread(rb_context_t *cont, rb_thread_t *th)
     VM_ASSERT(th->status == THREAD_RUNNABLE);

     /* save thread context */
-    *sec = th->ec;
+    *sec = *th->ec;

     /* saved_thread->machine.stack_end should be NULL */
     /* because it may happen GC afterward */
@@ -507,7 +549,7 @@ cont_init(rb_context_t *cont, rb_thread_t *th)
 {
     /* save thread context */
     cont_save_thread(cont, th);
-    cont->thread_value = th->self;
+    cont->thread_ptr = th;
     cont->saved_ec.local_storage = NULL;
     cont->saved_ec.local_storage_recursive_hash = Qnil;
     cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
@@ -527,13 +569,40 @@ cont_new(VALUE klass)
     return cont;
 }

+#if 0
+void
+show_vm_stack(const rb_execution_context_t *ec)
+{
+    VALUE *p = ec->vm_stack;
+    while (p < ec->cfp->sp) {
+        fprintf(stderr, "%3d ", (int)(p - ec->vm_stack));
+        rb_obj_info_dump(*p);
+        p++;
+    }
+}
+
+void
+show_vm_pcs(const rb_control_frame_t *cfp,
+            const rb_control_frame_t *end_of_cfp)
+{
+    int i=0;
+    while (cfp != end_of_cfp) {
+        int pc = 0;
+        if (cfp->iseq) {
+            pc = cfp->pc - cfp->iseq->body->iseq_encoded;
+        }
+        fprintf(stderr, "%2d pc: %d\n", i++, pc);
+        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+    }
+}
+#endif
+
 static VALUE
 cont_capture(volatile int *volatile stat)
 {
     rb_context_t *volatile cont;
     rb_thread_t *th = GET_THREAD();
     volatile VALUE contval;
-    rb_execution_context_t *ec = &th->ec;
+    const rb_execution_context_t *ec = th->ec;

     THREAD_MUST_BE_RUNNING(th);
     rb_vm_stack_to_heap(th);
@@ -544,15 +613,18 @@ cont_capture(volatile int *volatile stat)
     cont->saved_vm_stack.slen = ec->cfp->sp - ec->vm_stack;
     cont->saved_vm_stack.clen = ec->vm_stack + ec->vm_stack_size - (VALUE*)ec->cfp;
     cont->saved_vm_stack.ptr = ALLOC_N(VALUE, cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
-    MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack, VALUE, cont->saved_vm_stack.slen);
+    MEMCPY(cont->saved_vm_stack.ptr,
+           ec->vm_stack,
+           VALUE, cont->saved_vm_stack.slen);
     MEMCPY(cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
-           (VALUE*)ec->cfp, VALUE, cont->saved_vm_stack.clen);
+           (VALUE*)ec->cfp,
+           VALUE,
+           cont->saved_vm_stack.clen);
 #else
     cont->saved_vm_stack.ptr = ALLOC_N(VALUE, ec->vm_stack_size);
     MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack, VALUE, ec->vm_stack_size);
 #endif
-    cont->saved_ec.vm_stack = NULL;
+    ec_set_vm_stack(&cont->saved_ec, NULL, 0);

     cont_save_machine_stack(th, cont);

     /* backup ensure_list to array for search in another context */
@@ -560,10 +632,10 @@ cont_capture(volatile int *volatile stat)
         rb_ensure_list_t *p;
         int size = 0;
         rb_ensure_entry_t *entry;
-        for (p=th->ec.ensure_list; p; p=p->next)
+        for (p=th->ec->ensure_list; p; p=p->next)
             size++;
         entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
-        for (p=th->ec.ensure_list; p; p=p->next) {
+        for (p=th->ec->ensure_list; p; p=p->next) {
             if (!p->entry.marker)
                 p->entry.marker = rb_ary_tmp_new(0); /* dummy object */
             *entry++ = p->entry;
@@ -590,10 +662,8 @@ cont_capture(volatile int *volatile stat)
 static inline void
 fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fib)
 {
-    th->ec = fib->cont.saved_ec;
-    fib->cont.saved_ec.vm_stack = NULL;
-
-    VM_ASSERT(th->ec.vm_stack != NULL);
+    ec_switch(th, fib);
+    VM_ASSERT(th->ec->fiber == fib);
 }

 static inline void
@@ -605,36 +675,44 @@ cont_restore_thread(rb_context_t *cont)
     if (cont->type == CONTINUATION_CONTEXT) {
         /* continuation */
         rb_execution_context_t *sec = &cont->saved_ec;
-        const rb_fiber_t *fib;
+        rb_fiber_t *fib = NULL;

-        fib = th->ec.fiber = sec->fiber;
-        if (fib == NULL) fib = th->root_fiber;
-
-        if (fib && fib->cont.saved_ec.vm_stack) {
-            th->ec.vm_stack_size = fib->cont.saved_ec.vm_stack_size;
-            th->ec.vm_stack = fib->cont.saved_ec.vm_stack;
+        if (sec->fiber != NULL) {
+            fib = sec->fiber;
+        }
+        else if (th->root_fiber) {
+            fib = th->root_fiber;
         }
+
+        if (fib && th->ec != &fib->cont.saved_ec) {
+            ec_switch(th, fib);
+        }
+
+        /* copy vm stack */
 #ifdef CAPTURE_JUST_VALID_VM_STACK
-        MEMCPY(th->ec.vm_stack, cont->saved_vm_stack.ptr, VALUE, cont->saved_vm_stack.slen);
-        MEMCPY(th->ec.vm_stack + sec->vm_stack_size - cont->saved_vm_stack.clen,
-               cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen, VALUE, cont->saved_vm_stack.clen);
+        MEMCPY(th->ec->vm_stack,
+               cont->saved_vm_stack.ptr,
+               VALUE, cont->saved_vm_stack.slen);
+        MEMCPY(th->ec->vm_stack + th->ec->vm_stack_size - cont->saved_vm_stack.clen,
+               cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
+               VALUE, cont->saved_vm_stack.clen);
 #else
-        MEMCPY(th->ec.vm_stack, cont->saved_vm_stack.ptr, VALUE, sec->vm_stack_size);
+        MEMCPY(th->ec->vm_stack, cont->saved_vm_stack.ptr, VALUE, sec->vm_stack_size);
 #endif
+
         /* other members of ec */
-        th->ec.cfp = sec->cfp;
-        th->ec.safe_level = sec->safe_level;
-        th->ec.raised_flag = sec->raised_flag;
-        th->ec.tag = sec->tag;
-        th->ec.protect_tag = sec->protect_tag;
-        th->ec.root_lep = sec->root_lep;
-        th->ec.root_svar = sec->root_svar;
-        th->ec.ensure_list = sec->ensure_list;
-        th->ec.errinfo = sec->errinfo;
-        th->ec.trace_arg = sec->trace_arg;
-
-        VM_ASSERT(th->ec.vm_stack != NULL);
+        th->ec->cfp = sec->cfp;
+        th->ec->safe_level = sec->safe_level;
+        th->ec->raised_flag = sec->raised_flag;
+        th->ec->tag = sec->tag;
+        th->ec->protect_tag = sec->protect_tag;
+        th->ec->root_lep = sec->root_lep;
+        th->ec->root_svar = sec->root_svar;
+        th->ec->ensure_list = sec->ensure_list;
+        th->ec->errinfo = sec->errinfo;
+        th->ec->trace_arg = sec->trace_arg;
+
+        VM_ASSERT(th->ec->vm_stack != NULL);
     }
     else {
         /* fiber */
@@ -651,7 +729,7 @@ fiber_set_stack_location(void)
     VALUE *ptr;

     SET_MACHINE_STACK_END(&ptr);
-    th->ec.machine.stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
+    th->ec->machine.stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
 }

 static VOID CALLBACK
@@ -762,19 +840,19 @@ fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
     /* save oldfib's machine stack / TODO: is it needed? */
     if (!FIBER_TERMINATED_P(oldfib)) {
         STACK_GROW_DIR_DETECTION;
-        SET_MACHINE_STACK_END(&th->ec.machine.stack_end);
+        SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
         if (STACK_DIR_UPPER(0, 1)) {
-            oldfib->cont.machine.stack_size = th->ec.machine.stack_start - th->ec.machine.stack_end;
-            oldfib->cont.machine.stack = th->ec.machine.stack_end;
+            oldfib->cont.machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
+            oldfib->cont.machine.stack = th->ec->machine.stack_end;
         }
         else {
-            oldfib->cont.machine.stack_size = th->ec.machine.stack_end - th->ec.machine.stack_start;
-            oldfib->cont.machine.stack = th->ec.machine.stack_start;
+            oldfib->cont.machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
+            oldfib->cont.machine.stack = th->ec->machine.stack_start;
         }
     }

     /* exchange machine_stack_start between oldfib and newfib */
-    oldfib->cont.saved_ec.machine.stack_start = th->ec.machine.stack_start;
+    oldfib->cont.saved_ec.machine.stack_start = th->ec->machine.stack_start;

     /* oldfib->machine.stack_end should be NULL */
     oldfib->cont.saved_ec.machine.stack_end = NULL;
@@ -1128,15 +1206,15 @@ rb_cont_call(int argc, VALUE *argv, VALUE contval)
     if (cont_thread_value(cont) != th->self) {
         rb_raise(rb_eRuntimeError, "continuation called across threads");
     }
-    if (cont->saved_ec.protect_tag != th->ec.protect_tag) {
+    if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
         rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
     }
     if (cont->saved_ec.fiber) {
-        if (th->ec.fiber != cont->saved_ec.fiber) {
+        if (th->ec->fiber != cont->saved_ec.fiber) {
             rb_raise(rb_eRuntimeError, "continuation called across fiber");
         }
     }
-    rollback_ensure_stack(contval, th->ec.ensure_list, cont->ensure_array);
+    rollback_ensure_stack(contval, th->ec->ensure_list, cont->ensure_array);

     cont->argc = argc;
     cont->value = make_passing_arg(argc, argv);
@@ -1270,15 +1348,13 @@ fiber_init(VALUE fibval, VALUE proc)
     rb_context_t *cont = &fib->cont;
     rb_execution_context_t *sec = &cont->saved_ec;
     rb_thread_t *cth = GET_THREAD();
+    size_t fib_stack_size = cth->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);

     /* initialize cont */
     cont->saved_vm_stack.ptr = NULL;
+    ec_set_vm_stack(sec, NULL, 0);

-    sec->vm_stack = NULL;
-    sec->vm_stack_size = 0;
-
-    sec->vm_stack_size = cth->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
-    sec->vm_stack = ALLOC_N(VALUE, sec->vm_stack_size);
+    ec_set_vm_stack(sec, ALLOC_N(VALUE, fib_stack_size), fib_stack_size);
     sec->cfp = (void *)(sec->vm_stack + sec->vm_stack_size);

     rb_vm_push_frame(sec,
@@ -1324,11 +1400,12 @@ static void rb_fiber_terminate(rb_fiber_t *fib);
 void
 rb_fiber_start(void)
 {
-    rb_thread_t *th = GET_THREAD();
-    rb_fiber_t *fib = th->ec.fiber;
+    rb_thread_t * volatile th = GET_THREAD();
+    rb_fiber_t *fib = th->ec->fiber;
     rb_proc_t *proc;
     enum ruby_tag_type state;

+    VM_ASSERT(th->ec == ruby_current_execution_context_ptr);
     VM_ASSERT(FIBER_RESUMED_P(fib));

     TH_PUSH_TAG(th);
@@ -1339,9 +1416,9 @@ rb_fiber_start(void)
         GetProcPtr(fib->first_proc, proc);
         argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
         cont->value = Qnil;
-        th->ec.errinfo = Qnil;
-        th->ec.root_lep = rb_vm_proc_local_ep(fib->first_proc);
-        th->ec.root_svar = Qfalse;
+        th->ec->errinfo = Qnil;
+        th->ec->root_lep = rb_vm_proc_local_ep(fib->first_proc);
+        th->ec->root_svar = Qfalse;

         EXEC_EVENT_HOOK(th, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
         cont->value = rb_vm_invoke_proc(th, proc, argc, argv, VM_BLOCK_HANDLER_NONE);
@@ -1352,10 +1429,10 @@ rb_fiber_start(void)
     VM_ASSERT(FIBER_RESUMED_P(fib));

     if (state == TAG_RAISE || state == TAG_FATAL) {
-        rb_threadptr_pending_interrupt_enque(th, th->ec.errinfo);
+        rb_threadptr_pending_interrupt_enque(th, th->ec->errinfo);
     }
     else {
-        VALUE err = rb_vm_make_jump_tag_but_local_jump(state, th->ec.errinfo);
+        VALUE err = rb_vm_make_jump_tag_but_local_jump(state, th->ec->errinfo);
         if (!NIL_P(err))
             rb_threadptr_pending_interrupt_enque(th, err);
     }
@@ -1369,30 +1446,54 @@ rb_fiber_start(void)
 static rb_fiber_t *
 root_fiber_alloc(rb_thread_t *th)
 {
-    rb_fiber_t *fib;
-    /* no need to allocate vm stack */
-    fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
-    fib->cont.type = ROOT_FIBER_CONTEXT;
+    VALUE fibval = fiber_alloc(rb_cFiber);
+    rb_fiber_t *fib = th->ec->fiber;
+
+    VM_ASSERT(DATA_PTR(fibval) == NULL);
+    VM_ASSERT(fib->cont.type == ROOT_FIBER_CONTEXT);
+    VM_ASSERT(fib->status == FIBER_RESUMED);
+
+    th->root_fiber = fib;
+    DATA_PTR(fibval) = fib;
+    fib->cont.self = fibval;
 #if FIBER_USE_NATIVE
 #ifdef _WIN32
-    fib->fib_handle = ConvertThreadToFiber(0);
+    if (fib->fib_handle == 0) {
+        fib->fib_handle = ConvertThreadToFiber(0);
+    }
 #endif
 #endif
-    fiber_status_set(fib, FIBER_RESUMED); /* skip CREATED */
-    th->root_fiber = th->ec.fiber = fib;
     return fib;
 }

+void
+rb_threadptr_root_fiber_setup(rb_thread_t *th)
+{
+    rb_fiber_t *fib = ruby_mimmalloc(sizeof(rb_fiber_t));
+    MEMZERO(fib, rb_fiber_t, 1);
+    fib->cont.type = ROOT_FIBER_CONTEXT;
+    fib->cont.saved_ec.fiber = fib;
+    fib->cont.thread_ptr = th;
+    fiber_status_set(fib, FIBER_RESUMED); /* skip CREATED */
+    th->ec = &fib->cont.saved_ec;
+    th->root_fiber = th->ec->fiber = fib;
+#if FIBER_USE_NATIVE
+#ifdef _WIN32
+    if (fib->fib_handle == 0) {
+        fib->fib_handle = ConvertThreadToFiber(0);
+    }
+#endif
+#endif
+}
+
 static inline rb_fiber_t*
 fiber_current(void)
 {
     rb_thread_t *th = GET_THREAD();
-    if (th->ec.fiber == NULL) {
-        rb_fiber_t *fib = root_fiber_alloc(th);
-        /* Running thread object has stack management responsibility */
-        fib->cont.saved_ec.vm_stack = NULL;
+    if (th->ec->fiber->cont.self == 0) {
+        root_fiber_alloc(th);
     }
-    return th->ec.fiber;
+    return th->ec->fiber;
 }

 static inline rb_fiber_t*
@@ -1426,9 +1527,8 @@ fiber_store(rb_fiber_t *next_fib, rb_thread_t *th)
 {
     rb_fiber_t *fib;

-    if (th->ec.fiber != NULL) {
-        fib = th->ec.fiber;
-        cont_save_thread(&fib->cont, th);
+    if (th->ec->fiber != NULL) {
+        fib = th->ec->fiber;
     }
     else {
         /* create root fiber */
@@ -1475,14 +1575,14 @@ fiber_store(rb_fiber_t *next_fib, rb_thread_t *th)
         terminated_machine_stack.size = 0;
     }
 #endif /* not _WIN32 */
-    fib = th->ec.fiber;
+    fib = th->ec->fiber;
     if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
     return fib->cont.value;

 #else /* FIBER_USE_NATIVE */
     if (ruby_setjmp(fib->cont.jmpbuf)) {
         /* restored */
-        fib = th->ec.fiber;
+        fib = th->ec->fiber;
         if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
         if (next_fib->cont.value == Qundef) {
             cont_restore_0(&next_fib->cont, &next_fib->cont.value);
@@ -1505,7 +1605,7 @@ fiber_switch(rb_fiber_t *fib, int argc, const VALUE *argv, int is_resume)
     rb_context_t *cont = &fib->cont;
     rb_thread_t *th = GET_THREAD();

-    if (th->ec.fiber == fib) {
+    if (th->ec->fiber == fib) {
         /* ignore fiber context switch
          * because destination fiber is same as current fiber
          */
@@ -1515,13 +1615,13 @@ fiber_switch(rb_fiber_t *fib, int argc, const VALUE *argv, int is_resume)
     if (cont_thread_value(cont) != th->self) {
         rb_raise(rb_eFiberError, "fiber called across threads");
     }
-    else if (cont->saved_ec.protect_tag != th->ec.protect_tag) {
+    else if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
         rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
     }
     else if (FIBER_TERMINATED_P(fib)) {
         value = rb_exc_new2(rb_eFiberError, "dead fiber called");

-        if (!FIBER_TERMINATED_P(th->ec.fiber)) {
+        if (!FIBER_TERMINATED_P(th->ec->fiber)) {
             rb_exc_raise(value);
             VM_UNREACHABLE(fiber_switch);
         }
@@ -1535,7 +1635,7 @@ fiber_switch(rb_fiber_t *fib, int argc, const VALUE *argv, int is_resume)
             cont->argc = -1;
             cont->value = value;
 #if FIBER_USE_NATIVE
-            fiber_setcontext(th->root_fiber, th->ec.fiber);
+            fiber_setcontext(th->root_fiber, th->ec->fiber);
 #else
             cont_restore_0(cont, &value);
 #endif
@@ -1567,13 +1667,33 @@ rb_fiber_transfer(VALUE fibval, int argc, const VALUE *argv)
     return fiber_switch(fib, argc, argv, 0);
 }

+void
+rb_fiber_close(rb_fiber_t *fib)
+{
+    VALUE *vm_stack = fib->cont.saved_ec.vm_stack;
+    fiber_status_set(fib, FIBER_TERMINATED);
+
+    if (fib->cont.type == ROOT_FIBER_CONTEXT) {
+        rb_thread_recycle_stack_release(vm_stack);
+    }
+    else {
+        ruby_xfree(vm_stack);
+    }
+    ec_set_vm_stack(&fib->cont.saved_ec, NULL, 0);
+
+#if !FIBER_USE_NATIVE
+    /* should not mark machine stack any more */
+    fib->cont.saved_ec.machine.stack_end = NULL;
+#endif
+}
+
 static void
 rb_fiber_terminate(rb_fiber_t *fib)
 {
     VALUE value = fib->cont.value;
     VM_ASSERT(FIBER_RESUMED_P(fib));
-    fiber_status_set(fib, FIBER_TERMINATED);
+    rb_fiber_close(fib);
+
 #if FIBER_USE_NATIVE && !defined(_WIN32)
     /* Ruby must not switch to other thread until storing terminated_machine_stack */
     terminated_machine_stack.ptr = fib->ss_sp;
@@ -1583,6 +1703,7 @@ rb_fiber_terminate(rb_fiber_t *fib)
     fib->cont.machine.stack = NULL;
     fib->cont.machine.stack_size = 0;
 #endif
+
     fiber_switch(return_fiber(), 1, &value, 0);
 }
@@ -1613,8 +1734,8 @@ rb_fiber_reset_root_local_storage(VALUE thval)
 {
     rb_thread_t *th = rb_thread_ptr(thval);

-    if (th->root_fiber && th->root_fiber != th->ec.fiber) {
-        th->ec.local_storage = th->root_fiber->cont.saved_ec.local_storage;
+    if (th->root_fiber && th->root_fiber != th->ec->fiber) {
+        th->ec->local_storage = th->root_fiber->cont.saved_ec.local_storage;
     }
 }
@@ -1794,7 +1915,7 @@ Init_Cont(void)
 #else /* not WIN32 */
     pagesize = sysconf(_SC_PAGESIZE);
 #endif
-    SET_MACHINE_STACK_END(&th->ec.machine.stack_end);
+    SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
 #endif

     rb_cFiber = rb_define_class("Fiber", rb_cObject);
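
Taken together, ec_switch() and ec_set_vm_stack() above change fiber
switching from a struct copy into a pointer swap. A condensed sketch
(names exactly as in the diff; this is not additional code from the
commit):

    /* before: restoring a fiber copied its saved context into the thread */
    th->ec = fib->cont.saved_ec;              /* struct assignment */

    /* after: the thread is simply repointed at the fiber's context */
    th->ec = &fib->cont.saved_ec;             /* what ec_switch() does */
    ruby_current_execution_context_ptr = th->ec;
    VM_ASSERT(th->ec->fiber == fib);          /* checked in fiber_restore_thread() */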

error.c

@@ -525,7 +525,7 @@ die(void)
     abort();
 }

-#include <stdio.h>
 void
 rb_bug(const char *fmt, ...)
 {
@@ -1300,7 +1300,7 @@ name_err_initialize(int argc, VALUE *argv, VALUE self)
         rb_thread_t *th = GET_THREAD();
         rb_control_frame_t *cfp =
             rb_vm_get_ruby_level_next_cfp(th,
-                                          RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec.cfp));
+                                          RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec->cfp));
         if (cfp) iseqw = rb_iseqw_new(cfp->iseq);
     }
     rb_ivar_set(self, id_iseq, iseqw);

eval.c

@@ -129,7 +129,7 @@ static void
 ruby_finalize_1(void)
 {
     ruby_sig_finalize();
-    GET_THREAD()->ec.errinfo = Qnil;
+    GET_THREAD()->ec->errinfo = Qnil;
     rb_gc_call_finalizer_at_exit();
 }
@@ -174,8 +174,8 @@ ruby_cleanup(volatile int ex)
     SAVE_ROOT_JMPBUF(th, { RUBY_VM_CHECK_INTS(th); });

       step_0: step++;
-        errs[1] = th->ec.errinfo;
-        th->ec.safe_level = 0;
+        errs[1] = th->ec->errinfo;
+        th->ec->safe_level = 0;
         ruby_init_stack(&errs[STACK_UPPER(errs, 0, 1)]);

         SAVE_ROOT_JMPBUF(th, ruby_finalize_0());
@@ -184,7 +184,7 @@ ruby_cleanup(volatile int ex)
             /* protect from Thread#raise */
             th->status = THREAD_KILLED;

-            errs[0] = th->ec.errinfo;
+            errs[0] = th->ec->errinfo;
             SAVE_ROOT_JMPBUF(th, rb_thread_terminate_all());
         }
         else {
@@ -194,7 +194,7 @@ ruby_cleanup(volatile int ex)
         }
         if (ex == 0) ex = state;
     }
-    th->ec.errinfo = errs[1];
+    th->ec->errinfo = errs[1];
     sysex = error_handle(ex);

     state = 0;
@@ -203,7 +203,7 @@ ruby_cleanup(volatile int ex)
         if (!RTEST(err)) continue;

-        /* th->ec.errinfo contains a NODE while break'ing */
+        /* th->ec->errinfo contains a NODE while break'ing */
         if (THROW_DATA_P(err)) continue;

         if (rb_obj_is_kind_of(err, rb_eSystemExit)) {
@@ -237,7 +237,7 @@ ruby_exec_internal(void *n)
 {
     volatile int state;
     rb_iseq_t *iseq = (rb_iseq_t *)n;
-    rb_thread_t *th = GET_THREAD();
+    rb_thread_t * volatile th = GET_THREAD();

     if (!n) return 0;
@@ -478,7 +478,7 @@ exc_setup_message(rb_thread_t *th, VALUE mesg, VALUE *cause)
     int nocause = 0;

     if (NIL_P(mesg)) {
-        mesg = th->ec.errinfo;
+        mesg = th->ec->errinfo;
         if (INTERNAL_EXCEPTION_P(mesg)) TH_JUMP_TAG(th, TAG_FATAL);
         nocause = 1;
     }
@@ -531,19 +531,19 @@ setup_exception(rb_thread_t *th, int tag, volatile VALUE mesg, VALUE cause)
     }

     if (!NIL_P(mesg)) {
-        th->ec.errinfo = mesg;
+        th->ec->errinfo = mesg;
     }

-    if (RTEST(ruby_debug) && !NIL_P(e = th->ec.errinfo) &&
+    if (RTEST(ruby_debug) && !NIL_P(e = th->ec->errinfo) &&
         !rb_obj_is_kind_of(e, rb_eSystemExit)) {
         enum ruby_tag_type state;

         mesg = e;
         TH_PUSH_TAG(th);
         if ((state = EXEC_TAG()) == TAG_NONE) {
-            th->ec.errinfo = Qnil;
+            th->ec->errinfo = Qnil;
             e = rb_obj_as_string(mesg);
-            th->ec.errinfo = mesg;
+            th->ec->errinfo = mesg;
             if (file && line) {
                 e = rb_sprintf("Exception `%"PRIsVALUE"' at %s:%d - %"PRIsVALUE"\n",
                                rb_obj_class(mesg), file, line, e);
@@ -559,8 +559,8 @@ setup_exception(rb_thread_t *th, int tag, volatile VALUE mesg, VALUE cause)
             warn_print_str(e);
         }
         TH_POP_TAG();
-        if (state == TAG_FATAL && th->ec.errinfo == exception_error) {
-            th->ec.errinfo = mesg;
+        if (state == TAG_FATAL && th->ec->errinfo == exception_error) {
+            th->ec->errinfo = mesg;
         }
         else if (state) {
             rb_threadptr_reset_raised(th);
@@ -570,14 +570,14 @@ setup_exception(rb_thread_t *th, int tag, volatile VALUE mesg, VALUE cause)

     if (rb_threadptr_set_raised(th)) {
       fatal:
-        th->ec.errinfo = exception_error;
+        th->ec->errinfo = exception_error;
         rb_threadptr_reset_raised(th);
         TH_JUMP_TAG(th, TAG_FATAL);
     }

     if (tag != TAG_FATAL) {
-        RUBY_DTRACE_HOOK(RAISE, rb_obj_classname(th->ec.errinfo));
-        EXEC_EVENT_HOOK(th, RUBY_EVENT_RAISE, th->ec.cfp->self, 0, 0, 0, mesg);
+        RUBY_DTRACE_HOOK(RAISE, rb_obj_classname(th->ec->errinfo));
+        EXEC_EVENT_HOOK(th, RUBY_EVENT_RAISE, th->ec->cfp->self, 0, 0, 0, mesg);
     }
 }
@@ -797,7 +797,7 @@ void
 rb_raise_jump(VALUE mesg, VALUE cause)
 {
     rb_thread_t *th = GET_THREAD();
-    const rb_control_frame_t *cfp = th->ec.cfp;
+    const rb_control_frame_t *cfp = th->ec->cfp;
     const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
     VALUE klass = me->owner;
     VALUE self = cfp->self;
@@ -835,7 +835,7 @@ int
 rb_block_given_p(void)
 {
     rb_thread_t *th = GET_THREAD();
-    if (rb_vm_frame_block_handler(th->ec.cfp) == VM_BLOCK_HANDLER_NONE) {
+    if (rb_vm_frame_block_handler(th->ec->cfp) == VM_BLOCK_HANDLER_NONE) {
         return FALSE;
     }
     else {
@@ -896,10 +896,10 @@ rb_rescue2(VALUE (* b_proc) (ANYARGS), VALUE data1,
            VALUE (* r_proc) (ANYARGS), VALUE data2, ...)
 {
     enum ruby_tag_type state;
-    rb_thread_t *th = GET_THREAD();
-    rb_control_frame_t *volatile cfp = th->ec.cfp;
+    rb_thread_t * volatile th = GET_THREAD();
+    rb_control_frame_t *volatile cfp = th->ec->cfp;
     volatile VALUE result = Qfalse;
-    volatile VALUE e_info = th->ec.errinfo;
+    volatile VALUE e_info = th->ec->errinfo;
     va_list args;

     TH_PUSH_TAG(th);
@@ -911,7 +911,7 @@ rb_rescue2(VALUE (* b_proc) (ANYARGS), VALUE data1,
             /* escape from r_proc */
             if (state == TAG_RETRY) {
                 state = 0;
-                th->ec.errinfo = Qnil;
+                th->ec->errinfo = Qnil;
                 result = Qfalse;
                 goto retry_entry;
             }
@@ -925,7 +925,7 @@ rb_rescue2(VALUE (* b_proc) (ANYARGS), VALUE data1,

             va_init_list(args, data2);
             while ((eclass = va_arg(args, VALUE)) != 0) {
-                if (rb_obj_is_kind_of(th->ec.errinfo, eclass)) {
+                if (rb_obj_is_kind_of(th->ec->errinfo, eclass)) {
                     handle = TRUE;
                     break;
                 }
@@ -936,9 +936,9 @@ rb_rescue2(VALUE (* b_proc) (ANYARGS), VALUE data1,
                 result = Qnil;
                 state = 0;
                 if (r_proc) {
-                    result = (*r_proc) (data2, th->ec.errinfo);
+                    result = (*r_proc) (data2, th->ec->errinfo);
                 }
-                th->ec.errinfo = e_info;
+                th->ec->errinfo = e_info;
             }
         }
     }
@@ -993,15 +993,15 @@ rb_protect(VALUE (* proc) (VALUE), VALUE data, int *pstate)
 {
     volatile VALUE result = Qnil;
     volatile enum ruby_tag_type state;
-    rb_thread_t *th = GET_THREAD();
-    rb_control_frame_t *volatile cfp = th->ec.cfp;
+    rb_thread_t * volatile th = GET_THREAD();
+    rb_control_frame_t *volatile cfp = th->ec->cfp;
     struct rb_vm_protect_tag protect_tag;
     rb_jmpbuf_t org_jmpbuf;

-    protect_tag.prev = th->ec.protect_tag;
+    protect_tag.prev = th->ec->protect_tag;

     TH_PUSH_TAG(th);
-    th->ec.protect_tag = &protect_tag;
+    th->ec->protect_tag = &protect_tag;
     MEMCPY(&org_jmpbuf, &(th)->root_jmpbuf, rb_jmpbuf_t, 1);
     if ((state = TH_EXEC_TAG()) == TAG_NONE) {
         SAVE_ROOT_JMPBUF(th, result = (*proc) (data));
@@ -1010,7 +1010,7 @@ rb_protect(VALUE (* proc) (VALUE), VALUE data, int *pstate)
         rb_vm_rewind_cfp(th, cfp);
     }
     MEMCPY(&(th)->root_jmpbuf, &org_jmpbuf, rb_jmpbuf_t, 1);
-    th->ec.protect_tag = protect_tag.prev;
+    th->ec->protect_tag = protect_tag.prev;
     TH_POP_TAG();

     if (pstate != NULL) *pstate = state;
@@ -1037,25 +1037,25 @@ rb_ensure(VALUE (*b_proc)(ANYARGS), VALUE data1, VALUE (*e_proc)(ANYARGS), VALUE
     int state;
     volatile VALUE result = Qnil;
     VALUE errinfo;
-    rb_thread_t *const th = GET_THREAD();
+    rb_thread_t *const volatile th = GET_THREAD();
     rb_ensure_list_t ensure_list;
     ensure_list.entry.marker = 0;
     ensure_list.entry.e_proc = e_proc;
     ensure_list.entry.data2 = data2;
-    ensure_list.next = th->ec.ensure_list;
-    th->ec.ensure_list = &ensure_list;
+    ensure_list.next = th->ec->ensure_list;
+    th->ec->ensure_list = &ensure_list;
     TH_PUSH_TAG(th);
     if ((state = EXEC_TAG()) == TAG_NONE) {
         result = (*b_proc) (data1);
     }
     TH_POP_TAG();
-    errinfo = th->ec.errinfo;
+    errinfo = th->ec->errinfo;
     if (!NIL_P(errinfo) && !RB_TYPE_P(errinfo, T_OBJECT)) {
-        th->ec.errinfo = Qnil;
+        th->ec->errinfo = Qnil;
     }
-    th->ec.ensure_list=ensure_list.next;
+    th->ec->ensure_list=ensure_list.next;
     (*ensure_list.entry.e_proc)(ensure_list.entry.data2);
-    th->ec.errinfo = errinfo;
+    th->ec->errinfo = errinfo;
     if (state)
         TH_JUMP_TAG(th, state);
     return result;
@@ -1102,7 +1102,7 @@ frame_called_id(rb_control_frame_t *cfp)
 ID
 rb_frame_this_func(void)
 {
-    return frame_func_id(GET_THREAD()->ec.cfp);
+    return frame_func_id(GET_THREAD()->ec->cfp);
 }

 /*!
@@ -1119,15 +1119,15 @@ rb_frame_this_func(void)
 ID
 rb_frame_callee(void)
 {
-    return frame_called_id(GET_THREAD()->ec.cfp);
+    return frame_called_id(GET_THREAD()->ec->cfp);
 }

 static rb_control_frame_t *
 previous_frame(rb_thread_t *th)
 {
-    rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec.cfp);
+    rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec->cfp);
     /* check if prev_cfp can be accessible */
-    if ((void *)(th->ec.vm_stack + th->ec.vm_stack_size) == (void *)(prev_cfp)) {
+    if ((void *)(th->ec->vm_stack + th->ec->vm_stack_size) == (void *)(prev_cfp)) {
         return 0;
     }
     return prev_cfp;
@@ -1159,7 +1159,7 @@ ID
 rb_frame_last_func(void)
 {
     rb_thread_t *th = GET_THREAD();
-    rb_control_frame_t *cfp = th->ec.cfp;
+    rb_control_frame_t *cfp = th->ec->cfp;
     ID mid;

     while (!(mid = frame_func_id(cfp)) &&
@@ -1439,7 +1439,7 @@ rb_mod_refine(VALUE module, VALUE klass)
         id_refined_class, id_defined_at;
     VALUE refinements, activated_refinements;
     rb_thread_t *th = GET_THREAD();
-    VALUE block_handler = rb_vm_frame_block_handler(th->ec.cfp);
+    VALUE block_handler = rb_vm_frame_block_handler(th->ec->cfp);

     if (block_handler == VM_BLOCK_HANDLER_NONE) {
         rb_raise(rb_eArgError, "no block given");
@@ -1724,7 +1724,7 @@ top_using(VALUE self, VALUE module)
 static const VALUE *
 errinfo_place(rb_thread_t *th)
 {
-    rb_control_frame_t *cfp = th->ec.cfp;
+    rb_control_frame_t *cfp = th->ec->cfp;
     rb_control_frame_t *end_cfp = RUBY_VM_END_CONTROL_FRAME(th);

     while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
@@ -1751,7 +1751,7 @@ get_thread_errinfo(rb_thread_t *th)
         return *ptr;
     }
     else {
-        return th->ec.errinfo;
+        return th->ec->errinfo;
     }
 }
@@ -1777,7 +1777,7 @@ VALUE
 rb_errinfo(void)
 {
     rb_thread_t *th = GET_THREAD();
-    return th->ec.errinfo;
+    return th->ec->errinfo;
 }

 /*! Sets the current exception (\c $!) to the given value
@@ -1794,7 +1794,7 @@ rb_set_errinfo(VALUE err)
     if (!NIL_P(err) && !rb_obj_is_kind_of(err, rb_eException)) {
         rb_raise(rb_eTypeError, "assigning non-exception to $!");
     }
-    GET_THREAD()->ec.errinfo = err;
+    GET_THREAD()->ec->errinfo = err;
 }

 static VALUE
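
The public entry points touched above (rb_protect(), rb_ensure(),
rb_errinfo(), rb_set_errinfo()) keep their signatures; only their
bookkeeping moves from th->ec. to th->ec->. A hypothetical caller
(my_body and my_arg are placeholders, not part of this commit) is
unaffected:

    int state;
    VALUE result = rb_protect(my_body, my_arg, &state);
    if (state) {
        VALUE err = rb_errinfo();  /* now reads GET_THREAD()->ec->errinfo */
        rb_set_errinfo(Qnil);
        /* handle err, or re-raise with rb_jump_tag(state) */
    }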

eval_error.c

@@ -69,7 +69,7 @@ set_backtrace(VALUE info, VALUE bt)
 static void
 error_print(rb_thread_t *th)
 {
-    rb_threadptr_error_print(th, th->ec.errinfo);
+    rb_threadptr_error_print(th, th->ec->errinfo);
 }

 static void
@@ -167,7 +167,7 @@ void
 rb_threadptr_error_print(rb_thread_t *volatile th, volatile VALUE errinfo)
 {
     volatile VALUE errat = Qundef;
-    volatile int raised_flag = th->ec.raised_flag;
+    volatile int raised_flag = th->ec->raised_flag;
     volatile VALUE eclass = Qundef, emesg = Qundef;

     if (NIL_P(errinfo))
@@ -202,7 +202,7 @@ rb_threadptr_error_print(rb_thread_t *volatile th, volatile VALUE errinfo)
     }
   error:
     TH_POP_TAG();
-    th->ec.errinfo = errinfo;
+    th->ec->errinfo = errinfo;
     rb_thread_raised_set(th, raised_flag);
 }
@@ -304,7 +304,7 @@ error_handle(int ex)
         warn_print("unexpected throw\n");
         break;
       case TAG_RAISE: {
-        VALUE errinfo = th->ec.errinfo;
+        VALUE errinfo = th->ec->errinfo;
         if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
             status = sysexit_status(errinfo);
         }

eval_intern.h

@@ -14,10 +14,10 @@ vm_passed_block_handler_set(rb_thread_t *th, VALUE block_handler)
 static inline void
 pass_passed_block_handler(rb_thread_t *th)
 {
-    VALUE block_handler = rb_vm_frame_block_handler(th->ec.cfp);
+    VALUE block_handler = rb_vm_frame_block_handler(th->ec->cfp);
     vm_block_handler_verify(block_handler);
     vm_passed_block_handler_set(th, block_handler);
-    VM_ENV_FLAGS_SET(th->ec.cfp->ep, VM_FRAME_FLAG_PASSED);
+    VM_ENV_FLAGS_SET(th->ec->cfp->ep, VM_FRAME_FLAG_PASSED);
 }

 #define PASS_PASSED_BLOCK_HANDLER_TH(th) pass_passed_block_handler(th)
@@ -133,16 +133,16 @@ LONG WINAPI rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *);
     struct rb_vm_tag _tag; \
     _tag.state = TAG_NONE; \
     _tag.tag = Qundef; \
-    _tag.prev = _th->ec.tag;
+    _tag.prev = _th->ec->tag;

 #define TH_POP_TAG() \
-    _th->ec.tag = _tag.prev; \
+    _th->ec->tag = _tag.prev; \
   } while (0)

 #define TH_TMPPOP_TAG() \
-    _th->ec.tag = _tag.prev
+    _th->ec->tag = _tag.prev

-#define TH_REPUSH_TAG() (void)(_th->ec.tag = &_tag)
+#define TH_REPUSH_TAG() (void)(_th->ec->tag = &_tag)

 #define PUSH_TAG() TH_PUSH_TAG(GET_THREAD())
 #define POP_TAG()  TH_POP_TAG()
@@ -174,12 +174,12 @@ LONG WINAPI rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *);
 #undef RB_OBJ_WRITE
 #define RB_OBJ_WRITE(a, slot, b) UNALIGNED_MEMBER_ACCESS(rb_obj_write((VALUE)(a), (VALUE *)(slot), (VALUE)(b), __FILE__, __LINE__))

-/* clear th->ec.tag->state, and return the value */
+/* clear th->ec->tag->state, and return the value */
 static inline int
 rb_threadptr_tag_state(rb_thread_t *th)
 {
-    enum ruby_tag_type state = th->ec.tag->state;
-    th->ec.tag->state = TAG_NONE;
+    enum ruby_tag_type state = th->ec->tag->state;
+    th->ec->tag->state = TAG_NONE;
     return state;
 }
@@ -187,8 +187,8 @@ NORETURN(static inline void rb_threadptr_tag_jump(rb_thread_t *, enum ruby_tag_t
 static inline void
 rb_threadptr_tag_jump(rb_thread_t *th, enum ruby_tag_type st)
 {
-    th->ec.tag->state = st;
-    ruby_longjmp(th->ec.tag->buf, 1);
+    th->ec->tag->state = st;
+    ruby_longjmp(th->ec->tag->buf, 1);
 }

 /*
@@ -282,10 +282,10 @@ enum {
 };
 int rb_threadptr_set_raised(rb_thread_t *th);
 int rb_threadptr_reset_raised(rb_thread_t *th);
-#define rb_thread_raised_set(th, f)   ((th)->ec.raised_flag |= (f))
-#define rb_thread_raised_reset(th, f) ((th)->ec.raised_flag &= ~(f))
-#define rb_thread_raised_p(th, f)     (((th)->ec.raised_flag & (f)) != 0)
-#define rb_thread_raised_clear(th)    ((th)->ec.raised_flag = 0)
+#define rb_thread_raised_set(th, f)   ((th)->ec->raised_flag |= (f))
+#define rb_thread_raised_reset(th, f) ((th)->ec->raised_flag &= ~(f))
+#define rb_thread_raised_p(th, f)     (((th)->ec->raised_flag & (f)) != 0)
+#define rb_thread_raised_clear(th)    ((th)->ec->raised_flag = 0)
 int rb_threadptr_stack_check(rb_thread_t *th);

 VALUE rb_f_eval(int argc, const VALUE *argv, VALUE self);
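
The tag macros above still expand around setjmp/longjmp; the only change
is that the tag chain now hangs off th->ec->tag. The usage pattern inside
the VM stays the same (compare rb_ensure() in the eval.c hunks earlier):

    enum ruby_tag_type state;
    TH_PUSH_TAG(th);                      /* links _tag into th->ec->tag */
    if ((state = EXEC_TAG()) == TAG_NONE) {
        result = (*b_proc)(data1);        /* protected code */
    }
    TH_POP_TAG();                         /* th->ec->tag = _tag.prev */
    if (state) TH_JUMP_TAG(th, state);    /* longjmps via th->ec->tag->buf */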

eval_jump.c

@@ -116,26 +116,26 @@ rb_exec_end_proc(void)
     enum ruby_tag_type state;
     volatile int safe = rb_safe_level();
     rb_thread_t *th = GET_THREAD();
-    volatile VALUE errinfo = th->ec.errinfo;
+    volatile VALUE errinfo = th->ec->errinfo;

     TH_PUSH_TAG(th);
     if ((state = EXEC_TAG()) == TAG_NONE) {
       again:
-        exec_end_procs_chain(&ephemeral_end_procs, &th->ec.errinfo);
-        exec_end_procs_chain(&end_procs, &th->ec.errinfo);
+        exec_end_procs_chain(&ephemeral_end_procs, &th->ec->errinfo);
+        exec_end_procs_chain(&end_procs, &th->ec->errinfo);
     }
     else {
         VAR_INITIALIZED(th);
         TH_TMPPOP_TAG();
         error_handle(state);
-        if (!NIL_P(th->ec.errinfo)) errinfo = th->ec.errinfo;
+        if (!NIL_P(th->ec->errinfo)) errinfo = th->ec->errinfo;
         TH_REPUSH_TAG();
         goto again;
     }
     TH_POP_TAG();

     rb_set_safe_level_force(safe);
-    th->ec.errinfo = errinfo;
+    th->ec->errinfo = errinfo;
 }

 void

gc.c

@@ -1812,7 +1812,7 @@ rb_objspace_set_event_hook(const rb_event_flag_t event)
 static void
 gc_event_hook_body(rb_thread_t *th, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
 {
-    EXEC_EVENT_HOOK(th, event, th->ec.cfp->self, 0, 0, 0, data);
+    EXEC_EVENT_HOOK(th, event, th->ec->cfp->self, 0, 0, 0, data);
 }

 #define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
@@ -2784,16 +2784,16 @@ run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
         long finished;
         int safe;
     } saved;
-    rb_thread_t *const th = GET_THREAD();
+    rb_thread_t *const volatile th = GET_THREAD();
 #define RESTORE_FINALIZER() (\
-        th->ec.cfp = saved.cfp, \
+        th->ec->cfp = saved.cfp, \
         rb_set_safe_level_force(saved.safe), \
         rb_set_errinfo(saved.errinfo))

     saved.safe = rb_safe_level();
     saved.errinfo = rb_errinfo();
     saved.objid = nonspecial_obj_id(obj);
-    saved.cfp = th->ec.cfp;
+    saved.cfp = th->ec->cfp;
     saved.finished = 0;

     TH_PUSH_TAG(th);
@@ -4001,7 +4001,7 @@ ruby_get_stack_grow_direction(volatile VALUE *addr)
 size_t
 ruby_stack_length(VALUE **p)
 {
-    rb_execution_context_t *ec = &GET_THREAD()->ec;
+    rb_execution_context_t *ec = GET_THREAD()->ec;
     SET_STACK_END;
     if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
     return STACK_LENGTH;
@@ -4019,7 +4019,7 @@ ruby_stack_length(VALUE **p)
 static int
 stack_check(rb_thread_t *th, int water_mark)
 {
-    rb_execution_context_t *ec = &th->ec;
+    rb_execution_context_t *ec = th->ec;
     int ret;
     SET_STACK_END;
     ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark;
@@ -4784,7 +4784,7 @@ gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
 {
     struct gc_list *list;
     rb_thread_t *th = GET_THREAD();
-    rb_execution_context_t *ec = &th->ec;
+    rb_execution_context_t *ec = th->ec;

 #if PRINT_ROOT_TICKS
     tick_t start_tick = tick();
@@ -4831,7 +4831,7 @@ gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
     mark_tbl(objspace, finalizer_table);

     MARK_CHECKPOINT("machine_context");
-    mark_current_machine_context(objspace, &th->ec);
+    mark_current_machine_context(objspace, th->ec);

     MARK_CHECKPOINT("encodings");
     rb_gc_mark_encodings();
@@ -7712,7 +7712,7 @@ rb_memerror(void)
         rb_thread_raised_set(th, RAISED_NOMEMORY);
         exc = ruby_vm_special_exception_copy(exc);
     }
-    th->ec.errinfo = exc;
+    th->ec->errinfo = exc;
     TH_JUMP_TAG(th, TAG_RAISE);
 }

insns.def

@@ -1678,8 +1678,8 @@ opt_call_c_function
     reg_cfp = (funcptr)(th, reg_cfp);

     if (reg_cfp == 0) {
-        VALUE err = th->ec.errinfo;
-        th->ec.errinfo = Qnil;
+        VALUE err = th->ec->errinfo;
+        th->ec->errinfo = Qnil;
         THROW_EXCEPTION(err);
     }

internal.h

@@ -1721,7 +1721,6 @@ VALUE rb_uninterruptible(VALUE (*b_proc)(ANYARGS), VALUE data);
 VALUE rb_mutex_owned_p(VALUE self);

 /* thread_pthread.c, thread_win32.c */
-void Init_native_thread(void);
 int rb_divert_reserved_fd(int fd);

 /* transcode.c */

iseq.c

@@ -663,7 +663,7 @@ rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE realpath, VALUE line, c
     }

     if (!node) {
-        rb_exc_raise(th->ec.errinfo);
+        rb_exc_raise(th->ec->errinfo);
     }
     else {
         INITIALIZED VALUE label = parent ?
@@ -870,7 +870,7 @@ iseqw_s_compile_file(int argc, VALUE *argv, VALUE self)
     parser = rb_parser_new();
     rb_parser_set_context(parser, NULL, FALSE);
     node = rb_parser_compile_file_path(parser, file, f, NUM2INT(line));
-    if (!node) exc = GET_THREAD()->ec.errinfo;
+    if (!node) exc = GET_THREAD()->ec->errinfo;

     rb_io_close(f);
     if (!node) rb_exc_raise(exc);

load.c

@ -587,7 +587,7 @@ rb_load_internal0(rb_thread_t *th, VALUE fname, int wrap)
rb_thread_t *volatile th0 = th; rb_thread_t *volatile th0 = th;
#endif #endif
th->ec.errinfo = Qnil; /* ensure */ th->ec->errinfo = Qnil; /* ensure */
if (!wrap) { if (!wrap) {
th->top_wrapper = 0; th->top_wrapper = 0;
@ -631,11 +631,11 @@ rb_load_internal0(rb_thread_t *th, VALUE fname, int wrap)
* rb_iseq_load_iseq case */ * rb_iseq_load_iseq case */
VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef); VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
if (NIL_P(exc)) return state; if (NIL_P(exc)) return state;
th->ec.errinfo = exc; th->ec->errinfo = exc;
return TAG_RAISE; return TAG_RAISE;
} }
if (!NIL_P(th->ec.errinfo)) { if (!NIL_P(th->ec->errinfo)) {
/* exception during load */ /* exception during load */
return TAG_RAISE; return TAG_RAISE;
} }
@ -648,7 +648,7 @@ rb_load_internal(VALUE fname, int wrap)
rb_thread_t *curr_th = GET_THREAD(); rb_thread_t *curr_th = GET_THREAD();
int state = rb_load_internal0(curr_th, fname, wrap); int state = rb_load_internal0(curr_th, fname, wrap);
if (state) { if (state) {
if (state == TAG_RAISE) rb_exc_raise(curr_th->ec.errinfo); if (state == TAG_RAISE) rb_exc_raise(curr_th->ec->errinfo);
TH_JUMP_TAG(curr_th, state); TH_JUMP_TAG(curr_th, state);
} }
} }
@ -963,7 +963,7 @@ rb_require_internal(VALUE fname, int safe)
{ {
volatile int result = -1; volatile int result = -1;
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
volatile VALUE errinfo = th->ec.errinfo; volatile VALUE errinfo = th->ec->errinfo;
enum ruby_tag_type state; enum ruby_tag_type state;
struct { struct {
int safe; int safe;
@ -1024,7 +1024,7 @@ rb_require_internal(VALUE fname, int safe)
return state; return state;
} }
th->ec.errinfo = errinfo; th->ec->errinfo = errinfo;
RUBY_DTRACE_HOOK(REQUIRE_RETURN, RSTRING_PTR(fname)); RUBY_DTRACE_HOOK(REQUIRE_RETURN, RSTRING_PTR(fname));
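
`rb_require_internal` shows the matching discipline at a larger scale: the caller's `errinfo` is stashed on entry and written back on the success path, so a successful require leaves `$!` exactly as it found it. Minimal sketch with toy types:

    typedef struct { void *errinfo; } toy_ec_t;

    static int do_load(void) { return 0; }      /* 0 = success (toy) */

    static int toy_require(toy_ec_t *ec)
    {
        void *const saved = ec->errinfo;   /* volatile in the real code */
        int state = do_load();
        if (state) return state;    /* failure: errinfo describes it    */
        ec->errinfo = saved;        /* success: restore the caller's $! */
        return 0;
    }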

proc.c

@ -333,7 +333,7 @@ VALUE
rb_binding_new(void) rb_binding_new(void)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
return rb_vm_make_binding(th, th->ec.cfp); return rb_vm_make_binding(th, th->ec->cfp);
} }
/* /*
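
`rb_binding_new` is typical of the read-side call sites: resolve the current thread, follow `th->ec->cfp` to the live control frame, and pass the (thread, frame) pair down. Keeping that pair explicit is what lets later revisions pass `ec` alone. Sketch with toy types and a stubbed worker:

    #include <stddef.h>

    typedef struct toy_cfp { int dummy; } toy_cfp_t;
    typedef struct { toy_cfp_t *cfp; } toy_ec_t;
    typedef struct { toy_ec_t *ec; } toy_thread_t;

    static void *make_binding(toy_thread_t *th, toy_cfp_t *cfp)
    {
        (void)th; (void)cfp;
        return NULL;                           /* stand-in */
    }

    static void *toy_binding_new(toy_thread_t *th)
    {
        return make_binding(th, th->ec->cfp);  /* was th->ec.cfp */
    }
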
@ -698,7 +698,7 @@ proc_new(VALUE klass, int8_t is_lambda)
{ {
VALUE procval; VALUE procval;
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = th->ec.cfp; rb_control_frame_t *cfp = th->ec->cfp;
VALUE block_handler; VALUE block_handler;
if ((block_handler = rb_vm_frame_block_handler(cfp)) == VM_BLOCK_HANDLER_NONE) { if ((block_handler = rb_vm_frame_block_handler(cfp)) == VM_BLOCK_HANDLER_NONE) {
@ -1049,7 +1049,7 @@ rb_block_arity(void)
{ {
int min, max; int min, max;
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = th->ec.cfp; rb_control_frame_t *cfp = th->ec->cfp;
VALUE block_handler = rb_vm_frame_block_handler(cfp); VALUE block_handler = rb_vm_frame_block_handler(cfp);
struct rb_block block; struct rb_block block;
@ -1082,7 +1082,7 @@ int
rb_block_min_max_arity(int *max) rb_block_min_max_arity(int *max)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = th->ec.cfp; rb_control_frame_t *cfp = th->ec->cfp;
VALUE block_handler = rb_vm_frame_block_handler(cfp); VALUE block_handler = rb_vm_frame_block_handler(cfp);
struct rb_block block; struct rb_block block;
@ -1911,7 +1911,7 @@ rb_mod_define_method(int argc, VALUE *argv, VALUE mod)
body = rb_block_lambda(); body = rb_block_lambda();
#else #else
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
VALUE block_handler = rb_vm_frame_block_handler(th->ec.cfp); VALUE block_handler = rb_vm_frame_block_handler(th->ec->cfp);
if (block_handler == VM_BLOCK_HANDLER_NONE) rb_raise(rb_eArgError, proc_without_block); if (block_handler == VM_BLOCK_HANDLER_NONE) rb_raise(rb_eArgError, proc_without_block);
switch (vm_block_handler_type(block_handler)) { switch (vm_block_handler_type(block_handler)) {


@ -3765,7 +3765,7 @@ rb_f_exit_bang(int argc, VALUE *argv, VALUE obj)
void void
rb_exit(int status) rb_exit(int status)
{ {
if (GET_THREAD()->ec.tag) { if (GET_THREAD()->ec->tag) {
VALUE args[2]; VALUE args[2];
args[0] = INT2NUM(status); args[0] = INT2NUM(status);
@ -3851,7 +3851,7 @@ rb_f_abort(int argc, const VALUE *argv)
rb_check_arity(argc, 0, 1); rb_check_arity(argc, 0, 1);
if (argc == 0) { if (argc == 0) {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
VALUE errinfo = th->ec.errinfo; VALUE errinfo = th->ec->errinfo;
if (!NIL_P(errinfo)) { if (!NIL_P(errinfo)) {
rb_threadptr_error_print(th, errinfo); rb_threadptr_error_print(th, errinfo);
} }
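
Whether `rb_exit` raises or terminates hinges on `ec->tag`: a non-NULL tag means some `TH_PUSH_TAG`/`EXEC_TAG` pair is active up the stack, so a `SystemExit` can be caught; with no tag, the process exits directly. Toy sketch of the guard:

    #include <stdlib.h>

    struct toy_tag { struct toy_tag *prev; };
    typedef struct { struct toy_tag *tag; } toy_ec_t;

    static void toy_exit(toy_ec_t *ec, int status)
    {
        if (ec->tag) {
            return;        /* real code raises SystemExit here   */
        }
        exit(status);      /* no enclosing tag: leave the process */
    }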

safe.c

@ -34,13 +34,13 @@ ruby_safe_level_2_warning(void)
int int
rb_safe_level(void) rb_safe_level(void)
{ {
return GET_THREAD()->ec.safe_level; return GET_THREAD()->ec->safe_level;
} }
void void
rb_set_safe_level_force(int safe) rb_set_safe_level_force(int safe)
{ {
GET_THREAD()->ec.safe_level = safe; GET_THREAD()->ec->safe_level = safe;
} }
void void
@ -48,11 +48,11 @@ rb_set_safe_level(int level)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
if (level > th->ec.safe_level) { if (level > th->ec->safe_level) {
if (level > SAFE_LEVEL_MAX) { if (level > SAFE_LEVEL_MAX) {
rb_raise(rb_eArgError, "$SAFE=2 to 4 are obsolete"); rb_raise(rb_eArgError, "$SAFE=2 to 4 are obsolete");
} }
th->ec.safe_level = level; th->ec->safe_level = level;
} }
} }
@ -66,7 +66,7 @@ static void
safe_setter(VALUE val) safe_setter(VALUE val)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
int current_level = th->ec.safe_level; int current_level = th->ec->safe_level;
int level = NUM2INT(val); int level = NUM2INT(val);
if (level == current_level) { if (level == current_level) {
@ -84,7 +84,7 @@ safe_setter(VALUE val)
/* block parameters */ /* block parameters */
rb_vm_stack_to_heap(th); rb_vm_stack_to_heap(th);
th->ec.safe_level = level; th->ec->safe_level = level;
} }
void void
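
$SAFE is now per-execution-context state: all readers and writers funnel through `ec->safe_level`, which is what allows each thread (and later each fiber) to carry its own level. Hedged sketch of the accessor pair; the bound is illustrative, not the real SAFE_LEVEL_MAX:

    #define TOY_SAFE_LEVEL_MAX 1    /* illustrative bound only */

    typedef struct { int safe_level; } toy_ec_t;

    static int toy_safe_level(const toy_ec_t *ec)
    {
        return ec->safe_level;
    }

    static int toy_raise_safe_level(toy_ec_t *ec, int level)
    {
        if (level <= ec->safe_level) return 0;   /* can only be raised */
        if (level > TOY_SAFE_LEVEL_MAX) return -1;
        ec->safe_level = level;
        return 0;
    }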


@ -838,13 +838,13 @@ check_stack_overflow(int sig, const uintptr_t addr, const ucontext_t *ctx)
* the fault page can be the next. */ * the fault page can be the next. */
if (sp_page == fault_page || sp_page == fault_page + 1 || if (sp_page == fault_page || sp_page == fault_page + 1 ||
sp_page <= fault_page && fault_page <= bp_page) { sp_page <= fault_page && fault_page <= bp_page) {
rb_thread_t *th = ruby_current_thread; rb_thread_t *th = ruby_current_thread();
int crit = FALSE; int crit = FALSE;
if ((uintptr_t)th->ec.tag->buf / pagesize <= fault_page + 1) { if ((uintptr_t)th->ec->tag->buf / pagesize <= fault_page + 1) {
/* drop the last tag if it is close to the fault, /* drop the last tag if it is close to the fault,
* otherwise it can cause stack overflow again at the same * otherwise it can cause stack overflow again at the same
* place. */ * place. */
th->ec.tag = th->ec.tag->prev; th->ec->tag = th->ec->tag->prev;
crit = TRUE; crit = TRUE;
} }
reset_sigmask(sig); reset_sigmask(sig);
@ -856,7 +856,7 @@ static void
check_stack_overflow(int sig, const void *addr) check_stack_overflow(int sig, const void *addr)
{ {
int ruby_stack_overflowed_p(const rb_thread_t *, const void *); int ruby_stack_overflowed_p(const rb_thread_t *, const void *);
rb_thread_t *th = ruby_current_thread; rb_thread_t *th = GET_THREAD();
if (ruby_stack_overflowed_p(th, addr)) { if (ruby_stack_overflowed_p(th, addr)) {
reset_sigmask(sig); reset_sigmask(sig);
rb_threadptr_stack_overflow(th, FALSE); rb_threadptr_stack_overflow(th, FALSE);
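
The subtle part of the overflow handler is the tag drop: if the innermost tag's jump buffer lies within a page or so of the faulting address, longjmp-ing to it would overflow again immediately, so the handler pops that tag before jumping. Sketch with toy types:

    #include <stdint.h>

    struct toy_tag { char *buf; struct toy_tag *prev; };
    typedef struct { struct toy_tag *tag; } toy_ec_t;

    /* Returns 1 ("crit") when the innermost tag had to be dropped. */
    static int drop_tag_near_fault(toy_ec_t *ec, uintptr_t fault_page,
                                   uintptr_t pagesize)
    {
        if ((uintptr_t)ec->tag->buf / pagesize <= fault_page + 1) {
            ec->tag = ec->tag->prev;  /* unwind one level first */
            return 1;
        }
        return 0;
    }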


@ -139,8 +139,8 @@ static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_regio
do { \ do { \
FLUSH_REGISTER_WINDOWS; \ FLUSH_REGISTER_WINDOWS; \
RB_GC_SAVE_MACHINE_REGISTER_STACK(th); \ RB_GC_SAVE_MACHINE_REGISTER_STACK(th); \
setjmp((th)->ec.machine.regs); \ setjmp((th)->ec->machine.regs); \
SET_MACHINE_STACK_END(&(th)->ec.machine.stack_end); \ SET_MACHINE_STACK_END(&(th)->ec->machine.stack_end); \
} while (0) } while (0)
#define GVL_UNLOCK_BEGIN() do { \ #define GVL_UNLOCK_BEGIN() do { \
@ -526,9 +526,9 @@ thread_cleanup_func_before_exec(void *th_ptr)
{ {
rb_thread_t *th = th_ptr; rb_thread_t *th = th_ptr;
th->status = THREAD_KILLED; th->status = THREAD_KILLED;
th->ec.machine.stack_start = th->ec.machine.stack_end = NULL; th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
#ifdef __ia64 #ifdef __ia64
th->ec.machine.register_stack_start = th->ec.machine.register_stack_end = NULL; th->ec->machine.register_stack_start = th->ec->machine.register_stack_end = NULL;
#endif #endif
} }
@ -581,9 +581,9 @@ thread_do_start(rb_thread_t *th, VALUE args)
if (!th->first_func) { if (!th->first_func) {
rb_proc_t *proc; rb_proc_t *proc;
GetProcPtr(th->first_proc, proc); GetProcPtr(th->first_proc, proc);
th->ec.errinfo = Qnil; th->ec->errinfo = Qnil;
th->ec.root_lep = rb_vm_proc_local_ep(th->first_proc); th->ec->root_lep = rb_vm_proc_local_ep(th->first_proc);
th->ec.root_svar = Qfalse; th->ec->root_svar = Qfalse;
EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef); EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
th->value = rb_vm_invoke_proc(th, proc, th->value = rb_vm_invoke_proc(th, proc,
(int)RARRAY_LEN(args), RARRAY_CONST_PTR(args), (int)RARRAY_LEN(args), RARRAY_CONST_PTR(args),
@ -614,9 +614,9 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_s
ruby_thread_set_native(th); ruby_thread_set_native(th);
th->ec.machine.stack_start = stack_start; th->ec->machine.stack_start = stack_start;
#ifdef __ia64 #ifdef __ia64
th->ec.machine.register_stack_start = register_stack_start; th->ec->machine.register_stack_start = register_stack_start;
#endif #endif
thread_debug("thread start: %p\n", (void *)th); thread_debug("thread start: %p\n", (void *)th);
@ -630,7 +630,7 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_s
SAVE_ROOT_JMPBUF(th, thread_do_start(th, args)); SAVE_ROOT_JMPBUF(th, thread_do_start(th, args));
} }
else { else {
errinfo = th->ec.errinfo; errinfo = th->ec->errinfo;
if (state == TAG_FATAL) { if (state == TAG_FATAL) {
/* fatal error within this thread, need to stop whole script */ /* fatal error within this thread, need to stop whole script */
} }
@ -696,8 +696,7 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_s
rb_threadptr_unlock_all_locking_mutexes(th); rb_threadptr_unlock_all_locking_mutexes(th);
rb_check_deadlock(th->vm); rb_check_deadlock(th->vm);
rb_thread_recycle_stack_release(th->ec.vm_stack); rb_fiber_close(th->ec->fiber);
th->ec.vm_stack = NULL;
} }
native_mutex_lock(&th->vm->thread_destruct_lock); native_mutex_lock(&th->vm->thread_destruct_lock);
/* make sure vm->running_thread never point me after this point.*/ /* make sure vm->running_thread never point me after this point.*/
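
This hunk transfers ownership of the VM stack: instead of the dying thread releasing `th->ec->vm_stack` itself, it calls `rb_fiber_close` and lets the fiber/EC layer free the stack and detach the context. A sketch of the shape of that hand-off; the real rb_fiber_close in cont.c also updates the fiber's status:

    typedef struct { void **vm_stack; } toy_ec_t;
    typedef struct { toy_ec_t ec; } toy_fiber_t;

    static void release_stack(void **stack) { (void)stack; /* recycle/free */ }

    static void toy_fiber_close(toy_fiber_t *fib)
    {
        release_stack(fib->ec.vm_stack);  /* the fiber owns its stack now */
        fib->ec.vm_stack = NULL;
    }
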
@ -923,8 +922,8 @@ thread_join(rb_thread_t *target_th, double delay)
thread_debug("thread_join: success (thid: %"PRI_THREAD_ID")\n", thread_debug("thread_join: success (thid: %"PRI_THREAD_ID")\n",
thread_id_str(target_th)); thread_id_str(target_th));
if (target_th->ec.errinfo != Qnil) { if (target_th->ec->errinfo != Qnil) {
VALUE err = target_th->ec.errinfo; VALUE err = target_th->ec->errinfo;
if (FIXNUM_P(err)) { if (FIXNUM_P(err)) {
switch (err) { switch (err) {
@ -935,7 +934,7 @@ thread_join(rb_thread_t *target_th, double delay)
rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err)); rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
} }
} }
else if (THROW_DATA_P(target_th->ec.errinfo)) { else if (THROW_DATA_P(target_th->ec->errinfo)) {
rb_bug("thread_join: THROW_DATA should not reach here."); rb_bug("thread_join: THROW_DATA should not reach here.");
} }
else { else {
@ -1437,7 +1436,7 @@ rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
{ {
volatile VALUE val = Qundef; /* shouldn't be used */ volatile VALUE val = Qundef; /* shouldn't be used */
rb_vm_t *vm = GET_VM(); rb_vm_t *vm = GET_VM();
rb_thread_t *th = GET_THREAD(); rb_thread_t *volatile th = GET_THREAD();
volatile int saved_errno = 0; volatile int saved_errno = 0;
enum ruby_tag_type state; enum ruby_tag_type state;
struct waiting_fd wfd; struct waiting_fd wfd;
@ -1858,7 +1857,7 @@ static VALUE
rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg) rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
{ {
VALUE mask; VALUE mask;
rb_thread_t *th = GET_THREAD(); rb_thread_t * volatile th = GET_THREAD();
volatile VALUE r = Qnil; volatile VALUE r = Qnil;
enum ruby_tag_type state; enum ruby_tag_type state;
@ -2008,7 +2007,7 @@ rb_threadptr_to_kill(rb_thread_t *th)
rb_threadptr_pending_interrupt_clear(th); rb_threadptr_pending_interrupt_clear(th);
th->status = THREAD_RUNNABLE; th->status = THREAD_RUNNABLE;
th->to_kill = 1; th->to_kill = 1;
th->ec.errinfo = INT2FIX(TAG_FATAL); th->ec->errinfo = INT2FIX(TAG_FATAL);
TH_JUMP_TAG(th, TAG_FATAL); TH_JUMP_TAG(th, TAG_FATAL);
} }
@ -2031,7 +2030,7 @@ rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
rb_atomic_t interrupt; rb_atomic_t interrupt;
int postponed_job_interrupt = 0; int postponed_job_interrupt = 0;
if (th->ec.raised_flag) return; if (th->ec->raised_flag) return;
while ((interrupt = threadptr_get_interrupts(th)) != 0) { while ((interrupt = threadptr_get_interrupts(th)) != 0) {
int sig; int sig;
@ -2095,7 +2094,7 @@ rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
if (th->status == THREAD_RUNNABLE) if (th->status == THREAD_RUNNABLE)
th->running_time_us += TIME_QUANTUM_USEC; th->running_time_us += TIME_QUANTUM_USEC;
EXEC_EVENT_HOOK(th, RUBY_INTERNAL_EVENT_SWITCH, th->ec.cfp->self, EXEC_EVENT_HOOK(th, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
0, 0, 0, Qundef); 0, 0, 0, Qundef);
rb_thread_schedule_limits(limits_us); rb_thread_schedule_limits(limits_us);
@ -2172,20 +2171,20 @@ rb_threadptr_signal_exit(rb_thread_t *th)
int int
rb_threadptr_set_raised(rb_thread_t *th) rb_threadptr_set_raised(rb_thread_t *th)
{ {
if (th->ec.raised_flag & RAISED_EXCEPTION) { if (th->ec->raised_flag & RAISED_EXCEPTION) {
return 1; return 1;
} }
th->ec.raised_flag |= RAISED_EXCEPTION; th->ec->raised_flag |= RAISED_EXCEPTION;
return 0; return 0;
} }
int int
rb_threadptr_reset_raised(rb_thread_t *th) rb_threadptr_reset_raised(rb_thread_t *th)
{ {
if (!(th->ec.raised_flag & RAISED_EXCEPTION)) { if (!(th->ec->raised_flag & RAISED_EXCEPTION)) {
return 0; return 0;
} }
th->ec.raised_flag &= ~RAISED_EXCEPTION; th->ec->raised_flag &= ~RAISED_EXCEPTION;
return 1; return 1;
} }
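
`raised_flag` is a small bitset on the EC; the set/reset helpers report whether the bit actually changed so callers can restore the prior state symmetrically. Self-contained sketch:

    #define TOY_RAISED_EXCEPTION 0x1

    typedef struct { unsigned int raised_flag; } toy_ec_t;

    static int toy_set_raised(toy_ec_t *ec)
    {
        if (ec->raised_flag & TOY_RAISED_EXCEPTION) return 1; /* no change */
        ec->raised_flag |= TOY_RAISED_EXCEPTION;
        return 0;
    }

    static int toy_reset_raised(toy_ec_t *ec)
    {
        if (!(ec->raised_flag & TOY_RAISED_EXCEPTION)) return 0;
        ec->raised_flag &= ~TOY_RAISED_EXCEPTION;
        return 1;
    }
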
@ -2822,8 +2821,8 @@ rb_thread_status(VALUE thread)
rb_thread_t *target_th = rb_thread_ptr(thread); rb_thread_t *target_th = rb_thread_ptr(thread);
if (rb_threadptr_dead(target_th)) { if (rb_threadptr_dead(target_th)) {
if (!NIL_P(target_th->ec.errinfo) && if (!NIL_P(target_th->ec->errinfo) &&
!FIXNUM_P(target_th->ec.errinfo)) { !FIXNUM_P(target_th->ec->errinfo)) {
return Qnil; return Qnil;
} }
else { else {
@ -2907,7 +2906,7 @@ rb_thread_stop_p(VALUE thread)
static VALUE static VALUE
rb_thread_safe_level(VALUE thread) rb_thread_safe_level(VALUE thread)
{ {
return INT2NUM(rb_thread_ptr(thread)->ec.safe_level); return INT2NUM(rb_thread_ptr(thread)->ec->safe_level);
} }
/* /*
@ -2994,11 +2993,11 @@ static VALUE
threadptr_local_aref(rb_thread_t *th, ID id) threadptr_local_aref(rb_thread_t *th, ID id)
{ {
if (id == recursive_key) { if (id == recursive_key) {
return th->ec.local_storage_recursive_hash; return th->ec->local_storage_recursive_hash;
} }
else { else {
st_data_t val; st_data_t val;
st_table *local_storage = th->ec.local_storage; st_table *local_storage = th->ec->local_storage;
if (local_storage != NULL && st_lookup(local_storage, id, &val)) { if (local_storage != NULL && st_lookup(local_storage, id, &val)) {
return (VALUE)val; return (VALUE)val;
@ -3102,10 +3101,10 @@ rb_thread_fetch(int argc, VALUE *argv, VALUE self)
id = rb_check_id(&key); id = rb_check_id(&key);
if (id == recursive_key) { if (id == recursive_key) {
return target_th->ec.local_storage_recursive_hash; return target_th->ec->local_storage_recursive_hash;
} }
else if (id && target_th->ec.local_storage && else if (id && target_th->ec->local_storage &&
st_lookup(target_th->ec.local_storage, id, &val)) { st_lookup(target_th->ec->local_storage, id, &val)) {
return val; return val;
} }
else if (block_given) { else if (block_given) {
@ -3123,11 +3122,11 @@ static VALUE
threadptr_local_aset(rb_thread_t *th, ID id, VALUE val) threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
{ {
if (id == recursive_key) { if (id == recursive_key) {
th->ec.local_storage_recursive_hash = val; th->ec->local_storage_recursive_hash = val;
return val; return val;
} }
else { else {
st_table *local_storage = th->ec.local_storage; st_table *local_storage = th->ec->local_storage;
if (NIL_P(val)) { if (NIL_P(val)) {
if (!local_storage) return Qnil; if (!local_storage) return Qnil;
@ -3136,7 +3135,7 @@ threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
} }
else { else {
if (local_storage == NULL) { if (local_storage == NULL) {
th->ec.local_storage = local_storage = st_init_numtable(); th->ec->local_storage = local_storage = st_init_numtable();
} }
st_insert(local_storage, id, val); st_insert(local_storage, id, val);
return val; return val;
@ -3249,7 +3248,7 @@ static VALUE
rb_thread_key_p(VALUE self, VALUE key) rb_thread_key_p(VALUE self, VALUE key)
{ {
ID id = rb_check_id(&key); ID id = rb_check_id(&key);
st_table *local_storage = rb_thread_ptr(self)->ec.local_storage; st_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
if (!id || local_storage == NULL) { if (!id || local_storage == NULL) {
return Qfalse; return Qfalse;
@ -3292,7 +3291,7 @@ rb_thread_alone(void)
static VALUE static VALUE
rb_thread_keys(VALUE self) rb_thread_keys(VALUE self)
{ {
st_table *local_storage = rb_thread_ptr(self)->ec.local_storage; st_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
VALUE ary = rb_ary_new(); VALUE ary = rb_ary_new();
if (local_storage) { if (local_storage) {
@ -4481,13 +4480,13 @@ rb_thread_shield_destroy(VALUE self)
static VALUE static VALUE
threadptr_recursive_hash(rb_thread_t *th) threadptr_recursive_hash(rb_thread_t *th)
{ {
return th->ec.local_storage_recursive_hash; return th->ec->local_storage_recursive_hash;
} }
static void static void
threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash) threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
{ {
th->ec.local_storage_recursive_hash = hash; th->ec->local_storage_recursive_hash = hash;
} }
ID rb_frame_last_func(void); ID rb_frame_last_func(void);
@ -4982,7 +4981,7 @@ rb_check_deadlock(rb_vm_t *vm)
static void static void
update_coverage(VALUE data, const rb_trace_arg_t *trace_arg) update_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
{ {
VALUE coverage = rb_iseq_coverage(GET_THREAD()->ec.cfp->iseq); VALUE coverage = rb_iseq_coverage(GET_THREAD()->ec->cfp->iseq);
if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) { if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
long arg = FIX2INT(trace_arg->data); long arg = FIX2INT(trace_arg->data);
switch (arg % 16) { switch (arg % 16) {


@ -446,10 +446,8 @@ ruby_thread_set_native(rb_thread_t *th)
static void native_thread_init(rb_thread_t *th); static void native_thread_init(rb_thread_t *th);
void void
Init_native_thread(void) Init_native_thread(rb_thread_t *th)
{ {
rb_thread_t *th = GET_THREAD();
pthread_key_create(&ruby_native_thread_key, NULL); pthread_key_create(&ruby_native_thread_key, NULL);
th->thread_id = pthread_self(); th->thread_id = pthread_self();
fill_thread_id_str(th); fill_thread_id_str(th);
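
Passing `th` into `Init_native_thread` breaks a bootstrap cycle: the old body called `GET_THREAD()`, which now depends on per-native-thread state that only exists once this function has run. Sketch of the pthread flavor with toy names:

    #include <pthread.h>

    typedef struct { pthread_t thread_id; } toy_thread_t;

    static pthread_key_t toy_native_thread_key;

    /* The caller hands th in explicitly; no GET_THREAD() before TLS exists. */
    static void toy_init_native_thread(toy_thread_t *th)
    {
        pthread_key_create(&toy_native_thread_key, NULL);
        th->thread_id = pthread_self();
        pthread_setspecific(toy_native_thread_key, th);
    }
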
@ -827,8 +825,8 @@ native_thread_init_stack(rb_thread_t *th)
rb_nativethread_id_t curr = pthread_self(); rb_nativethread_id_t curr = pthread_self();
if (pthread_equal(curr, native_main_thread.id)) { if (pthread_equal(curr, native_main_thread.id)) {
th->ec.machine.stack_start = native_main_thread.stack_start; th->ec->machine.stack_start = native_main_thread.stack_start;
th->ec.machine.stack_maxsize = native_main_thread.stack_maxsize; th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
} }
else { else {
#ifdef STACKADDR_AVAILABLE #ifdef STACKADDR_AVAILABLE
@ -837,11 +835,11 @@ native_thread_init_stack(rb_thread_t *th)
if (get_stack(&start, &size) == 0) { if (get_stack(&start, &size) == 0) {
uintptr_t diff = (uintptr_t)start - (uintptr_t)&curr; uintptr_t diff = (uintptr_t)start - (uintptr_t)&curr;
th->ec.machine.stack_start = (VALUE *)&curr; th->ec->machine.stack_start = (VALUE *)&curr;
th->ec.machine.stack_maxsize = size - diff; th->ec->machine.stack_maxsize = size - diff;
} }
#elif defined get_stack_of #elif defined get_stack_of
if (!th->ec.machine.stack_maxsize) { if (!th->ec->machine.stack_maxsize) {
native_mutex_lock(&th->interrupt_lock); native_mutex_lock(&th->interrupt_lock);
native_mutex_unlock(&th->interrupt_lock); native_mutex_unlock(&th->interrupt_lock);
} }
@ -850,9 +848,9 @@ native_thread_init_stack(rb_thread_t *th)
#endif #endif
} }
#ifdef __ia64 #ifdef __ia64
th->ec.machine.register_stack_start = native_main_thread.register_stack_start; th->ec->machine.register_stack_start = native_main_thread.register_stack_start;
th->ec.machine.stack_maxsize /= 2; th->ec->machine.stack_maxsize /= 2;
th->ec.machine.register_stack_maxsize = th->ec.machine.stack_maxsize; th->ec->machine.register_stack_maxsize = th->ec->machine.stack_maxsize;
#endif #endif
return 0; return 0;
} }
@ -880,7 +878,7 @@ thread_start_func_1(void *th_ptr)
native_thread_init(th); native_thread_init(th);
/* run */ /* run */
#if defined USE_NATIVE_THREAD_INIT #if defined USE_NATIVE_THREAD_INIT
thread_start_func_2(th, th->ec.machine.stack_start, rb_ia64_bsp()); thread_start_func_2(th, th->ec->machine.stack_start, rb_ia64_bsp());
#else #else
thread_start_func_2(th, &stack_start, rb_ia64_bsp()); thread_start_func_2(th, &stack_start, rb_ia64_bsp());
#endif #endif
@ -1002,10 +1000,10 @@ native_thread_create(rb_thread_t *th)
const size_t stack_size = th->vm->default_params.thread_machine_stack_size; const size_t stack_size = th->vm->default_params.thread_machine_stack_size;
const size_t space = space_size(stack_size); const size_t space = space_size(stack_size);
th->ec.machine.stack_maxsize = stack_size - space; th->ec->machine.stack_maxsize = stack_size - space;
#ifdef __ia64 #ifdef __ia64
th->ec.machine.stack_maxsize /= 2; th->ec->machine.stack_maxsize /= 2;
th->ec.machine.register_stack_maxsize = th->ec.machine.stack_maxsize; th->ec->machine.register_stack_maxsize = th->ec->machine.stack_maxsize;
#endif #endif
#ifdef HAVE_PTHREAD_ATTR_INIT #ifdef HAVE_PTHREAD_ATTR_INIT
@ -1028,8 +1026,8 @@ native_thread_create(rb_thread_t *th)
#ifdef get_stack_of #ifdef get_stack_of
if (!err) { if (!err) {
get_stack_of(th->thread_id, get_stack_of(th->thread_id,
&th->ec.machine.stack_start, &th->ec->machine.stack_start,
&th->ec.machine.stack_maxsize); &th->ec->machine.stack_maxsize);
} }
native_mutex_unlock(&th->interrupt_lock); native_mutex_unlock(&th->interrupt_lock);
#endif #endif
@ -1745,8 +1743,8 @@ ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
else else
#endif #endif
if (th) { if (th) {
size = th->ec.machine.stack_maxsize; size = th->ec->machine.stack_maxsize;
base = (char *)th->ec.machine.stack_start - STACK_DIR_UPPER(0, size); base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
} }
else { else {
return 0; return 0;


@ -140,10 +140,8 @@ ruby_thread_set_native(rb_thread_t *th)
} }
void void
Init_native_thread(void) Init_native_thread(rb_thread_t *th)
{ {
rb_thread_t *th = GET_THREAD();
ruby_native_thread_key = TlsAlloc(); ruby_native_thread_key = TlsAlloc();
ruby_thread_set_native(th); ruby_thread_set_native(th);
DuplicateHandle(GetCurrentProcess(), DuplicateHandle(GetCurrentProcess(),
@ -546,8 +544,8 @@ native_thread_init_stack(rb_thread_t *th)
size = end - base; size = end - base;
space = size / 5; space = size / 5;
if (space > 1024*1024) space = 1024*1024; if (space > 1024*1024) space = 1024*1024;
th->ec.machine.stack_start = (VALUE *)end - 1; th->ec->machine.stack_start = (VALUE *)end - 1;
th->ec.machine.stack_maxsize = size - space; th->ec->machine.stack_maxsize = size - space;
} }
#ifndef InterlockedExchangePointer #ifndef InterlockedExchangePointer
@ -575,7 +573,7 @@ thread_start_func_1(void *th_ptr)
thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th, thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
th->thread_id, th->native_thread_data.interrupt_event); th->thread_id, th->native_thread_data.interrupt_event);
thread_start_func_2(th, th->ec.machine.stack_start, rb_ia64_bsp()); thread_start_func_2(th, th->ec->machine.stack_start, rb_ia64_bsp());
w32_close_handle(thread_id); w32_close_handle(thread_id);
thread_debug("thread deleted (th: %p)\n", th); thread_debug("thread deleted (th: %p)\n", th);

vm.c

@ -88,8 +88,8 @@ rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
static int static int
VM_CFP_IN_HEAP_P(const rb_thread_t *th, const rb_control_frame_t *cfp) VM_CFP_IN_HEAP_P(const rb_thread_t *th, const rb_control_frame_t *cfp)
{ {
const VALUE *start = th->ec.vm_stack; const VALUE *start = th->ec->vm_stack;
const VALUE *end = (VALUE *)th->ec.vm_stack + th->ec.vm_stack_size; const VALUE *end = (VALUE *)th->ec->vm_stack + th->ec->vm_stack_size;
VM_ASSERT(start != NULL); VM_ASSERT(start != NULL);
if (start <= (VALUE *)cfp && (VALUE *)cfp < end) { if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
@ -138,7 +138,9 @@ vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep)
int int
rb_vm_ep_in_heap_p(const VALUE *ep) rb_vm_ep_in_heap_p(const VALUE *ep)
{ {
return vm_ep_in_heap_p_(&GET_THREAD()->ec, ep); rb_thread_t *th = GET_THREAD();
if (th->ec->vm_stack == NULL) return TRUE;
return vm_ep_in_heap_p_(th->ec, ep);
} }
#endif #endif
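
The added NULL check covers the new fiber lifecycle: after `rb_fiber_close`, an EC can legitimately have `vm_stack == NULL`, and this debug helper must not walk a freed stack. Sketch:

    typedef struct { void **vm_stack; } toy_ec_t;

    static int ep_in_heap_body(const toy_ec_t *ec, const void *ep)
    {
        (void)ec; (void)ep;
        return 0;   /* stand-in for the real stack-range walk */
    }

    static int toy_ep_in_heap_p(const toy_ec_t *ec, const void *ep)
    {
        if (ec->vm_stack == NULL) return 1;  /* closed EC: trivially true */
        return ep_in_heap_body(ec, ep);
    }
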
@ -317,8 +319,8 @@ VALUE rb_mRubyVMFrozenCore;
#define ruby_vm_redefined_flag GET_VM()->redefined_flag #define ruby_vm_redefined_flag GET_VM()->redefined_flag
VALUE ruby_vm_const_missing_count = 0; VALUE ruby_vm_const_missing_count = 0;
rb_thread_t *ruby_current_thread = 0; rb_vm_t *ruby_current_vm_ptr = NULL;
rb_vm_t *ruby_current_vm = 0; rb_execution_context_t *ruby_current_execution_context_ptr = NULL;
rb_event_flag_t ruby_vm_event_flags; rb_event_flag_t ruby_vm_event_flags;
rb_serial_t ruby_vm_global_method_state = 1; rb_serial_t ruby_vm_global_method_state = 1;
rb_serial_t ruby_vm_global_constant_state = 1; rb_serial_t ruby_vm_global_constant_state = 1;
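
These two globals are the replacement for `ruby_current_thread`: a process-wide VM pointer plus the current execution context. `GET_EC()` reads the latter; `GET_THREAD()` is then derived from the EC rather than stored separately. A hedged sketch of how the accessors relate, assuming a thread back-pointer on the EC and ignoring the TLS details of the real build:

    typedef struct toy_thread toy_thread_t;
    typedef struct { toy_thread_t *thread_ptr; } toy_ec_t;
    struct toy_thread { toy_ec_t *ec; };

    static toy_ec_t *toy_current_ec;   /* ruby_current_execution_context_ptr */

    #define TOY_GET_EC()     (toy_current_ec)
    #define TOY_GET_THREAD() (TOY_GET_EC()->thread_ptr)
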
@ -450,7 +452,7 @@ vm_set_top_stack(rb_thread_t *th, const rb_iseq_t *iseq)
vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, th->top_self, vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, th->top_self,
VM_BLOCK_HANDLER_NONE, VM_BLOCK_HANDLER_NONE,
(VALUE)vm_cref_new_toplevel(th), /* cref or me */ (VALUE)vm_cref_new_toplevel(th), /* cref or me */
iseq->body->iseq_encoded, th->ec.cfp->sp, iseq->body->iseq_encoded, th->ec->cfp->sp,
iseq->body->local_table_size, iseq->body->stack_max); iseq->body->local_table_size, iseq->body->stack_max);
} }
@ -461,7 +463,7 @@ vm_set_eval_stack(rb_thread_t * th, const rb_iseq_t *iseq, const rb_cref_t *cref
vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)), vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
(VALUE)cref, /* cref or me */ (VALUE)cref, /* cref or me */
iseq->body->iseq_encoded, iseq->body->iseq_encoded,
th->ec.cfp->sp, iseq->body->local_table_size, th->ec->cfp->sp, iseq->body->local_table_size,
iseq->body->stack_max); iseq->body->stack_max);
} }
@ -478,7 +480,7 @@ vm_set_main_stack(rb_thread_t *th, const rb_iseq_t *iseq)
/* save binding */ /* save binding */
if (iseq->body->local_table_size > 0) { if (iseq->body->local_table_size > 0) {
vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(th, th->ec.cfp)); vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(th, th->ec->cfp));
} }
} }
@ -532,7 +534,7 @@ void
rb_vm_pop_cfunc_frame(void) rb_vm_pop_cfunc_frame(void)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = th->ec.cfp; rb_control_frame_t *cfp = th->ec->cfp;
const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp); const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, cfp->self, me->def->original_id, me->called_id, me->owner, Qnil); EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, cfp->self, me->def->original_id, me->called_id, me->owner, Qnil);
@ -544,11 +546,11 @@ void
rb_vm_rewind_cfp(rb_thread_t *th, rb_control_frame_t *cfp) rb_vm_rewind_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{ {
/* check skipped frame */ /* check skipped frame */
while (th->ec.cfp != cfp) { while (th->ec->cfp != cfp) {
#if VMDEBUG #if VMDEBUG
printf("skipped frame: %s\n", vm_frametype_name(th->ec.cfp)); printf("skipped frame: %s\n", vm_frametype_name(th->ec->cfp));
#endif #endif
if (VM_FRAME_TYPE(th->ec.cfp) != VM_FRAME_MAGIC_CFUNC) { if (VM_FRAME_TYPE(th->ec->cfp) != VM_FRAME_MAGIC_CFUNC) {
rb_vm_pop_frame(th); rb_vm_pop_frame(th);
} }
else { /* unlikely path */ else { /* unlikely path */
@ -730,7 +732,7 @@ vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp)
void void
rb_vm_stack_to_heap(rb_thread_t *th) rb_vm_stack_to_heap(rb_thread_t *th)
{ {
rb_control_frame_t *cfp = th->ec.cfp; rb_control_frame_t *cfp = th->ec->cfp;
while ((cfp = rb_vm_get_binding_creatable_next_cfp(th, cfp)) != 0) { while ((cfp = rb_vm_get_binding_creatable_next_cfp(th, cfp)) != 0) {
vm_make_env_object(th, cfp); vm_make_env_object(th, cfp);
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
@ -812,7 +814,7 @@ rb_proc_create_from_captured(VALUE klass,
VALUE procval = rb_proc_alloc(klass); VALUE procval = rb_proc_alloc(klass);
rb_proc_t *proc = RTYPEDDATA_DATA(procval); rb_proc_t *proc = RTYPEDDATA_DATA(procval);
VM_ASSERT(VM_EP_IN_HEAP_P(&GET_THREAD()->ec, captured->ep)); VM_ASSERT(VM_EP_IN_HEAP_P(GET_THREAD()->ec, captured->ep));
/* copy block */ /* copy block */
RB_OBJ_WRITE(procval, &proc->block.as.captured.self, captured->self); RB_OBJ_WRITE(procval, &proc->block.as.captured.self, captured->self);
@ -854,7 +856,7 @@ rb_proc_create(VALUE klass, const struct rb_block *block,
VALUE procval = rb_proc_alloc(klass); VALUE procval = rb_proc_alloc(klass);
rb_proc_t *proc = RTYPEDDATA_DATA(procval); rb_proc_t *proc = RTYPEDDATA_DATA(procval);
VM_ASSERT(VM_EP_IN_HEAP_P(&GET_THREAD()->ec, vm_block_ep(block))); VM_ASSERT(VM_EP_IN_HEAP_P(GET_THREAD()->ec, vm_block_ep(block)));
rb_vm_block_copy(procval, &proc->block, block); rb_vm_block_copy(procval, &proc->block, block);
vm_block_type_set(&proc->block, block->type); vm_block_type_set(&proc->block, block->type);
proc->safe_level = safe_level; proc->safe_level = safe_level;
@ -879,13 +881,13 @@ rb_vm_make_proc_lambda(rb_thread_t *th, const struct rb_captured_block *captured
rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured); rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
vm_make_env_object(th, cfp); vm_make_env_object(th, cfp);
} }
VM_ASSERT(VM_EP_IN_HEAP_P(&th->ec, captured->ep)); VM_ASSERT(VM_EP_IN_HEAP_P(th->ec, captured->ep));
VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq) || VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq) ||
imemo_type_p(captured->code.val, imemo_ifunc)); imemo_type_p(captured->code.val, imemo_ifunc));
procval = rb_proc_create_from_captured(klass, captured, procval = rb_proc_create_from_captured(klass, captured,
imemo_type(captured->code.val) == imemo_iseq ? block_type_iseq : block_type_ifunc, imemo_type(captured->code.val) == imemo_iseq ? block_type_iseq : block_type_ifunc,
(int8_t)th->ec.safe_level, FALSE, is_lambda); (int8_t)th->ec->safe_level, FALSE, is_lambda);
return procval; return procval;
} }
@ -959,7 +961,7 @@ rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const I
ALLOCV_END(idtmp); ALLOCV_END(idtmp);
vm_set_eval_stack(th, iseq, 0, base_block); vm_set_eval_stack(th, iseq, 0, base_block);
vm_bind_update_env(bindval, bind, envval = vm_make_env_object(th, th->ec.cfp)); vm_bind_update_env(bindval, bind, envval = vm_make_env_object(th, th->ec->cfp));
rb_vm_pop_frame(th); rb_vm_pop_frame(th);
env = (const rb_env_t *)envval; env = (const rb_env_t *)envval;
@ -977,7 +979,7 @@ invoke_block(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const struct rb
VM_GUARDED_PREV_EP(captured->ep), VM_GUARDED_PREV_EP(captured->ep),
(VALUE)cref, /* cref or method */ (VALUE)cref, /* cref or method */
iseq->body->iseq_encoded + opt_pc, iseq->body->iseq_encoded + opt_pc,
th->ec.cfp->sp + arg_size, th->ec->cfp->sp + arg_size,
iseq->body->local_table_size - arg_size, iseq->body->local_table_size - arg_size,
iseq->body->stack_max); iseq->body->stack_max);
return vm_exec(th); return vm_exec(th);
@ -994,13 +996,13 @@ invoke_bmethod(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const struct
VM_GUARDED_PREV_EP(captured->ep), VM_GUARDED_PREV_EP(captured->ep),
(VALUE)me, (VALUE)me,
iseq->body->iseq_encoded + opt_pc, iseq->body->iseq_encoded + opt_pc,
th->ec.cfp->sp + arg_size, th->ec->cfp->sp + arg_size,
iseq->body->local_table_size - arg_size, iseq->body->local_table_size - arg_size,
iseq->body->stack_max); iseq->body->stack_max);
RUBY_DTRACE_METHOD_ENTRY_HOOK(th, me->owner, me->def->original_id); RUBY_DTRACE_METHOD_ENTRY_HOOK(th, me->owner, me->def->original_id);
EXEC_EVENT_HOOK(th, RUBY_EVENT_CALL, self, me->def->original_id, me->called_id, me->owner, Qnil); EXEC_EVENT_HOOK(th, RUBY_EVENT_CALL, self, me->def->original_id, me->called_id, me->owner, Qnil);
VM_ENV_FLAGS_SET(th->ec.cfp->ep, VM_FRAME_FLAG_FINISH); VM_ENV_FLAGS_SET(th->ec->cfp->ep, VM_FRAME_FLAG_FINISH);
ret = vm_exec(th); ret = vm_exec(th);
EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, self, me->def->original_id, me->called_id, me->owner, ret); EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, self, me->def->original_id, me->called_id, me->owner, ret);
RUBY_DTRACE_METHOD_RETURN_HOOK(th, me->owner, me->def->original_id); RUBY_DTRACE_METHOD_RETURN_HOOK(th, me->owner, me->def->original_id);
@ -1015,7 +1017,7 @@ invoke_iseq_block_from_c(rb_thread_t *th, const struct rb_captured_block *captur
const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq); const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
int i, opt_pc; int i, opt_pc;
VALUE type = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0); VALUE type = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
rb_control_frame_t *cfp = th->ec.cfp; rb_control_frame_t *cfp = th->ec->cfp;
VALUE *sp = cfp->sp; VALUE *sp = cfp->sp;
const rb_callable_method_entry_t *me = th->passed_bmethod_me; const rb_callable_method_entry_t *me = th->passed_bmethod_me;
th->passed_bmethod_me = NULL; th->passed_bmethod_me = NULL;
@ -1075,7 +1077,7 @@ invoke_block_from_c_bh(rb_thread_t *th, VALUE block_handler,
static inline VALUE static inline VALUE
check_block_handler(rb_thread_t *th) check_block_handler(rb_thread_t *th)
{ {
VALUE block_handler = VM_CF_BLOCK_HANDLER(th->ec.cfp); VALUE block_handler = VM_CF_BLOCK_HANDLER(th->ec->cfp);
vm_block_handler_verify(block_handler); vm_block_handler_verify(block_handler);
if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) { if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) {
rb_vm_localjump_error("no block given", Qnil, 0); rb_vm_localjump_error("no block given", Qnil, 0);
@ -1145,16 +1147,16 @@ vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
{ {
VALUE val = Qundef; VALUE val = Qundef;
enum ruby_tag_type state; enum ruby_tag_type state;
volatile int stored_safe = th->ec.safe_level; volatile int stored_safe = th->ec->safe_level;
TH_PUSH_TAG(th); TH_PUSH_TAG(th);
if ((state = EXEC_TAG()) == TAG_NONE) { if ((state = EXEC_TAG()) == TAG_NONE) {
th->ec.safe_level = proc->safe_level; th->ec->safe_level = proc->safe_level;
val = invoke_block_from_c_proc(th, proc, self, argc, argv, passed_block_handler, proc->is_lambda); val = invoke_block_from_c_proc(th, proc, self, argc, argv, passed_block_handler, proc->is_lambda);
} }
TH_POP_TAG(); TH_POP_TAG();
th->ec.safe_level = stored_safe; th->ec->safe_level = stored_safe;
if (state) { if (state) {
TH_JUMP_TAG(th, state); TH_JUMP_TAG(th, state);
@ -1216,14 +1218,14 @@ static VALUE
vm_svar_get(VALUE key) vm_svar_get(VALUE key)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
return vm_cfp_svar_get(th, th->ec.cfp, key); return vm_cfp_svar_get(th, th->ec->cfp, key);
} }
static void static void
vm_svar_set(VALUE key, VALUE val) vm_svar_set(VALUE key, VALUE val)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
vm_cfp_svar_set(th, th->ec.cfp, key, val); vm_cfp_svar_set(th, th->ec->cfp, key, val);
} }
VALUE VALUE
@ -1256,7 +1258,7 @@ VALUE
rb_sourcefilename(void) rb_sourcefilename(void)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);
if (cfp) { if (cfp) {
return rb_iseq_path(cfp->iseq); return rb_iseq_path(cfp->iseq);
@ -1270,7 +1272,7 @@ const char *
rb_sourcefile(void) rb_sourcefile(void)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);
if (cfp) { if (cfp) {
return RSTRING_PTR(rb_iseq_path(cfp->iseq)); return RSTRING_PTR(rb_iseq_path(cfp->iseq));
@ -1284,7 +1286,7 @@ int
rb_sourceline(void) rb_sourceline(void)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);
if (cfp) { if (cfp) {
return rb_vm_get_sourceline(cfp); return rb_vm_get_sourceline(cfp);
@ -1298,7 +1300,7 @@ VALUE
rb_source_location(int *pline) rb_source_location(int *pline)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);
if (cfp) { if (cfp) {
if (pline) *pline = rb_vm_get_sourceline(cfp); if (pline) *pline = rb_vm_get_sourceline(cfp);
@ -1322,7 +1324,7 @@ rb_cref_t *
rb_vm_cref(void) rb_vm_cref(void)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);
if (cfp == NULL) { if (cfp == NULL) {
return NULL; return NULL;
@ -1335,7 +1337,7 @@ rb_cref_t *
rb_vm_cref_replace_with_duplicated_cref(void) rb_vm_cref_replace_with_duplicated_cref(void)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);
rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->ep); rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->ep);
return cref; return cref;
} }
@ -1344,7 +1346,7 @@ const rb_cref_t *
rb_vm_cref_in_context(VALUE self, VALUE cbase) rb_vm_cref_in_context(VALUE self, VALUE cbase)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);
const rb_cref_t *cref; const rb_cref_t *cref;
if (cfp->self != self) return NULL; if (cfp->self != self) return NULL;
if (!vm_env_cref_by_cref(cfp->ep)) return NULL; if (!vm_env_cref_by_cref(cfp->ep)) return NULL;
@ -1369,7 +1371,7 @@ VALUE
rb_vm_cbase(void) rb_vm_cbase(void)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);
if (cfp == 0) { if (cfp == 0) {
rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread"); rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
@ -1445,7 +1447,7 @@ rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
return Qnil; return Qnil;
} }
if (val == Qundef) { if (val == Qundef) {
val = GET_THREAD()->ec.tag->retval; val = GET_THREAD()->ec->tag->retval;
} }
return make_localjump_error(mesg, val, state); return make_localjump_error(mesg, val, state);
} }
@ -1474,7 +1476,7 @@ next_not_local_frame(rb_control_frame_t *cfp)
static void static void
vm_iter_break(rb_thread_t *th, VALUE val) vm_iter_break(rb_thread_t *th, VALUE val)
{ {
rb_control_frame_t *cfp = next_not_local_frame(th->ec.cfp); rb_control_frame_t *cfp = next_not_local_frame(th->ec->cfp);
const VALUE *ep = VM_CF_PREV_EP(cfp); const VALUE *ep = VM_CF_PREV_EP(cfp);
const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(th, cfp, ep); const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(th, cfp, ep);
@ -1484,7 +1486,7 @@ vm_iter_break(rb_thread_t *th, VALUE val)
} }
#endif #endif
th->ec.errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK); th->ec->errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK);
TH_JUMP_TAG(th, TAG_BREAK); TH_JUMP_TAG(th, TAG_BREAK);
} }
@ -1670,33 +1672,33 @@ hook_before_rewind(rb_thread_t *th, const rb_control_frame_t *cfp, int will_fini
if (state == TAG_RAISE && RBASIC_CLASS(err) == rb_eSysStackError) { if (state == TAG_RAISE && RBASIC_CLASS(err) == rb_eSysStackError) {
return; return;
} }
switch (VM_FRAME_TYPE(th->ec.cfp)) { switch (VM_FRAME_TYPE(th->ec->cfp)) {
case VM_FRAME_MAGIC_METHOD: case VM_FRAME_MAGIC_METHOD:
RUBY_DTRACE_METHOD_RETURN_HOOK(th, 0, 0); RUBY_DTRACE_METHOD_RETURN_HOOK(th, 0, 0);
EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->ec.cfp->self, 0, 0, 0, frame_return_value(err)); EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->ec->cfp->self, 0, 0, 0, frame_return_value(err));
THROW_DATA_CONSUMED_SET(err); THROW_DATA_CONSUMED_SET(err);
break; break;
case VM_FRAME_MAGIC_BLOCK: case VM_FRAME_MAGIC_BLOCK:
if (VM_FRAME_BMETHOD_P(th->ec.cfp)) { if (VM_FRAME_BMETHOD_P(th->ec->cfp)) {
EXEC_EVENT_HOOK(th, RUBY_EVENT_B_RETURN, th->ec.cfp->self, 0, 0, 0, frame_return_value(err)); EXEC_EVENT_HOOK(th, RUBY_EVENT_B_RETURN, th->ec->cfp->self, 0, 0, 0, frame_return_value(err));
if (!will_finish_vm_exec) { if (!will_finish_vm_exec) {
/* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */ /* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */
EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->ec.cfp->self, EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->ec->cfp->self,
rb_vm_frame_method_entry(th->ec.cfp)->def->original_id, rb_vm_frame_method_entry(th->ec->cfp)->def->original_id,
rb_vm_frame_method_entry(th->ec.cfp)->called_id, rb_vm_frame_method_entry(th->ec->cfp)->called_id,
rb_vm_frame_method_entry(th->ec.cfp)->owner, rb_vm_frame_method_entry(th->ec->cfp)->owner,
frame_return_value(err)); frame_return_value(err));
} }
THROW_DATA_CONSUMED_SET(err); THROW_DATA_CONSUMED_SET(err);
} }
else { else {
EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_B_RETURN, th->ec.cfp->self, 0, 0, 0, frame_return_value(err)); EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_B_RETURN, th->ec->cfp->self, 0, 0, 0, frame_return_value(err));
THROW_DATA_CONSUMED_SET(err); THROW_DATA_CONSUMED_SET(err);
} }
break; break;
case VM_FRAME_MAGIC_CLASS: case VM_FRAME_MAGIC_CLASS:
EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_END, th->ec.cfp->self, 0, 0, 0, Qnil); EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_END, th->ec->cfp->self, 0, 0, 0, Qnil);
break; break;
} }
} }
@ -1794,7 +1796,7 @@ vm_exec(rb_thread_t *th)
if ((state = EXEC_TAG()) == TAG_NONE) { if ((state = EXEC_TAG()) == TAG_NONE) {
vm_loop_start: vm_loop_start:
result = vm_exec_core(th, initial); result = vm_exec_core(th, initial);
VM_ASSERT(th->ec.tag == &_tag); VM_ASSERT(th->ec->tag == &_tag);
if ((state = _tag.state) != TAG_NONE) { if ((state = _tag.state) != TAG_NONE) {
err = (struct vm_throw_data *)result; err = (struct vm_throw_data *)result;
_tag.state = TAG_NONE; _tag.state = TAG_NONE;
@ -1811,27 +1813,27 @@ vm_exec(rb_thread_t *th)
VALUE type; VALUE type;
const rb_control_frame_t *escape_cfp; const rb_control_frame_t *escape_cfp;
err = (struct vm_throw_data *)th->ec.errinfo; err = (struct vm_throw_data *)th->ec->errinfo;
rb_thread_raised_reset(th, RAISED_STACKOVERFLOW); rb_thread_raised_reset(th, RAISED_STACKOVERFLOW);
exception_handler: exception_handler:
cont_pc = cont_sp = 0; cont_pc = cont_sp = 0;
catch_iseq = NULL; catch_iseq = NULL;
while (th->ec.cfp->pc == 0 || th->ec.cfp->iseq == 0) { while (th->ec->cfp->pc == 0 || th->ec->cfp->iseq == 0) {
if (UNLIKELY(VM_FRAME_TYPE(th->ec.cfp) == VM_FRAME_MAGIC_CFUNC)) { if (UNLIKELY(VM_FRAME_TYPE(th->ec->cfp) == VM_FRAME_MAGIC_CFUNC)) {
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->ec.cfp->self, EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->ec->cfp->self,
rb_vm_frame_method_entry(th->ec.cfp)->def->original_id, rb_vm_frame_method_entry(th->ec->cfp)->def->original_id,
rb_vm_frame_method_entry(th->ec.cfp)->called_id, rb_vm_frame_method_entry(th->ec->cfp)->called_id,
rb_vm_frame_method_entry(th->ec.cfp)->owner, Qnil); rb_vm_frame_method_entry(th->ec->cfp)->owner, Qnil);
RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, RUBY_DTRACE_CMETHOD_RETURN_HOOK(th,
rb_vm_frame_method_entry(th->ec.cfp)->owner, rb_vm_frame_method_entry(th->ec->cfp)->owner,
rb_vm_frame_method_entry(th->ec.cfp)->def->original_id); rb_vm_frame_method_entry(th->ec->cfp)->def->original_id);
} }
rb_vm_pop_frame(th); rb_vm_pop_frame(th);
} }
cfp = th->ec.cfp; cfp = th->ec->cfp;
epc = cfp->pc - cfp->iseq->body->iseq_encoded; epc = cfp->pc - cfp->iseq->body->iseq_encoded;
escape_cfp = NULL; escape_cfp = NULL;
@ -1858,10 +1860,10 @@ vm_exec(rb_thread_t *th)
} }
} }
if (catch_iseq == NULL) { if (catch_iseq == NULL) {
th->ec.errinfo = Qnil; th->ec->errinfo = Qnil;
result = THROW_DATA_VAL(err); result = THROW_DATA_VAL(err);
THROW_DATA_CATCH_FRAME_SET(err, cfp + 1); THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
hook_before_rewind(th, th->ec.cfp, TRUE, state, err); hook_before_rewind(th, th->ec->cfp, TRUE, state, err);
rb_vm_pop_frame(th); rb_vm_pop_frame(th);
goto finish_vme; goto finish_vme;
} }
@ -1873,9 +1875,9 @@ vm_exec(rb_thread_t *th)
#if OPT_STACK_CACHING #if OPT_STACK_CACHING
initial = THROW_DATA_VAL(err); initial = THROW_DATA_VAL(err);
#else #else
*th->ec.cfp->sp++ = THROW_DATA_VAL(err); *th->ec->cfp->sp++ = THROW_DATA_VAL(err);
#endif #endif
th->ec.errinfo = Qnil; th->ec->errinfo = Qnil;
goto vm_loop_start; goto vm_loop_start;
} }
} }
@ -1914,7 +1916,7 @@ vm_exec(rb_thread_t *th)
escape_cfp = THROW_DATA_CATCH_FRAME(err); escape_cfp = THROW_DATA_CATCH_FRAME(err);
if (cfp == escape_cfp) { if (cfp == escape_cfp) {
cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont; cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
th->ec.errinfo = Qnil; th->ec->errinfo = Qnil;
goto vm_loop_start; goto vm_loop_start;
} }
} }
@ -1944,11 +1946,11 @@ vm_exec(rb_thread_t *th)
#if OPT_STACK_CACHING #if OPT_STACK_CACHING
initial = THROW_DATA_VAL(err); initial = THROW_DATA_VAL(err);
#else #else
*th->ec.cfp->sp++ = THROW_DATA_VAL(err); *th->ec->cfp->sp++ = THROW_DATA_VAL(err);
#endif #endif
} }
th->ec.errinfo = Qnil; th->ec->errinfo = Qnil;
VM_ASSERT(th->ec.tag->state == TAG_NONE); VM_ASSERT(th->ec->tag->state == TAG_NONE);
goto vm_loop_start; goto vm_loop_start;
} }
} }
@ -1998,16 +2000,16 @@ vm_exec(rb_thread_t *th)
catch_iseq->body->stack_max); catch_iseq->body->stack_max);
state = 0; state = 0;
th->ec.tag->state = TAG_NONE; th->ec->tag->state = TAG_NONE;
th->ec.errinfo = Qnil; th->ec->errinfo = Qnil;
goto vm_loop_start; goto vm_loop_start;
} }
else { else {
hook_before_rewind(th, th->ec.cfp, FALSE, state, err); hook_before_rewind(th, th->ec->cfp, FALSE, state, err);
if (VM_FRAME_FINISHED_P(th->ec.cfp)) { if (VM_FRAME_FINISHED_P(th->ec->cfp)) {
rb_vm_pop_frame(th); rb_vm_pop_frame(th);
th->ec.errinfo = (VALUE)err; th->ec->errinfo = (VALUE)err;
TH_TMPPOP_TAG(); TH_TMPPOP_TAG();
TH_JUMP_TAG(th, state); TH_JUMP_TAG(th, state);
} }
@ -2064,7 +2066,7 @@ rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *cal
int int
rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, ID *called_idp, VALUE *klassp) rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, ID *called_idp, VALUE *klassp)
{ {
return rb_vm_control_frame_id_and_class(th->ec.cfp, idp, called_idp, klassp); return rb_vm_control_frame_id_and_class(th->ec->cfp, idp, called_idp, klassp);
} }
int int
@ -2076,7 +2078,7 @@ rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
VALUE VALUE
rb_thread_current_status(const rb_thread_t *th) rb_thread_current_status(const rb_thread_t *th)
{ {
const rb_control_frame_t *cfp = th->ec.cfp; const rb_control_frame_t *cfp = th->ec->cfp;
const rb_callable_method_entry_t *me; const rb_callable_method_entry_t *me;
VALUE str = Qnil; VALUE str = Qnil;
@ -2102,7 +2104,7 @@ rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
VALUE block_handler, VALUE filename) VALUE block_handler, VALUE filename)
{ {
rb_thread_t *th = GET_THREAD(); rb_thread_t *th = GET_THREAD();
const rb_control_frame_t *reg_cfp = th->ec.cfp; const rb_control_frame_t *reg_cfp = th->ec->cfp;
const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP); const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
VALUE val; VALUE val;
@ -2216,7 +2218,7 @@ ruby_vm_destruct(rb_vm_t *vm)
} }
/* after freeing objspace, you *can't* use ruby_xfree() */ /* after freeing objspace, you *can't* use ruby_xfree() */
ruby_mimfree(vm); ruby_mimfree(vm);
ruby_current_vm = 0; ruby_current_vm_ptr = NULL;
} }
RUBY_FREE_LEAVE("vm"); RUBY_FREE_LEAVE("vm");
return 0; return 0;
@ -2366,11 +2368,14 @@ rb_thread_recycle_stack_release(VALUE *stack)
ruby_xfree(stack); ruby_xfree(stack);
} }
void rb_fiber_mark_self(rb_fiber_t *fib);
void void
rb_execution_context_mark(const rb_execution_context_t *ec) rb_execution_context_mark(const rb_execution_context_t *ec)
{ {
#if VM_CHECK_MODE > 0
void rb_ec_verify(const rb_execution_context_t *ec); /* cont.c */
rb_ec_verify(ec);
#endif
/* mark VM stack */ /* mark VM stack */
if (ec->vm_stack) { if (ec->vm_stack) {
VALUE *p = ec->vm_stack; VALUE *p = ec->vm_stack;
@ -2394,8 +2399,7 @@ rb_execution_context_mark(const rb_execution_context_t *ec)
} }
/* mark machine stack */ /* mark machine stack */
if (&GET_THREAD()->ec != ec && if (ec->machine.stack_start && ec->machine.stack_end) {
ec->machine.stack_start && ec->machine.stack_end) {
rb_gc_mark_machine_stack(ec); rb_gc_mark_machine_stack(ec);
rb_gc_mark_locations((VALUE *)&ec->machine.regs, rb_gc_mark_locations((VALUE *)&ec->machine.regs,
(VALUE *)(&ec->machine.regs) + (VALUE *)(&ec->machine.regs) +
@ -2407,16 +2411,16 @@ rb_execution_context_mark(const rb_execution_context_t *ec)
rb_mark_tbl(ec->local_storage); rb_mark_tbl(ec->local_storage);
RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash); RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash);
RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash_for_trace); RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash_for_trace);
rb_fiber_mark_self(ec->fiber);
} }
void rb_fiber_mark_self(rb_fiber_t *fib);
void void
rb_thread_mark(void *ptr) rb_thread_mark(void *ptr)
{ {
rb_thread_t *th = ptr; rb_thread_t *th = ptr;
RUBY_MARK_ENTER("thread"); RUBY_MARK_ENTER("thread");
rb_fiber_mark_self(th->ec->fiber);
rb_execution_context_mark(&th->ec);
/* mark ruby objects */ /* mark ruby objects */
RUBY_MARK_UNLESS_NULL(th->first_proc); RUBY_MARK_UNLESS_NULL(th->first_proc);
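
Two marking changes land together here: `rb_execution_context_mark` no longer skips the current thread's machine stack wholesale (the start/end pointers decide), and each EC now keeps its own fiber alive via `rb_fiber_mark_self(ec->fiber)`, with `rb_thread_mark` delegating through `th->ec`. A compressed sketch of the resulting shape, with toy mark hooks:

    typedef struct toy_fiber toy_fiber_t;

    typedef struct {
        void **vm_stack, **vm_stack_end;
        void  *machine_stack_start, *machine_stack_end;
        toy_fiber_t *fiber;
    } toy_ec_t;

    static void mark_values(void **p, void **end) { (void)p; (void)end; }
    static void mark_machine_stack(const toy_ec_t *ec) { (void)ec; }
    static void mark_fiber(toy_fiber_t *fib) { (void)fib; }

    static void toy_ec_mark(const toy_ec_t *ec)
    {
        if (ec->vm_stack)
            mark_values(ec->vm_stack, ec->vm_stack_end);
        if (ec->machine_stack_start && ec->machine_stack_end)
            mark_machine_stack(ec);   /* no GET_THREAD() comparison now */
        mark_fiber(ec->fiber);        /* keep the owning fiber alive    */
    }
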
@ -2444,11 +2448,6 @@ thread_free(void *ptr)
rb_thread_t *th = ptr; rb_thread_t *th = ptr;
RUBY_FREE_ENTER("thread"); RUBY_FREE_ENTER("thread");
if (th->ec.vm_stack != NULL) {
rb_thread_recycle_stack_release(th->ec.vm_stack);
th->ec.vm_stack = NULL;
}
if (th->locking_mutex != Qfalse) { if (th->locking_mutex != Qfalse) {
rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex); rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
} }
@ -2456,10 +2455,13 @@ thread_free(void *ptr)
rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes); rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
} }
if (th->ec.local_storage) { if (th->ec->local_storage) {
st_free_table(th->ec.local_storage); st_free_table(th->ec->local_storage);
} }
if (th->ec == ruby_current_execution_context_ptr)
ruby_current_execution_context_ptr = NULL;
if (th->vm && th->vm->main_thread == th) { if (th->vm && th->vm->main_thread == th) {
RUBY_GC_INFO("main thread\n"); RUBY_GC_INFO("main thread\n");
} }
@ -2471,8 +2473,6 @@ thread_free(void *ptr)
#endif #endif
ruby_xfree(ptr); ruby_xfree(ptr);
} }
if (ruby_current_thread == th)
ruby_current_thread = NULL;
RUBY_FREE_LEAVE("thread"); RUBY_FREE_LEAVE("thread");
} }
@ -2484,10 +2484,10 @@ thread_memsize(const void *ptr)
size_t size = sizeof(rb_thread_t); size_t size = sizeof(rb_thread_t);
if (!th->root_fiber) { if (!th->root_fiber) {
size += th->ec.vm_stack_size * sizeof(VALUE); size += th->ec->vm_stack_size * sizeof(VALUE);
} }
if (th->ec.local_storage) { if (th->ec->local_storage) {
size += st_memsize(th->ec.local_storage); size += st_memsize(th->ec->local_storage);
} }
return size; return size;
} }
@ -2524,35 +2524,39 @@ thread_alloc(VALUE klass)
return obj; return obj;
} }
void rb_threadptr_root_fiber_setup(rb_thread_t *th);
static void static void
th_init(rb_thread_t *th, VALUE self) th_init(rb_thread_t *th, VALUE self)
{ {
th->self = self; th->self = self;
rb_threadptr_root_fiber_setup(th);
/* allocate thread stack */ /* allocate thread stack */
#ifdef USE_SIGALTSTACK #ifdef USE_SIGALTSTACK
/* altstack of main thread is reallocated in another place */ /* altstack of main thread is reallocated in another place */
th->altstack = malloc(rb_sigaltstack_size()); th->altstack = malloc(rb_sigaltstack_size());
#endif #endif
/* th->ec.vm_stack_size is word number. {
* th->vm->default_params.thread_vm_stack_size is byte size. /* vm_stack_size is word number.
*/ * th->vm->default_params.thread_vm_stack_size is byte size. */
th->ec.vm_stack_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE); size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
th->ec.vm_stack = thread_recycle_stack(th->ec.vm_stack_size); ec_set_vm_stack(th->ec, thread_recycle_stack(size), size);
}
th->ec.cfp = (void *)(th->ec.vm_stack + th->ec.vm_stack_size); th->ec->cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
vm_push_frame(th, 0 /* dummy iseq */, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_CFRAME /* dummy frame */, vm_push_frame(th, 0 /* dummy iseq */, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_CFRAME /* dummy frame */,
Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */, Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */,
0 /* dummy cref/me */, 0 /* dummy cref/me */,
0 /* dummy pc */, th->ec.vm_stack, 0, 0); 0 /* dummy pc */, th->ec->vm_stack, 0, 0);
th->status = THREAD_RUNNABLE; th->status = THREAD_RUNNABLE;
th->last_status = Qnil; th->last_status = Qnil;
th->ec.errinfo = Qnil; th->ec->errinfo = Qnil;
th->ec.root_svar = Qfalse; th->ec->root_svar = Qfalse;
th->ec.local_storage_recursive_hash = Qnil; th->ec->local_storage_recursive_hash = Qnil;
th->ec.local_storage_recursive_hash_for_trace = Qnil; th->ec->local_storage_recursive_hash_for_trace = Qnil;
#ifdef NON_SCALAR_THREAD_ID #ifdef NON_SCALAR_THREAD_ID
th->thread_id_string[0] = '\0'; th->thread_id_string[0] = '\0';
#endif #endif
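
`th_init` now installs the stack through `ec_set_vm_stack` (after `rb_threadptr_root_fiber_setup`), so the `vm_stack`/`vm_stack_size` pair only ever changes in one place. Sketch of such a setter, keeping the word-count convention noted in the comment above:

    #include <stddef.h>

    typedef struct { void **vm_stack; size_t vm_stack_size; } toy_ec_t;

    /* Single choke point for (stack, size) updates, as ec_set_vm_stack
     * is in cont.c. size is in VALUE words, not bytes. */
    static void toy_ec_set_vm_stack(toy_ec_t *ec, void **stack, size_t size)
    {
        ec->vm_stack = stack;
        ec->vm_stack_size = size;
    }
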
@ -2575,7 +2579,7 @@ ruby_thread_init(VALUE self)
th->top_wrapper = 0; th->top_wrapper = 0;
th->top_self = rb_vm_top_self(); th->top_self = rb_vm_top_self();
th->ec.root_svar = Qfalse; th->ec->root_svar = Qfalse;
return self; return self;
} }
@ -2617,11 +2621,11 @@ vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval, int is_single
#define REWIND_CFP(expr) do { \ #define REWIND_CFP(expr) do { \
rb_thread_t *th__ = GET_THREAD(); \ rb_thread_t *th__ = GET_THREAD(); \
VALUE *const curr_sp = (th__->ec.cfp++)->sp; \ VALUE *const curr_sp = (th__->ec->cfp++)->sp; \
VALUE *const saved_sp = th__->ec.cfp->sp; \ VALUE *const saved_sp = th__->ec->cfp->sp; \
th__->ec.cfp->sp = curr_sp; \ th__->ec->cfp->sp = curr_sp; \
expr; \ expr; \
(th__->ec.cfp--)->sp = saved_sp; \ (th__->ec->cfp--)->sp = saved_sp; \
} while (0) } while (0)
static VALUE static VALUE
@@ -3065,7 +3069,7 @@ Init_VM(void)
     /* VM bootstrap: phase 2 */
     {
-        rb_vm_t *vm = ruby_current_vm;
+        rb_vm_t *vm = ruby_current_vm_ptr;
         rb_thread_t *th = GET_THREAD();
         VALUE filename = rb_fstring_cstr("<main>");
         const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
@@ -3087,12 +3091,12 @@ Init_VM(void)
         rb_vm_living_threads_insert(vm, th);
         rb_gc_register_mark_object((VALUE)iseq);
-        th->ec.cfp->iseq = iseq;
-        th->ec.cfp->pc = iseq->body->iseq_encoded;
-        th->ec.cfp->self = th->top_self;
+        th->ec->cfp->iseq = iseq;
+        th->ec->cfp->pc = iseq->body->iseq_encoded;
+        th->ec->cfp->self = th->top_self;

-        VM_ENV_FLAGS_UNSET(th->ec.cfp->ep, VM_FRAME_FLAG_CFRAME);
-        VM_STACK_ENV_WRITE(th->ec.cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE));
+        VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME);
+        VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE));

         /*
          * The Binding of the top level scope
@@ -3110,7 +3114,7 @@ void
 rb_vm_set_progname(VALUE filename)
 {
     rb_thread_t *th = GET_VM()->main_thread;
-    rb_control_frame_t *cfp = (void *)(th->ec.vm_stack + th->ec.vm_stack_size);
+    rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
     --cfp;

     rb_iseq_pathobj_set(cfp->iseq, rb_str_dup(filename), rb_iseq_realpath(cfp->iseq));
@@ -3129,15 +3133,15 @@ Init_BareVM(void)
         exit(EXIT_FAILURE);
     }
     MEMZERO(th, rb_thread_t, 1);
-    rb_thread_set_current_raw(th);
     vm_init2(vm);

     vm->objspace = rb_objspace_alloc();
-    ruby_current_vm = vm;
+    ruby_current_vm_ptr = vm;

-    Init_native_thread();
+    Init_native_thread(th);
     th->vm = vm;
     th_init(th, 0);
+    rb_thread_set_current_raw(th);
     ruby_thread_init_stack(th);
 }
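
Note: the reordering in Init_BareVM is forced by the new definition of
rb_thread_set_current_raw() (see the vm_core.h hunk below): it now publishes
th->ec rather than th itself, so it can only run once the boot thread's
execution context exists. A sketch of the resulting order, with comments on
what each step must precede (names as in this diff):

    MEMZERO(th, rb_thread_t, 1);
    vm_init2(vm);
    vm->objspace = rb_objspace_alloc();
    ruby_current_vm_ptr = vm;      /* renamed from ruby_current_vm */
    Init_native_thread(th);        /* now takes th to register the boot thread */
    th->vm = vm;
    th_init(th, 0);                /* th->ec must be usable after this point */
    rb_thread_set_current_raw(th); /* stores th->ec into the current-EC global */
    ruby_thread_init_stack(th);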
@@ -3293,7 +3297,7 @@ vm_analysis_operand(int insn, int n, VALUE op)
         HASH_ASET(ihash, INT2FIX(n), ophash);
     }
     /* intern */
-    valstr = rb_insn_operand_intern(GET_THREAD()->ec.cfp->iseq, insn, n, op, 0, 0, 0, 0);
+    valstr = rb_insn_operand_intern(GET_THREAD()->ec->cfp->iseq, insn, n, op, 0, 0, 0, 0);

     /* set count */
     if ((cv = rb_hash_aref(ophash, valstr)) == Qnil) {
@@ -3406,7 +3410,7 @@ vm_collect_usage_operand(int insn, int n, VALUE op)
     if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
         VALUE valstr;

-        valstr = rb_insn_operand_intern(GET_THREAD()->ec.cfp->iseq, insn, n, op, 0, 0, 0, 0);
+        valstr = rb_insn_operand_intern(GET_THREAD()->ec->cfp->iseq, insn, n, op, 0, 0, 0, 0);

         RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
         RB_GC_GUARD(valstr);

vm_args.c

@@ -508,7 +508,7 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq,
     int given_argc;
     struct args_info args_body, *args;
     VALUE keyword_hash = Qnil;
-    VALUE * const orig_sp = th->ec.cfp->sp;
+    VALUE * const orig_sp = th->ec->cfp->sp;
     unsigned int i;

     /*
@@ -528,7 +528,7 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq,
     for (i=calling->argc; i<iseq->body->param.size; i++) {
         locals[i] = Qnil;
     }
-    th->ec.cfp->sp = &locals[i];
+    th->ec->cfp->sp = &locals[i];

     /* setup args */
     args = &args_body;
@@ -587,7 +587,7 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq,
     }
     else {
         if (arg_setup_type == arg_setup_block) {
-            CHECK_VM_STACK_OVERFLOW(th->ec.cfp, min_argc);
+            CHECK_VM_STACK_OVERFLOW(th->ec->cfp, min_argc);
             given_argc = min_argc;
             args_extend(args, min_argc);
         }
@@ -683,7 +683,7 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq,
     }
 #endif
-    th->ec.cfp->sp = orig_sp;
+    th->ec->cfp->sp = orig_sp;
     return opt_pc;
 }
@@ -696,7 +696,7 @@ raise_argument_error(rb_thread_t *th, const rb_iseq_t *iseq, const VALUE exc)
         vm_push_frame(th, iseq, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL, Qnil /* self */,
                       VM_BLOCK_HANDLER_NONE /* specval*/, Qfalse /* me or cref */,
                       iseq->body->iseq_encoded,
-                      th->ec.cfp->sp, 0, 0 /* stack_max */);
+                      th->ec->cfp->sp, 0, 0 /* stack_max */);
         at = rb_threadptr_backtrace_object(th);
         rb_vm_pop_frame(th);
     }

vm_backtrace.c

@@ -427,7 +427,7 @@ backtrace_each(rb_thread_t *th,
                void (*iter_cfunc)(void *arg, const rb_control_frame_t *cfp, ID mid),
                void *arg)
 {
-    rb_control_frame_t *last_cfp = th->ec.cfp;
+    rb_control_frame_t *last_cfp = th->ec->cfp;
     rb_control_frame_t *start_cfp = RUBY_VM_END_CONTROL_FRAME(th);
     rb_control_frame_t *cfp;
     ptrdiff_t size, i;
@@ -439,7 +439,7 @@ backtrace_each(rb_thread_t *th,
      *  top frame
      *  ...
      *  2nd frame <- lev:0
-     *  current frame <- th->ec.cfp
+     *  current frame <- th->ec->cfp
      */

     start_cfp =
@@ -1172,12 +1172,12 @@ VALUE
 rb_debug_inspector_open(rb_debug_inspector_func_t func, void *data)
 {
     rb_debug_inspector_t dbg_context;
-    rb_thread_t *th = GET_THREAD();
+    rb_thread_t * volatile th = GET_THREAD();
     enum ruby_tag_type state;
     volatile VALUE MAYBE_UNUSED(result);

     dbg_context.th = th;
-    dbg_context.cfp = dbg_context.th->ec.cfp;
+    dbg_context.cfp = dbg_context.th->ec->cfp;
     dbg_context.backtrace = rb_threadptr_backtrace_location_ary(th, 0, 0);
     dbg_context.backtrace_size = RARRAY_LEN(dbg_context.backtrace);
     dbg_context.contexts = collect_caller_bindings(th);
@@ -1247,7 +1247,7 @@ rb_profile_frames(int start, int limit, VALUE *buff, int *lines)
 {
     int i;
     rb_thread_t *th = GET_THREAD();
-    rb_control_frame_t *cfp = th->ec.cfp, *end_cfp = RUBY_VM_END_CONTROL_FRAME(th);
+    rb_control_frame_t *cfp = th->ec->cfp, *end_cfp = RUBY_VM_END_CONTROL_FRAME(th);
     const rb_callable_method_entry_t *cme;

     for (i=0; i<limit && cfp != end_cfp;) {

vm_core.h

@@ -778,12 +778,14 @@ typedef struct rb_execution_context_struct {
     } machine;
 } rb_execution_context_t;

+void ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
+
 typedef struct rb_thread_struct {
     struct list_node vmlt_node;
     VALUE self;
     rb_vm_t *vm;

-    rb_execution_context_t ec;
+    rb_execution_context_t *ec;

     VALUE last_status; /* $? */
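
Note: this is the central change of the commit: rb_thread_t no longer embeds
its execution context, it points at one. Every th->ec.field access becomes
th->ec->field and, more importantly, an EC can now be switched without copying.
A minimal illustration of what the pointer buys (hypothetical helper, not part
of this commit; cont.c's new ec_switch does the real work):

    /* Switching contexts becomes a pointer swap instead of a struct copy. */
    static void
    example_switch_ec(rb_thread_t *th, rb_execution_context_t *next_ec)
    {
        th->ec = next_ec;                             /* thread now runs on next_ec */
        ruby_current_execution_context_ptr = next_ec; /* keep GET_EC() consistent */
    }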
@@ -1237,7 +1239,7 @@ VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
 #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
 #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)

 #define RUBY_VM_END_CONTROL_FRAME(th) \
-  ((rb_control_frame_t *)((th)->ec.vm_stack + (th)->ec.vm_stack_size))
+  ((rb_control_frame_t *)((th)->ec->vm_stack + (th)->ec->vm_stack_size))
 #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
   ((void *)(ecfp) > (void *)(cfp))
 #define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
@@ -1469,7 +1471,7 @@ extern void rb_vmdebug_stack_dump_raw(rb_thread_t *, rb_control_frame_t *);
 extern void rb_vmdebug_debug_print_pre(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE *_pc);
 extern void rb_vmdebug_debug_print_post(rb_thread_t *th, rb_control_frame_t *cfp);

-#define SDR() rb_vmdebug_stack_dump_raw(GET_THREAD(), GET_THREAD()->ec.cfp)
+#define SDR() rb_vmdebug_stack_dump_raw(GET_THREAD(), GET_THREAD()->ec->cfp)
 #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_THREAD(), (cfp))
 void rb_vm_bugreport(const void *);
 NORETURN(void rb_bug_context(const void *, const char *fmt, ...));
@@ -1569,19 +1571,62 @@ VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_
 /* for thread */

 #if RUBY_VM_THREAD_MODEL == 2
 RUBY_SYMBOL_EXPORT_BEGIN

-extern rb_thread_t *ruby_current_thread;
-extern rb_vm_t *ruby_current_vm;
+extern rb_vm_t *ruby_current_vm_ptr;
+extern rb_execution_context_t *ruby_current_execution_context_ptr;
 extern rb_event_flag_t ruby_vm_event_flags;

 RUBY_SYMBOL_EXPORT_END

-#define GET_VM() ruby_current_vm
-#define GET_THREAD() ruby_current_thread
-#define rb_thread_set_current_raw(th) (void)(ruby_current_thread = (th))
+#define GET_VM() ruby_current_vm()
+#define GET_THREAD() ruby_current_thread()
+#define GET_EC() ruby_current_execution_context()
+
+rb_thread_t *rb_fiberptr_thread_ptr(const rb_fiber_t *fib);
+
+static inline rb_thread_t *
+rb_ec_thread_ptr(const rb_execution_context_t *ec)
+{
+    return rb_fiberptr_thread_ptr(ec->fiber);
+}
+
+static inline rb_vm_t *
+rb_ec_vm_ptr(const rb_execution_context_t *ec)
+{
+    const rb_thread_t *th = rb_fiberptr_thread_ptr(ec->fiber);
+    if (th) {
+        return rb_fiberptr_thread_ptr(ec->fiber)->vm;
+    }
+    else {
+        return NULL;
+    }
+}
+
+static inline rb_execution_context_t *
+ruby_current_execution_context(void)
+{
+    return ruby_current_execution_context_ptr;
+}
+
+static inline rb_thread_t *
+ruby_current_thread(void)
+{
+    const rb_execution_context_t *ec = GET_EC();
+    return rb_ec_thread_ptr(ec);
+}
+
+static inline rb_vm_t *
+ruby_current_vm(void)
+{
+    VM_ASSERT(ruby_current_vm_ptr == NULL ||
+              ruby_current_execution_context_ptr == NULL ||
+              rb_ec_thread_ptr(GET_EC()) == NULL ||
+              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
+    return ruby_current_vm_ptr;
+}
+
+#define rb_thread_set_current_raw(th) (void)(ruby_current_execution_context_ptr = (th)->ec)
 #define rb_thread_set_current(th) do { \
     if ((th)->vm->running_thread != (th)) { \
         (th)->running_time_us = 0; \
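
Note: after this hunk the only stored global is the current execution context;
GET_THREAD() and GET_VM() are derived from it through the fiber backpointer. A
sketch of what a call site now pays, using only names defined in the hunk above
(the function itself is illustrative, not part of the commit):

    static inline void
    example_accessor_chain(void)
    {
        rb_execution_context_t *ec = GET_EC();   /* one global load */
        rb_thread_t *th = rb_ec_thread_ptr(ec);  /* ec->fiber -> owning thread */
        rb_vm_t *vm = rb_ec_vm_ptr(ec);          /* thread -> vm, NULL-safe during boot */
        (void)th; (void)vm;
    }

Code that already holds an ec can therefore skip the global read entirely,
which is the point of threading ec (rather than th) through the VM internals.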
@@ -1622,11 +1667,14 @@ void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
 int rb_threadptr_pending_interrupt_active_p(rb_thread_t *th);
 void rb_threadptr_error_print(rb_thread_t *volatile th, volatile VALUE errinfo);
 void rb_execution_context_mark(const rb_execution_context_t *ec);
+void rb_fiber_close(rb_fiber_t *fib);
+void Init_native_thread(rb_thread_t *th);

 #define RUBY_VM_CHECK_INTS(th) ruby_vm_check_ints(th)
 static inline void
 ruby_vm_check_ints(rb_thread_t *th)
 {
+    VM_ASSERT(th->ec == ruby_current_execution_context_ptr);
     if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(th))) {
         rb_threadptr_execute_interrupts(th, 0);
     }
@@ -1669,7 +1717,7 @@ ruby_exec_event_hook_orig(rb_thread_t *const th, const rb_event_flag_t flag,
     struct rb_trace_arg_struct trace_arg;

     trace_arg.event = flag;
     trace_arg.th = th;
-    trace_arg.cfp = th->ec.cfp;
+    trace_arg.cfp = th->ec->cfp;
     trace_arg.self = self;
     trace_arg.id = id;
     trace_arg.called_id = called_id;

vm_dump.c

@@ -22,14 +22,14 @@
 #define MAX_POSBUF 128

 #define VM_CFP_CNT(th, cfp) \
-  ((rb_control_frame_t *)((th)->ec.vm_stack + (th)->ec.vm_stack_size) - \
+  ((rb_control_frame_t *)((th)->ec->vm_stack + (th)->ec->vm_stack_size) - \
   (rb_control_frame_t *)(cfp))

 static void
 control_frame_dump(rb_thread_t *th, rb_control_frame_t *cfp)
 {
     ptrdiff_t pc = -1;
-    ptrdiff_t ep = cfp->ep - th->ec.vm_stack;
+    ptrdiff_t ep = cfp->ep - th->ec->vm_stack;
     char ep_in_heap = ' ';
     char posbuf[MAX_POSBUF+1];
     int line = 0;
@@ -39,7 +39,7 @@ control_frame_dump(rb_thread_t *th, rb_control_frame_t *cfp)
     const rb_callable_method_entry_t *me;

-    if (ep < 0 || (size_t)ep > th->ec.vm_stack_size) {
+    if (ep < 0 || (size_t)ep > th->ec->vm_stack_size) {
         ep = (ptrdiff_t)cfp->ep;
         ep_in_heap = 'p';
     }
@@ -112,14 +112,14 @@ control_frame_dump(rb_thread_t *th, rb_control_frame_t *cfp)
     }

     fprintf(stderr, "c:%04"PRIdPTRDIFF" ",
-            ((rb_control_frame_t *)(th->ec.vm_stack + th->ec.vm_stack_size) - cfp));
+            ((rb_control_frame_t *)(th->ec->vm_stack + th->ec->vm_stack_size) - cfp));
     if (pc == -1) {
         fprintf(stderr, "p:---- ");
     }
     else {
         fprintf(stderr, "p:%04"PRIdPTRDIFF" ", pc);
     }
-    fprintf(stderr, "s:%04"PRIdPTRDIFF" ", cfp->sp - th->ec.vm_stack);
+    fprintf(stderr, "s:%04"PRIdPTRDIFF" ", cfp->sp - th->ec->vm_stack);
     fprintf(stderr, ep_in_heap == ' ' ? "e:%06"PRIdPTRDIFF" " : "E:%06"PRIxPTRDIFF" ", ep % 10000);
     fprintf(stderr, "%-6s", magic);
     if (line) {
@@ -145,12 +145,12 @@ rb_vmdebug_stack_dump_raw(rb_thread_t *th, rb_control_frame_t *cfp)
         VALUE *p, *st, *t;

         fprintf(stderr, "-- stack frame ------------\n");
-        for (p = st = th->ec.vm_stack; p < sp; p++) {
+        for (p = st = th->ec->vm_stack; p < sp; p++) {
             fprintf(stderr, "%04ld (%p): %08"PRIxVALUE, (long)(p - st), p, *p);

             t = (VALUE *)*p;
-            if (th->ec.vm_stack <= t && t < sp) {
-                fprintf(stderr, " (= %ld)", (long)((VALUE *)GC_GUARDED_PTR_REF(t) - th->ec.vm_stack));
+            if (th->ec->vm_stack <= t && t < sp) {
+                fprintf(stderr, " (= %ld)", (long)((VALUE *)GC_GUARDED_PTR_REF(t) - th->ec->vm_stack));
             }

             if (p == ep)
@@ -162,7 +162,7 @@ rb_vmdebug_stack_dump_raw(rb_thread_t *th, rb_control_frame_t *cfp)

     fprintf(stderr, "-- Control frame information "
             "-----------------------------------------------\n");
-    while ((void *)cfp < (void *)(th->ec.vm_stack + th->ec.vm_stack_size)) {
+    while ((void *)cfp < (void *)(th->ec->vm_stack + th->ec->vm_stack_size)) {
         control_frame_dump(th, cfp);
         cfp++;
     }
@@ -173,7 +173,7 @@ void
 rb_vmdebug_stack_dump_raw_current(void)
 {
     rb_thread_t *th = GET_THREAD();
-    rb_vmdebug_stack_dump_raw(th, th->ec.cfp);
+    rb_vmdebug_stack_dump_raw(th, th->ec->cfp);
 }

 void
@@ -213,7 +213,7 @@ void
 rb_vmdebug_stack_dump_th(VALUE thval)
 {
     rb_thread_t *target_th = rb_thread_ptr(thval);
-    rb_vmdebug_stack_dump_raw(target_th, target_th->ec.cfp);
+    rb_vmdebug_stack_dump_raw(target_th, target_th->ec->cfp);
 }

 #if VMDEBUG > 2
@@ -285,11 +285,11 @@ vm_stack_dump_each(rb_thread_t *th, rb_control_frame_t *cfp)
                 break;
             }
             fprintf(stderr, "  stack %2d: %8s (%"PRIdPTRDIFF")\n", i, StringValueCStr(rstr),
-                    (ptr - th->ec.vm_stack));
+                    (ptr - th->ec->vm_stack));
         }
     }
     else if (VM_FRAME_FINISHED_P(cfp)) {
-        if ((th)->ec.vm_stack + (th)->ec.vm_stack_size > (VALUE *)(cfp + 1)) {
+        if ((th)->ec->vm_stack + (th)->ec->vm_stack_size > (VALUE *)(cfp + 1)) {
             vm_stack_dump_each(th, cfp + 1);
         }
         else {
@@ -305,22 +305,22 @@ vm_stack_dump_each(rb_thread_t *th, rb_control_frame_t *cfp)
 void
 rb_vmdebug_debug_print_register(rb_thread_t *th)
 {
-    rb_control_frame_t *cfp = th->ec.cfp;
+    rb_control_frame_t *cfp = th->ec->cfp;
     ptrdiff_t pc = -1;
-    ptrdiff_t ep = cfp->ep - th->ec.vm_stack;
+    ptrdiff_t ep = cfp->ep - th->ec->vm_stack;
     ptrdiff_t cfpi;

     if (VM_FRAME_RUBYFRAME_P(cfp)) {
         pc = cfp->pc - cfp->iseq->body->iseq_encoded;
     }

-    if (ep < 0 || (size_t)ep > th->ec.vm_stack_size) {
+    if (ep < 0 || (size_t)ep > th->ec->vm_stack_size) {
         ep = -1;
     }

-    cfpi = ((rb_control_frame_t *)(th->ec.vm_stack + th->ec.vm_stack_size)) - cfp;
+    cfpi = ((rb_control_frame_t *)(th->ec->vm_stack + th->ec->vm_stack_size)) - cfp;
     fprintf(stderr, "  [PC] %04"PRIdPTRDIFF", [SP] %04"PRIdPTRDIFF", [EP] %04"PRIdPTRDIFF", [CFP] %04"PRIdPTRDIFF"\n",
-            pc, (cfp->sp - th->ec.vm_stack), ep, cfpi);
+            pc, (cfp->sp - th->ec->vm_stack), ep, cfpi);
 }

 void
@@ -342,7 +342,7 @@ rb_vmdebug_debug_print_pre(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE
             printf(" ");
         }
         printf("| ");
-        if(0)printf("[%03ld] ", (long)(cfp->sp - th->ec.vm_stack));
+        if(0)printf("[%03ld] ", (long)(cfp->sp - th->ec->vm_stack));

         /* printf("%3"PRIdPTRDIFF" ", VM_CFP_CNT(th, cfp)); */
         if (pc >= 0) {
@@ -377,7 +377,7 @@ rb_vmdebug_debug_print_post(rb_thread_t *th, rb_control_frame_t *cfp
 #if VMDEBUG > 2
     /* stack_dump_thobj(th); */
-    vm_stack_dump_each(th, th->ec.cfp);
+    vm_stack_dump_each(th, th->ec->cfp);

 #if OPT_STACK_CACHING
     {
@@ -397,7 +397,7 @@ VALUE
 rb_vmdebug_thread_dump_state(VALUE self)
 {
     rb_thread_t *th = rb_thread_ptr(self);
-    rb_control_frame_t *cfp = th->ec.cfp;
+    rb_control_frame_t *cfp = th->ec->cfp;

     fprintf(stderr, "Thread state dump:\n");
     fprintf(stderr, "pc : %p, sp : %p\n", (void *)cfp->pc, (void *)cfp->sp);
@@ -1085,6 +1085,6 @@ rb_vmdebug_stack_dump_all_threads(void)
 #else
         fprintf(stderr, "th: %p, native_id: %p\n", th, (void *)th->thread_id);
 #endif
-        rb_vmdebug_stack_dump_raw(th, th->ec.cfp);
+        rb_vmdebug_stack_dump_raw(th, th->ec->cfp);
     }
 }

vm_eval.c

@@ -74,7 +74,7 @@ vm_call0_cfunc_with_frame(rb_thread_t* th, struct rb_calling_info *calling, cons
         RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->owner, me->def->original_id);
         EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->def->original_id, mid, me->owner, Qnil);
         {
-            rb_control_frame_t *reg_cfp = th->ec.cfp;
+            rb_control_frame_t *reg_cfp = th->ec->cfp;

             vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL, recv,
                           block_handler, (VALUE)me,
@@ -113,7 +113,7 @@ vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_
     switch (cc->me->def->type) {
       case VM_METHOD_TYPE_ISEQ:
         {
-            rb_control_frame_t *reg_cfp = th->ec.cfp;
+            rb_control_frame_t *reg_cfp = th->ec->cfp;
             int i;

             CHECK_VM_STACK_OVERFLOW(reg_cfp, calling->argc + 1);
@@ -124,7 +124,7 @@ vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_
             }

             vm_call_iseq_setup(th, reg_cfp, calling, ci, cc);
-            VM_ENV_FLAGS_SET(th->ec.cfp->ep, VM_FRAME_FLAG_FINISH);
+            VM_ENV_FLAGS_SET(th->ec->cfp->ep, VM_FRAME_FLAG_FINISH);
             return vm_exec(th); /* CHECK_INTS in this function */
         }
       case VM_METHOD_TYPE_NOTIMPLEMENTED:
@@ -211,10 +211,10 @@ rb_vm_call(rb_thread_t *th, VALUE recv, VALUE id, int argc, const VALUE *argv, c
 static inline VALUE
 vm_call_super(rb_thread_t *th, int argc, const VALUE *argv)
 {
-    VALUE recv = th->ec.cfp->self;
+    VALUE recv = th->ec->cfp->self;
     VALUE klass;
     ID id;
-    rb_control_frame_t *cfp = th->ec.cfp;
+    rb_control_frame_t *cfp = th->ec->cfp;
     const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);

     if (VM_FRAME_RUBYFRAME_P(cfp)) {
@@ -247,7 +247,7 @@ rb_current_receiver(void)
 {
     rb_thread_t *th = GET_THREAD();
     rb_control_frame_t *cfp;
-    if (!th || !(cfp = th->ec.cfp))
+    if (!th || !(cfp = th->ec->cfp))
         rb_raise(rb_eRuntimeError, "no self, no life");
     return cfp->self;
 }
@@ -348,7 +348,7 @@ check_funcall_respond_to(rb_thread_t *th, VALUE klass, VALUE recv, ID mid)
 static int
 check_funcall_callable(rb_thread_t *th, const rb_callable_method_entry_t *me)
 {
-    return rb_method_call_status(th, me, CALL_FCALL, th->ec.cfp->self) == MISSING_NONE;
+    return rb_method_call_status(th, me, CALL_FCALL, th->ec->cfp->self) == MISSING_NONE;
 }

 static VALUE
@@ -585,7 +585,7 @@ static inline VALUE
 rb_call(VALUE recv, ID mid, int argc, const VALUE *argv, call_type scope)
 {
     rb_thread_t *th = GET_THREAD();
-    return rb_call0(recv, mid, argc, argv, scope, th->ec.cfp->self);
+    return rb_call0(recv, mid, argc, argv, scope, th->ec->cfp->self);
 }

 NORETURN(static void raise_method_missing(rb_thread_t *th, int argc, const VALUE *argv,
@@ -850,7 +850,7 @@ rb_funcall_with_block(VALUE recv, ID mid, int argc, const VALUE *argv, VALUE pas
 static VALUE *
 current_vm_stack_arg(rb_thread_t *th, const VALUE *argv)
 {
-    rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec.cfp);
+    rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec->cfp);
     if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, prev_cfp)) return NULL;
     if (prev_cfp->sp + 1 != argv) return NULL;
     return prev_cfp->sp + 1;
@@ -869,7 +869,7 @@ send_internal(int argc, const VALUE *argv, VALUE recv, call_type scope)
         self = Qundef;
     }
     else {
-        self = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec.cfp)->self;
+        self = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec->cfp)->self;
     }

     if (argc == 0) {
@@ -1112,7 +1112,7 @@ rb_iterate0(VALUE (* it_proc) (VALUE), VALUE data1,
 {
     enum ruby_tag_type state;
     volatile VALUE retval = Qnil;
-    rb_control_frame_t *const cfp = th->ec.cfp;
+    rb_control_frame_t *const cfp = th->ec->cfp;

     TH_PUSH_TAG(th);
     state = TH_EXEC_TAG();
@@ -1134,15 +1134,15 @@ rb_iterate0(VALUE (* it_proc) (VALUE), VALUE data1,
            retval = (*it_proc) (data1);
        }
    }
    else if (state == TAG_BREAK || state == TAG_RETRY) {
-        const struct vm_throw_data *const err = (struct vm_throw_data *)th->ec.errinfo;
+        const struct vm_throw_data *const err = (struct vm_throw_data *)th->ec->errinfo;
         const rb_control_frame_t *const escape_cfp = THROW_DATA_CATCH_FRAME(err);

         if (cfp == escape_cfp) {
             rb_vm_rewind_cfp(th, cfp);

             state = 0;
-            th->ec.tag->state = TAG_NONE;
-            th->ec.errinfo = Qnil;
+            th->ec->tag->state = TAG_NONE;
+            th->ec->errinfo = Qnil;

             if (state == TAG_RETRY) goto iter_retry;
             retval = THROW_DATA_VAL(err);
@@ -1296,7 +1296,7 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
         base_block = &bind->block;
     }
     else {
-        rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
+        rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);

         if (cfp != 0) {
             block.as.captured = *VM_CFP_TO_CAPTURED_BLOCK(cfp);
@@ -1318,7 +1318,7 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
         iseq = rb_iseq_compile_with_option(src, fname, realpath, INT2FIX(line), base_block, Qnil);
         if (!iseq) {
-            rb_exc_raise(adjust_backtrace_in_eval(th, th->ec.errinfo));
+            rb_exc_raise(adjust_backtrace_in_eval(th, th->ec->errinfo));
         }

         /* TODO: what the code checking? */
@@ -1340,7 +1340,7 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_

         /* save new env */
         if (bind && iseq->body->local_table_size > 0) {
-            vm_bind_update_env(scope, bind, vm_make_env_object(th, th->ec.cfp));
+            vm_bind_update_env(scope, bind, vm_make_env_object(th, th->ec->cfp));
         }
     }
@@ -1357,7 +1357,7 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
     if (state) {
         if (state == TAG_RAISE) {
-            adjust_backtrace_in_eval(th, th->ec.errinfo);
+            adjust_backtrace_in_eval(th, th->ec->errinfo);
         }
         TH_JUMP_TAG(th, state);
     }
@@ -1546,7 +1546,7 @@ static VALUE
 yield_under(VALUE under, VALUE self, int argc, const VALUE *argv)
 {
     rb_thread_t *th = GET_THREAD();
-    rb_control_frame_t *cfp = th->ec.cfp;
+    rb_control_frame_t *cfp = th->ec->cfp;
     VALUE block_handler = VM_CF_BLOCK_HANDLER(cfp);
     VALUE new_block_handler = 0;
     const struct rb_captured_block *captured = NULL;
@@ -1580,7 +1580,7 @@ yield_under(VALUE under, VALUE self, int argc, const VALUE *argv)
         new_captured.self = self;
         ep = captured->ep;

-        VM_FORCE_WRITE_SPECIAL_CONST(&VM_CF_LEP(th->ec.cfp)[VM_ENV_DATA_INDEX_SPECVAL], new_block_handler);
+        VM_FORCE_WRITE_SPECIAL_CONST(&VM_CF_LEP(th->ec->cfp)[VM_ENV_DATA_INDEX_SPECVAL], new_block_handler);
     }

     cref = vm_cref_push(th, under, ep, TRUE);
@@ -1591,7 +1591,7 @@ VALUE
 rb_yield_refine_block(VALUE refinement, VALUE refinements)
 {
     rb_thread_t *th = GET_THREAD();
-    VALUE block_handler = VM_CF_BLOCK_HANDLER(th->ec.cfp);
+    VALUE block_handler = VM_CF_BLOCK_HANDLER(th->ec->cfp);

     if (vm_block_handler_type(block_handler) != block_handler_type_iseq) {
         rb_bug("rb_yield_refine_block: an iseq block is required");
@@ -1603,7 +1603,7 @@ rb_yield_refine_block(VALUE refinement, VALUE refinements)
         const VALUE *ep = captured->ep;
         rb_cref_t *cref = vm_cref_push(th, refinement, ep, TRUE);
         CREF_REFINEMENTS_SET(cref, refinements);
-        VM_FORCE_WRITE_SPECIAL_CONST(&VM_CF_LEP(th->ec.cfp)[VM_ENV_DATA_INDEX_SPECVAL], new_block_handler);
+        VM_FORCE_WRITE_SPECIAL_CONST(&VM_CF_LEP(th->ec->cfp)[VM_ENV_DATA_INDEX_SPECVAL], new_block_handler);
         new_captured.self = refinement;
         return vm_yield_with_cref(th, 0, NULL, cref, FALSE);
     }
@@ -1875,7 +1875,7 @@ void
 rb_throw_obj(VALUE tag, VALUE value)
 {
     rb_thread_t *th = GET_THREAD();
-    struct rb_vm_tag *tt = th->ec.tag;
+    struct rb_vm_tag *tt = th->ec->tag;

     while (tt) {
         if (tt->tag == tag) {
@@ -1892,7 +1892,7 @@ rb_throw_obj(VALUE tag, VALUE value)
         rb_exc_raise(rb_class_new_instance(numberof(desc), desc, rb_eUncaughtThrow));
     }

-    th->ec.errinfo = (VALUE)THROW_DATA_NEW(tag, NULL, TAG_THROW);
+    th->ec->errinfo = (VALUE)THROW_DATA_NEW(tag, NULL, TAG_THROW);
     TH_JUMP_TAG(th, TAG_THROW);
 }
@@ -1986,7 +1986,7 @@ vm_catch_protect(VALUE tag, rb_block_call_func *func, VALUE data,
 {
     enum ruby_tag_type state;
     VALUE val = Qnil;           /* OK */
-    rb_control_frame_t *volatile saved_cfp = th->ec.cfp;
+    rb_control_frame_t *volatile saved_cfp = th->ec->cfp;

     TH_PUSH_TAG(th);
@@ -1996,10 +1996,10 @@ vm_catch_protect(VALUE tag, rb_block_call_func *func, VALUE data,
         /* call with argc=1, argv = [tag], block = Qnil to insure compatibility */
         val = (*func)(tag, data, 1, (const VALUE *)&tag, Qnil);
     }
-    else if (state == TAG_THROW && THROW_DATA_VAL((struct vm_throw_data *)th->ec.errinfo) == tag) {
+    else if (state == TAG_THROW && THROW_DATA_VAL((struct vm_throw_data *)th->ec->errinfo) == tag) {
         rb_vm_rewind_cfp(th, saved_cfp);
-        val = th->ec.tag->retval;
-        th->ec.errinfo = Qnil;
+        val = th->ec->tag->retval;
+        th->ec->errinfo = Qnil;
         state = 0;
     }
     TH_POP_TAG();
@@ -2081,7 +2081,7 @@ rb_f_local_variables(void)
     struct local_var_list vars;
     rb_thread_t *th = GET_THREAD();
     rb_control_frame_t *cfp =
-        vm_get_ruby_level_caller_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec.cfp));
+        vm_get_ruby_level_caller_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec->cfp));
     unsigned int i;

     local_var_list_init(&vars);
@@ -2137,7 +2137,7 @@ VALUE
 rb_f_block_given_p(void)
 {
     rb_thread_t *th = GET_THREAD();
-    rb_control_frame_t *cfp = th->ec.cfp;
+    rb_control_frame_t *cfp = th->ec->cfp;

     cfp = vm_get_ruby_level_caller_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
     if (cfp != NULL && VM_CF_BLOCK_HANDLER(cfp) != VM_BLOCK_HANDLER_NONE) {
@@ -2152,7 +2152,7 @@ VALUE
 rb_current_realfilepath(void)
 {
     rb_thread_t *th = GET_THREAD();
-    rb_control_frame_t *cfp = th->ec.cfp;
+    rb_control_frame_t *cfp = th->ec->cfp;

     cfp = vm_get_ruby_level_caller_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
     if (cfp != 0) return rb_iseq_realpath(cfp->iseq);
     return Qnil;

vm_exec.c

@@ -84,7 +84,7 @@ vm_exec_core(rb_thread_t *th, VALUE initial)

 #undef RESTORE_REGS
 #define RESTORE_REGS() \
 { \
-  VM_REG_CFP = th->ec.cfp; \
+  VM_REG_CFP = th->ec->cfp; \
   reg_pc = reg_cfp->pc; \
 }
@@ -102,7 +102,7 @@ vm_exec_core(rb_thread_t *th, VALUE initial)
         return (VALUE)insns_address_table;
     }
 #endif
-    reg_cfp = th->ec.cfp;
+    reg_cfp = th->ec->cfp;
     reg_pc = reg_cfp->pc;

 #if OPT_STACK_CACHING
@@ -142,7 +142,7 @@ rb_vm_get_insns_address_table(void)
 static VALUE
 vm_exec_core(rb_thread_t *th, VALUE initial)
 {
-    register rb_control_frame_t *reg_cfp = th->ec.cfp;
+    register rb_control_frame_t *reg_cfp = th->ec->cfp;

     while (1) {
         reg_cfp = ((rb_insn_func_t) (*GET_PC()))(th, reg_cfp);
@@ -158,8 +158,8 @@ vm_exec_core(rb_thread_t *th, VALUE initial)
             return ret;
         }
         else {
-            VALUE err = th->ec.errinfo;
-            th->ec.errinfo = Qnil;
+            VALUE err = th->ec->errinfo;
+            th->ec->errinfo = Qnil;
             return err;
         }
     }

vm_exec.h

@@ -157,11 +157,11 @@ default:			\
 #endif

-#define VM_SP_CNT(th, sp) ((sp) - (th)->ec.vm_stack)
+#define VM_SP_CNT(th, sp) ((sp) - (th)->ec->vm_stack)

 #if OPT_CALL_THREADED_CODE
 #define THROW_EXCEPTION(exc) do { \
-    th->ec.errinfo = (VALUE)(exc); \
+    th->ec->errinfo = (VALUE)(exc); \
     return 0; \
 } while (0)
 #else
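
Note: under OPT_CALL_THREADED_CODE an instruction function cannot longjmp
through the dispatch loop, so THROW_EXCEPTION parks the exception in the EC's
errinfo and returns 0. The consumer side is the vm_exec_core loop in the
vm_exec.c hunk above; a condensed sketch of the handoff (names from this diff):

    while (1) {
        reg_cfp = ((rb_insn_func_t)(*GET_PC()))(th, reg_cfp);
        if (UNLIKELY(reg_cfp == 0)) {    /* an insn used THROW_EXCEPTION */
            VALUE err = th->ec->errinfo; /* the parked exception */
            th->ec->errinfo = Qnil;      /* clear before unwinding */
            return err;
        }
    }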

vm_insnhelper.c

@@ -35,14 +35,14 @@ static void
 threadptr_stack_overflow(rb_thread_t *th, int setup)
 {
     VALUE mesg = th->vm->special_exceptions[ruby_error_sysstack];
-    th->ec.raised_flag = RAISED_STACKOVERFLOW;
+    th->ec->raised_flag = RAISED_STACKOVERFLOW;
     if (setup) {
         VALUE at = rb_threadptr_backtrace_object(th);
         mesg = ruby_vm_special_exception_copy(mesg);
         rb_ivar_set(mesg, idBt, at);
         rb_ivar_set(mesg, idBt_locations, at);
     }
-    th->ec.errinfo = mesg;
+    th->ec->errinfo = mesg;
     TH_JUMP_TAG(th, TAG_RAISE);
 }
@@ -57,8 +57,8 @@ void
 rb_threadptr_stack_overflow(rb_thread_t *th, int crit)
 {
     if (crit || rb_during_gc()) {
-        th->ec.raised_flag = RAISED_STACKOVERFLOW;
-        th->ec.errinfo = th->vm->special_exceptions[ruby_error_stackfatal];
+        th->ec->raised_flag = RAISED_STACKOVERFLOW;
+        th->ec->errinfo = th->vm->special_exceptions[ruby_error_stackfatal];
         TH_JUMP_TAG(th, TAG_RAISE);
     }
 #ifdef USE_SIGALTSTACK
@@ -266,7 +266,7 @@ vm_push_frame(rb_thread_t *th,
               int local_size,
               int stack_max)
 {
-    return vm_push_frame_(&th->ec, iseq, type, self, specval, cref_or_me, pc, sp, local_size, stack_max);
+    return vm_push_frame_(th->ec, iseq, type, self, specval, cref_or_me, pc, sp, local_size, stack_max);
 }

 rb_control_frame_t *
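
Note: the wrapper pattern above recurs throughout the migration: th-based entry
points become one-line shims over ec-based implementations (vm_push_frame_
already took an rb_execution_context_t *; only &th->ec becomes th->ec). An
illustrative shim under the same assumptions (hypothetical function name):

    static rb_control_frame_t *
    example_push_dummy_frame(rb_thread_t *th, const rb_iseq_t *iseq)
    {
        /* before: vm_push_frame_(&th->ec, ...)  -- address of an embedded struct
         * after:  vm_push_frame_(th->ec,  ...)  -- the pointer itself */
        return vm_push_frame_(th->ec, iseq, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL,
                              Qnil, VM_BLOCK_HANDLER_NONE, Qfalse,
                              NULL, th->ec->cfp->sp, 0, 0);
    }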
@@ -293,7 +293,7 @@ vm_pop_frame(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE *ep)
     if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
     if (VMDEBUG == 2)       SDR();

-    th->ec.cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+    th->ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

     return flags & VM_FRAME_FLAG_FINISH;
 }
@@ -301,7 +301,7 @@ vm_pop_frame(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE *ep)
 void
 rb_vm_pop_frame(rb_thread_t *th)
 {
-    vm_pop_frame(th, th->ec.cfp, th->ec.cfp->ep);
+    vm_pop_frame(th, th->ec->cfp, th->ec->cfp->ep);
 }

 /* method dispatch */
@@ -406,11 +406,11 @@ lep_svar(rb_thread_t *th, const VALUE *lep)
 {
     VALUE svar;

-    if (lep && (th == NULL || th->ec.root_lep != lep)) {
+    if (lep && (th == NULL || th->ec->root_lep != lep)) {
         svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
     }
     else {
-        svar = th->ec.root_svar;
+        svar = th->ec->root_svar;
     }

     VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
@@ -423,11 +423,11 @@ lep_svar_write(rb_thread_t *th, const VALUE *lep, const struct vm_svar *svar)
 {
     VM_ASSERT(vm_svar_valid_p((VALUE)svar));

-    if (lep && (th == NULL || th->ec.root_lep != lep)) {
+    if (lep && (th == NULL || th->ec->root_lep != lep)) {
         vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
     }
     else {
-        RB_OBJ_WRITE(th->self, &th->ec.root_svar, svar);
+        RB_OBJ_WRITE(th->self, &th->ec->root_svar, svar);
     }
 }
@@ -757,7 +757,7 @@ vm_cref_push(rb_thread_t *th, VALUE klass, const VALUE *ep, int pushed_by_eval)
         prev_cref = vm_env_cref(ep);
     }
     else {
-        rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->ec.cfp);
+        rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->ec->cfp);

         if (cfp) {
             prev_cref = vm_env_cref(cfp->ep);
@@ -830,7 +830,7 @@ vm_get_ev_const(rb_thread_t *th, VALUE orig_klass, ID id, int is_defined)

     if (orig_klass == Qnil) {
         /* in current lexical scope */
-        const rb_cref_t *root_cref = rb_vm_get_cref(th->ec.cfp->ep);
+        const rb_cref_t *root_cref = rb_vm_get_cref(th->ec->cfp->ep);
         const rb_cref_t *cref;
         VALUE klass = Qnil;
@@ -876,10 +876,10 @@ vm_get_ev_const(rb_thread_t *th, VALUE orig_klass, ID id, int is_defined)

         /* search self */
         if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
-            klass = vm_get_iclass(th->ec.cfp, CREF_CLASS(root_cref));
+            klass = vm_get_iclass(th->ec->cfp, CREF_CLASS(root_cref));
         }
         else {
-            klass = CLASS_OF(th->ec.cfp->self);
+            klass = CLASS_OF(th->ec->cfp->self);
         }

         if (is_defined) {
@@ -1066,16 +1066,16 @@ vm_throw_continue(rb_thread_t *th, VALUE err)
     /* continue throw */

     if (FIXNUM_P(err)) {
-        th->ec.tag->state = FIX2INT(err);
+        th->ec->tag->state = FIX2INT(err);
     }
     else if (SYMBOL_P(err)) {
-        th->ec.tag->state = TAG_THROW;
+        th->ec->tag->state = TAG_THROW;
     }
     else if (THROW_DATA_P(err)) {
-        th->ec.tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
+        th->ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
     }
     else {
-        th->ec.tag->state = TAG_RAISE;
+        th->ec->tag->state = TAG_RAISE;
     }
     return err;
 }
@@ -1230,7 +1230,7 @@ vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ru
         rb_bug("isns(throw): unsupport throw type");
     }

-    th->ec.tag->state = state;
+    th->ec->tag->state = state;
     return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
 }
@@ -1554,8 +1554,8 @@ vm_base_ptr(const rb_control_frame_t *cfp)
 #if VM_DEBUG_BP_CHECK
         if (bp != cfp->bp_check) {
             fprintf(stderr, "bp_check: %ld, bp: %ld\n",
-                    (long)(cfp->bp_check - GET_THREAD()->ec.vm_stack),
-                    (long)(bp - GET_THREAD()->ec.vm_stack));
+                    (long)(cfp->bp_check - GET_THREAD()->ec->vm_stack),
+                    (long)(bp - GET_THREAD()->ec->vm_stack));
             rb_bug("vm_base_ptr: unreachable");
         }
 #endif
@@ -1624,7 +1624,7 @@ vm_callee_setup_arg(rb_thread_t *th, struct rb_calling_info *calling, const stru
                     const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
 {
     if (LIKELY(simple_iseq_p(iseq) && !(ci->flag & VM_CALL_KW_SPLAT))) {
-        rb_control_frame_t *cfp = th->ec.cfp;
+        rb_control_frame_t *cfp = th->ec->cfp;

         CALLER_SETUP_ARG(cfp, calling, ci); /* splat arg */
@@ -1707,7 +1707,7 @@ vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_
     }

     vm_pop_frame(th, cfp, cfp->ep);
-    cfp = th->ec.cfp;
+    cfp = th->ec->cfp;

     sp_orig = sp = cfp->sp;
@@ -1873,7 +1873,7 @@ static inline int
 vm_cfp_consistent_p(rb_thread_t *th, const rb_control_frame_t *reg_cfp)
 {
     const int ov_flags = RAISED_STACKOVERFLOW;
-    if (LIKELY(reg_cfp == th->ec.cfp + 1)) return TRUE;
+    if (LIKELY(reg_cfp == th->ec->cfp + 1)) return TRUE;
     if (rb_thread_raised_p(th, ov_flags)) {
         rb_thread_raised_reset(th, ov_flags);
         return TRUE;
@@ -1883,7 +1883,7 @@ vm_cfp_consistent_p(rb_thread_t *th, const rb_control_frame_t *reg_cfp)

 #define CHECK_CFP_CONSISTENCY(func) \
     (LIKELY(vm_cfp_consistent_p(th, reg_cfp)) ? (void)0 : \
-     rb_bug(func ": cfp consistency error (%p, %p)", reg_cfp, th->ec.cfp+1))
+     rb_bug(func ": cfp consistency error (%p, %p)", reg_cfp, th->ec->cfp+1))

 static inline
 const rb_method_cfunc_t *
@@ -1930,7 +1930,7 @@ vm_call_cfunc_with_frame(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb
     vm_push_frame(th, NULL, VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL, recv,
                   block_handler, (VALUE)me,
-                  0, th->ec.cfp->sp, 0, 0);
+                  0, th->ec->cfp->sp, 0, 0);

     if (len >= 0) rb_check_arity(argc, len, len);
@@ -2564,7 +2564,7 @@ vm_yield_with_cfunc(rb_thread_t *th,
                   self,
                   VM_GUARDED_PREV_EP(captured->ep),
                   (VALUE)me,
-                  0, th->ec.cfp->sp, 0, 0);
+                  0, th->ec->cfp->sp, 0, 0);
     val = (*ifunc->func)(arg, ifunc->data, argc, argv, blockarg);
     rb_vm_pop_frame(th);
@@ -2609,7 +2609,7 @@ static int
 vm_callee_setup_block_arg(rb_thread_t *th, struct rb_calling_info *calling, const struct rb_call_info *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
 {
     if (simple_iseq_p(iseq)) {
-        rb_control_frame_t *cfp = th->ec.cfp;
+        rb_control_frame_t *cfp = th->ec->cfp;
         VALUE arg0;

         CALLER_SETUP_ARG(cfp, calling, ci); /* splat arg */
@@ -2694,7 +2694,7 @@ vm_invoke_symbol_block(rb_thread_t *th, rb_control_frame_t *reg_cfp,
 {
     VALUE val;
     int argc;
-    CALLER_SETUP_ARG(th->ec.cfp, calling, ci);
+    CALLER_SETUP_ARG(th->ec->cfp, calling, ci);
     argc = calling->argc;
     val = vm_yield_with_symbol(th, symbol, argc, STACK_ADDR_FROM_TOP(argc), VM_BLOCK_HANDLER_NONE);
     POPN(argc);
@@ -2708,7 +2708,7 @@ vm_invoke_ifunc_block(rb_thread_t *th, rb_control_frame_t *reg_cfp,
 {
     VALUE val;
     int argc;
-    CALLER_SETUP_ARG(th->ec.cfp, calling, ci);
+    CALLER_SETUP_ARG(th->ec->cfp, calling, ci);
     argc = calling->argc;
     val = vm_yield_with_cfunc(th, captured, captured->self, argc, STACK_ADDR_FROM_TOP(argc), VM_BLOCK_HANDLER_NONE);
     POPN(argc); /* TODO: should put before C/yield? */
@@ -2773,7 +2773,7 @@ static VALUE
 vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
 {
     rb_thread_t *th = GET_THREAD();
-    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
+    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);
     struct rb_captured_block *captured;

     if (cfp == 0) {
@@ -3313,7 +3313,7 @@ vm_once_dispatch(ISEQ iseq, IC ic, rb_thread_t *th)
         val = is->once.value = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
         /* is->once.running_thread is cleared by vm_once_clear() */
         is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
-        rb_iseq_add_mark_object(th->ec.cfp->iseq, val);
+        rb_iseq_add_mark_object(th->ec->cfp->iseq, val);
         return val;
     }
     else if (is->once.running_thread == th) {

vm_insnhelper.h

@@ -54,7 +54,7 @@ RUBY_SYMBOL_EXPORT_END
 #define VM_REG_EP (VM_REG_CFP->ep)

 #define RESTORE_REGS() do { \
-    VM_REG_CFP = th->ec.cfp; \
+    VM_REG_CFP = th->ec->cfp; \
 } while (0)

 #define REG_A   reg_a
@@ -102,7 +102,7 @@ enum vm_regan_acttype {
 #define SET_SV(x)  (*GET_SP() = (x))
   /* set current stack value as x */

-#define GET_SP_COUNT() (VM_REG_SP - th->ec.vm_stack)
+#define GET_SP_COUNT() (VM_REG_SP - th->ec->vm_stack)

 /* instruction sequence C struct */
 #define GET_ISEQ() (GET_CFP()->iseq)
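
Note: the interpreter caches the frame pointer in a local "register"
(VM_REG_CFP), so any callee that pushes or pops frames leaves the cache stale;
RESTORE_REGS re-reads it through the EC. A sketch of the idiom as it appears at
dispatch sites (illustrative; CALL_METHOD stands for any frame-moving callee):

    CALL_METHOD(calling, ci, cc);  /* may change th->ec->cfp */
    RESTORE_REGS();                /* VM_REG_CFP = th->ec->cfp; resync the cache */

With ec embedded this resync was a fixed-offset load from th; with ec as a
pointer it is a two-step load, which is the main runtime cost this refactoring
accepts in exchange for swappable contexts.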

vm_method.c

@@ -264,7 +264,7 @@ method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def,
             def->body.attr.id = (ID)(VALUE)opts;

-            cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
+            cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);

             if (cfp && (line = rb_vm_get_sourceline(cfp))) {
                 VALUE location = rb_ary_new3(2, rb_iseq_path(cfp->iseq), INT2FIX(line));
@@ -1089,7 +1089,7 @@ static rb_method_visibility_t
 rb_scope_visibility_get(void)
 {
     rb_thread_t *th = GET_THREAD();
-    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
+    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);

     if (!vm_env_cref_by_cref(cfp->ep)) {
         return METHOD_VISI_PUBLIC;
@@ -1103,7 +1103,7 @@ static int
 rb_scope_module_func_check(void)
 {
     rb_thread_t *th = GET_THREAD();
-    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
+    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec->cfp);

     if (!vm_env_cref_by_cref(cfp->ep)) {
         return FALSE;

vm_trace.c

@@ -304,31 +304,31 @@ rb_threadptr_exec_event_hooks_orig(rb_trace_arg_t *trace_arg, int pop_p)
     rb_thread_t *th = trace_arg->th;

     if (trace_arg->event & RUBY_INTERNAL_EVENT_MASK) {
-        if (th->ec.trace_arg && (th->ec.trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
+        if (th->ec->trace_arg && (th->ec->trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
             /* skip hooks because this thread doing INTERNAL_EVENT */
         }
         else {
-            rb_trace_arg_t *prev_trace_arg = th->ec.trace_arg;
+            rb_trace_arg_t *prev_trace_arg = th->ec->trace_arg;
             th->vm->trace_running++;
-            th->ec.trace_arg = trace_arg;
+            th->ec->trace_arg = trace_arg;
             exec_hooks_unprotected(th, &th->event_hooks, trace_arg);
             exec_hooks_unprotected(th, &th->vm->event_hooks, trace_arg);
-            th->ec.trace_arg = prev_trace_arg;
+            th->ec->trace_arg = prev_trace_arg;
             th->vm->trace_running--;
         }
     }
     else {
-        if (th->ec.trace_arg == NULL && /* check reentrant */
+        if (th->ec->trace_arg == NULL && /* check reentrant */
             trace_arg->self != rb_mRubyVMFrozenCore /* skip special methods. TODO: remove it. */) {
-            const VALUE errinfo = th->ec.errinfo;
-            const VALUE old_recursive = th->ec.local_storage_recursive_hash;
+            const VALUE errinfo = th->ec->errinfo;
+            const VALUE old_recursive = th->ec->local_storage_recursive_hash;
             int state = 0;

-            th->ec.local_storage_recursive_hash = th->ec.local_storage_recursive_hash_for_trace;
-            th->ec.errinfo = Qnil;
+            th->ec->local_storage_recursive_hash = th->ec->local_storage_recursive_hash_for_trace;
+            th->ec->errinfo = Qnil;

             th->vm->trace_running++;
-            th->ec.trace_arg = trace_arg;
+            th->ec->trace_arg = trace_arg;
             {
                 /* thread local traces */
                 state = exec_hooks_protected(th, &th->event_hooks, trace_arg);
@@ -338,19 +338,19 @@ rb_threadptr_exec_event_hooks_orig(rb_trace_arg_t *trace_arg, int pop_p)
                 state = exec_hooks_protected(th, &th->vm->event_hooks, trace_arg);
                 if (state) goto terminate;

-                th->ec.errinfo = errinfo;
+                th->ec->errinfo = errinfo;
             }
           terminate:
-            th->ec.trace_arg = NULL;
+            th->ec->trace_arg = NULL;
             th->vm->trace_running--;

-            th->ec.local_storage_recursive_hash_for_trace = th->ec.local_storage_recursive_hash;
-            th->ec.local_storage_recursive_hash = old_recursive;
+            th->ec->local_storage_recursive_hash_for_trace = th->ec->local_storage_recursive_hash;
+            th->ec->local_storage_recursive_hash = old_recursive;

             if (state) {
                 if (pop_p) {
-                    if (VM_FRAME_FINISHED_P(th->ec.cfp)) {
-                        th->ec.tag = th->ec.tag->prev;
+                    if (VM_FRAME_FINISHED_P(th->ec->cfp)) {
+                        th->ec->tag = th->ec->tag->prev;
                     }
                     rb_vm_pop_frame(th);
                 }
@@ -379,12 +379,12 @@ rb_suppress_tracing(VALUE (*func)(VALUE), VALUE arg)
     VALUE result = Qnil;
     rb_thread_t *volatile th = GET_THREAD();
     enum ruby_tag_type state;
-    const int tracing = th->ec.trace_arg ? 1 : 0;
+    const int volatile tracing = th->ec->trace_arg ? 1 : 0;
     rb_trace_arg_t dummy_trace_arg;
     dummy_trace_arg.event = 0;

     if (!tracing) th->vm->trace_running++;
-    if (!th->ec.trace_arg) th->ec.trace_arg = &dummy_trace_arg;
+    if (!th->ec->trace_arg) th->ec->trace_arg = &dummy_trace_arg;

     raised = rb_threadptr_reset_raised(th);
@@ -398,7 +398,7 @@ rb_suppress_tracing(VALUE (*func)(VALUE), VALUE arg)
         rb_threadptr_set_raised(th);
     }

-    if (th->ec.trace_arg == &dummy_trace_arg) th->ec.trace_arg = 0;
+    if (th->ec->trace_arg == &dummy_trace_arg) th->ec->trace_arg = 0;
     if (!tracing) th->vm->trace_running--;

     if (state) {
@@ -706,7 +706,7 @@ tpptr(VALUE tpval)
 static rb_trace_arg_t *
 get_trace_arg(void)
 {
-    rb_trace_arg_t *trace_arg = GET_THREAD()->ec.trace_arg;
+    rb_trace_arg_t *trace_arg = GET_THREAD()->ec->trace_arg;
     if (trace_arg == 0) {
         rb_raise(rb_eRuntimeError, "access from outside");
     }
@@ -1310,7 +1310,7 @@ static VALUE
 tracepoint_inspect(VALUE self)
 {
     rb_tp_t *tp = tpptr(self);
-    rb_trace_arg_t *trace_arg = GET_THREAD()->ec.trace_arg;
+    rb_trace_arg_t *trace_arg = GET_THREAD()->ec->trace_arg;

     if (trace_arg) {
         switch (trace_arg->event) {
@@ -1591,12 +1591,12 @@ rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func,
 void
 rb_postponed_job_flush(rb_vm_t *vm)
 {
-    rb_thread_t *th = GET_THREAD();
+    rb_thread_t * volatile th = GET_THREAD();
     const unsigned long block_mask = POSTPONED_JOB_INTERRUPT_MASK|TRAP_INTERRUPT_MASK;
-    unsigned long saved_mask = th->interrupt_mask & block_mask;
-    VALUE saved_errno = th->ec.errinfo;
+    volatile unsigned long saved_mask = th->interrupt_mask & block_mask;
+    VALUE volatile saved_errno = th->ec->errinfo;

-    th->ec.errinfo = Qnil;
+    th->ec->errinfo = Qnil;
     /* mask POSTPONED_JOB dispatch */
     th->interrupt_mask |= block_mask;
     {
@@ -1614,5 +1614,5 @@ rb_postponed_job_flush(rb_vm_t *vm)
     }
     /* restore POSTPONED_JOB mask */
     th->interrupt_mask &= ~(saved_mask ^ block_mask);
-    th->ec.errinfo = saved_errno;
+    th->ec->errinfo = saved_errno;
 }
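
Note: rb_postponed_job_flush also gains volatile qualifiers here, following the
usual setjmp/longjmp rule: locals consulted after a longjmp back into the
EXEC_TAG region should not live in registers the jump may clobber. A minimal
plain-C illustration of the same pattern (not the VM's macros):

    #include <setjmp.h>

    static jmp_buf tag;

    static void
    example_flush(void)
    {
        volatile unsigned long saved_mask = 0x3;  /* stands in for the interrupt mask */
        if (setjmp(tag) == 0) {
            /* run queued jobs; an error path would longjmp(tag, 1) */
        }
        /* reached on both the normal and the longjmp path; volatile keeps
         * saved_mask trustworthy here */
        (void)saved_mask;
    }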