Use VirtualAlloc/VirtualProtect/VirtualFree for Windows fiber machine-stack allocation.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@65909 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
This commit is contained in:
samuel 2018-11-22 02:17:44 +00:00
parent e7d919d265
commit 38f7bb481e

116
cont.c
View File

@ -416,10 +416,11 @@ cont_free(void *ptr)
rb_bug("Illegal root fiber parameter"); rb_bug("Illegal root fiber parameter");
} }
#ifdef _WIN32 #ifdef _WIN32
free((void*)fib->ss_sp); VirtualFree((void*)fib->ss_sp, 0, MEM_RELEASE);
#else #else
munmap((void*)fib->ss_sp, fib->ss_size); munmap((void*)fib->ss_sp, fib->ss_size);
#endif #endif
fib->ss_sp = NULL;
} }
#elif defined(_WIN32) #elif defined(_WIN32)
if (!fiber_is_root_p(fib)) { if (!fiber_is_root_p(fib)) {
@ -870,37 +871,46 @@ static char*
fiber_machine_stack_alloc(size_t size) fiber_machine_stack_alloc(size_t size)
{ {
char *ptr; char *ptr;
#ifdef _WIN32
DWORD old_protect;
#endif
if (machine_stack_cache_index > 0) { if (machine_stack_cache_index > 0) {
if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) { if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr; ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
machine_stack_cache_index--; machine_stack_cache_index--;
machine_stack_cache[machine_stack_cache_index].ptr = NULL; machine_stack_cache[machine_stack_cache_index].ptr = NULL;
machine_stack_cache[machine_stack_cache_index].size = 0; machine_stack_cache[machine_stack_cache_index].size = 0;
} } else {
else{
/* TODO handle multiple machine stack size */ /* TODO handle multiple machine stack size */
rb_bug("machine_stack_cache size is not canonicalized"); rb_bug("machine_stack_cache size is not canonicalized");
} }
} } else {
else {
#ifdef _WIN32 #ifdef _WIN32
return malloc(size); ptr = VirtualAlloc(0, size, MEM_COMMIT, PAGE_READWRITE);
if (!ptr) {
rb_raise(rb_eFiberError, "can't allocate machine stack to fiber: %s", ERRNOMSG);
}
if (!VirtualProtect(ptr, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
}
#else #else
void *page; void *page;
STACK_GROW_DIR_DETECTION; STACK_GROW_DIR_DETECTION;
errno = 0; errno = 0;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0); ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
if (ptr == MAP_FAILED) { if (ptr == MAP_FAILED) {
rb_raise(rb_eFiberError, "can't alloc machine stack to fiber: %s", ERRNOMSG); rb_raise(rb_eFiberError, "can't alloc machine stack to fiber: %s", ERRNOMSG);
} }
/* guard page setup */ /* guard page setup */
page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0); page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) { if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG); rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
} }
#endif #endif
} }
@ -1689,11 +1699,11 @@ fiber_store(rb_fiber_t *next_fib, rb_thread_t *th)
rb_fiber_t *fib; rb_fiber_t *fib;
if (th->ec->fiber_ptr != NULL) { if (th->ec->fiber_ptr != NULL) {
fib = th->ec->fiber_ptr; fib = th->ec->fiber_ptr;
} }
else { else {
/* create root fiber */ /* create root fiber */
fib = root_fiber_alloc(th); fib = root_fiber_alloc(th);
} }
VM_ASSERT(FIBER_RESUMED_P(fib) || FIBER_TERMINATED_P(fib)); VM_ASSERT(FIBER_RESUMED_P(fib) || FIBER_TERMINATED_P(fib));
@ -1701,7 +1711,7 @@ fiber_store(rb_fiber_t *next_fib, rb_thread_t *th)
#if FIBER_USE_NATIVE #if FIBER_USE_NATIVE
if (FIBER_CREATED_P(next_fib)) { if (FIBER_CREATED_P(next_fib)) {
fiber_initialize_machine_stack_context(next_fib, th->vm->default_params.fiber_machine_stack_size); fiber_initialize_machine_stack_context(next_fib, th->vm->default_params.fiber_machine_stack_size);
} }
#endif #endif
@ -1719,23 +1729,23 @@ fiber_store(rb_fiber_t *next_fib, rb_thread_t *th)
/* restored */ /* restored */
#ifdef MAX_MACHINE_STACK_CACHE #ifdef MAX_MACHINE_STACK_CACHE
if (terminated_machine_stack.ptr) { if (terminated_machine_stack.ptr) {
if (machine_stack_cache_index < MAX_MACHINE_STACK_CACHE) { if (machine_stack_cache_index < MAX_MACHINE_STACK_CACHE) {
machine_stack_cache[machine_stack_cache_index++] = terminated_machine_stack; machine_stack_cache[machine_stack_cache_index++] = terminated_machine_stack;
} }
else { else {
if (terminated_machine_stack.ptr != fib->cont.machine.stack) { if (terminated_machine_stack.ptr != fib->cont.machine.stack) {
#ifdef _WIN32 #ifdef _WIN32
free((void*)terminated_machine_stack.ptr); VirtualFree(terminated_machine_stack.ptr, 0, MEM_RELEASE);
#else #else
munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE)); munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
#endif #endif
} }
else { else {
rb_bug("terminated fiber resumed"); rb_bug("terminated fiber resumed");
} }
} }
terminated_machine_stack.ptr = NULL; terminated_machine_stack.ptr = NULL;
terminated_machine_stack.size = 0; terminated_machine_stack.size = 0;
} }
#endif /* not _WIN32 */ #endif /* not _WIN32 */
fib = th->ec->fiber_ptr; fib = th->ec->fiber_ptr;
@ -1744,19 +1754,19 @@ fiber_store(rb_fiber_t *next_fib, rb_thread_t *th)
#else /* FIBER_USE_NATIVE */ #else /* FIBER_USE_NATIVE */
if (ruby_setjmp(fib->cont.jmpbuf)) { if (ruby_setjmp(fib->cont.jmpbuf)) {
/* restored */ /* restored */
fib = th->ec->fiber_ptr; fib = th->ec->fiber_ptr;
if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value); if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
if (next_fib->cont.value == Qundef) { if (next_fib->cont.value == Qundef) {
cont_restore_0(&next_fib->cont, &next_fib->cont.value); cont_restore_0(&next_fib->cont, &next_fib->cont.value);
VM_UNREACHABLE(fiber_store); VM_UNREACHABLE(fiber_store);
} }
return fib->cont.value; return fib->cont.value;
} }
else { else {
VALUE undef = Qundef; VALUE undef = Qundef;
cont_restore_0(&next_fib->cont, &undef); cont_restore_0(&next_fib->cont, &undef);
VM_UNREACHABLE(fiber_store); VM_UNREACHABLE(fiber_store);
} }
#endif /* FIBER_USE_NATIVE */ #endif /* FIBER_USE_NATIVE */
} }