struct rb_thread_sched_waiting

Introduce `struct rb_thread_sched_waiting` so that `timer_th.waiting`
can contain entries other than `rb_thread_t`.
This commit is contained in:
Koichi Sasada 2024-04-28 07:19:09 +09:00
parent a9f6bd028a
commit ffc69eec0a
3 changed files with 70 additions and 51 deletions

View File

@ -2873,6 +2873,17 @@ static void timer_thread_wakeup_thread(rb_thread_t *th);
#include "thread_pthread_mn.c" #include "thread_pthread_mn.c"
static rb_thread_t *
thread_sched_waiting_thread(struct rb_thread_sched_waiting *w)
{
    // Recover the rb_thread_t that embeds `w` as its sched.waiting_reason
    // member (container_of-style pointer arithmetic). A NULL waiting
    // record maps to a NULL thread, so callers can feed ccan_list_top()
    // results straight in.
    if (w == NULL) {
        return NULL;
    }
    return (rb_thread_t *)((size_t)w - offsetof(rb_thread_t, sched.waiting_reason));
}
static int static int
timer_thread_set_timeout(rb_vm_t *vm) timer_thread_set_timeout(rb_vm_t *vm)
{ {
@ -2905,7 +2916,9 @@ timer_thread_set_timeout(rb_vm_t *vm)
if (vm->ractor.sched.timeslice_wait_inf) { if (vm->ractor.sched.timeslice_wait_inf) {
rb_native_mutex_lock(&timer_th.waiting_lock); rb_native_mutex_lock(&timer_th.waiting_lock);
{ {
rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node); struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
rb_thread_t *th = thread_sched_waiting_thread(w);
if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) { if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
rb_hrtime_t now = rb_hrtime_now(); rb_hrtime_t now = rb_hrtime_now();
rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now); rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);
@ -2955,22 +2968,22 @@ timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
static rb_thread_t * static rb_thread_t *
timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now) timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
{ {
rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node); struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
if (th != NULL && if (w != NULL &&
(th->sched.waiting_reason.flags & thread_sched_waiting_timeout) && (w->flags & thread_sched_waiting_timeout) &&
timer_thread_check_exceed(th->sched.waiting_reason.data.timeout, now)) { timer_thread_check_exceed(w->data.timeout, now)) {
RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th)); RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(thread_sched_waiting_thread(w)));
// delete from waiting list // delete from waiting list
ccan_list_del_init(&th->sched.waiting_reason.node); ccan_list_del_init(&w->node);
// setup result // setup result
th->sched.waiting_reason.flags = thread_sched_waiting_none; w->flags = thread_sched_waiting_none;
th->sched.waiting_reason.data.result = 0; w->data.result = 0;
return th; return thread_sched_waiting_thread(w);
} }
return NULL; return NULL;

View File

@ -17,6 +17,31 @@
#define RB_NATIVETHREAD_LOCK_INIT PTHREAD_MUTEX_INITIALIZER #define RB_NATIVETHREAD_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
#define RB_NATIVETHREAD_COND_INIT PTHREAD_COND_INITIALIZER #define RB_NATIVETHREAD_COND_INIT PTHREAD_COND_INITIALIZER
// Record describing why its owner is parked on the timer thread's
// waiting list (`timer_th.waiting`): a timeout, readiness of an fd,
// or both. Embedded in rb_thread_sched_item as `waiting_reason`;
// thread_sched_waiting_thread() recovers the owning rb_thread_t.
// this data should be protected by timer_th.waiting_lock
struct rb_thread_sched_waiting {
// Bitmask of reasons; tested with `flags & thread_sched_waiting_*`.
// thread_sched_waiting_none means the record is not (or no longer) waiting.
enum thread_sched_waiting_flag {
thread_sched_waiting_none = 0x00,
thread_sched_waiting_timeout = 0x01,
thread_sched_waiting_io_read = 0x02,
thread_sched_waiting_io_write = 0x08,
thread_sched_waiting_io_force = 0x40, // ignore readable
} flags;
struct {
// absolute wakeup time, compared against rb_hrtime_now();
// should be compat with hrtime.h
#ifdef MY_RUBY_BUILD_MAY_TIME_TRAVEL
int128_t timeout;
#else
uint64_t timeout;
#endif
int fd; // -1 for timeout only
int result; // wakeup result; reset to 0 when dequeued by the timer thread
} data;
// connected to timer_th.waiting; entries are kept sorted by timeout,
// with timeout-less (0) entries at the tail
struct ccan_list_node node;
};
// per-Thead scheduler helper data // per-Thead scheduler helper data
struct rb_thread_sched_item { struct rb_thread_sched_item {
struct { struct {
@ -38,30 +63,7 @@ struct rb_thread_sched_item {
struct ccan_list_node zombie_threads; struct ccan_list_node zombie_threads;
} node; } node;
// this data should be protected by timer_th.waiting_lock struct rb_thread_sched_waiting waiting_reason;
struct {
enum thread_sched_waiting_flag {
thread_sched_waiting_none = 0x00,
thread_sched_waiting_timeout = 0x01,
thread_sched_waiting_io_read = 0x02,
thread_sched_waiting_io_write = 0x08,
thread_sched_waiting_io_force = 0x40, // ignore readable
} flags;
struct {
// should be compat with hrtime.h
#ifdef MY_RUBY_BUILD_MAY_TIME_TRAVEL
int128_t timeout;
#else
uint64_t timeout;
#endif
int fd; // -1 for timeout only
int result;
} data;
// connected to timer_th.waiting
struct ccan_list_node node;
} waiting_reason;
bool finished; bool finished;
bool malloc_stack; bool malloc_stack;

View File

@ -546,15 +546,18 @@ static void
verify_waiting_list(void) verify_waiting_list(void)
{ {
#if VM_CHECK_MODE > 0 #if VM_CHECK_MODE > 0
rb_thread_t *wth, *prev_wth = NULL; struct rb_thread_sched_waiting *w, *prev_w = NULL;
ccan_list_for_each(&timer_th.waiting, wth, sched.waiting_reason.node) {
// waiting list's timeout order should be [1, 2, 3, ..., 0, 0, 0]
ccan_list_for_each(&timer_th.waiting, w, node) {
// fprintf(stderr, "verify_waiting_list th:%u abs:%lu\n", rb_th_serial(wth), (unsigned long)wth->sched.waiting_reason.data.timeout); // fprintf(stderr, "verify_waiting_list th:%u abs:%lu\n", rb_th_serial(wth), (unsigned long)wth->sched.waiting_reason.data.timeout);
if (prev_wth) { if (prev_w) {
rb_hrtime_t timeout = wth->sched.waiting_reason.data.timeout; rb_hrtime_t timeout = w->data.timeout;
rb_hrtime_t prev_timeout = prev_wth->sched.waiting_reason.data.timeout; rb_hrtime_t prev_timeout = prev_w->data.timeout;
VM_ASSERT(timeout == 0 || prev_timeout <= timeout); VM_ASSERT(timeout == 0 || prev_timeout <= timeout);
} }
prev_wth = wth; prev_w = w;
} }
#endif #endif
} }
@ -632,16 +635,17 @@ kqueue_unregister_waiting(int fd, enum thread_sched_waiting_flag flags)
static bool static bool
kqueue_already_registered(int fd) kqueue_already_registered(int fd)
{ {
rb_thread_t *wth, *found_wth = NULL; struct rb_thread_sched_waiting *w, *found_w = NULL;
ccan_list_for_each(&timer_th.waiting, wth, sched.waiting_reason.node) {
ccan_list_for_each(&timer_th.waiting, w, node) {
// Similar to EEXIST in epoll_ctl, but more strict because it checks fd rather than flags // Similar to EEXIST in epoll_ctl, but more strict because it checks fd rather than flags
// for simplicity // for simplicity
if (wth->sched.waiting_reason.flags && wth->sched.waiting_reason.data.fd == fd) { if (w->flags && w->data.fd == fd) {
found_wth = wth; found_w = w;
break; break;
} }
} }
return found_wth != NULL; return found_w != NULL;
} }
#endif // HAVE_SYS_EVENT_H #endif // HAVE_SYS_EVENT_H
@ -786,20 +790,20 @@ timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting
VM_ASSERT(flags & thread_sched_waiting_timeout); VM_ASSERT(flags & thread_sched_waiting_timeout);
// insert th to sorted list (TODO: O(n)) // insert th to sorted list (TODO: O(n))
rb_thread_t *wth, *prev_wth = NULL; struct rb_thread_sched_waiting *w, *prev_w = NULL;
ccan_list_for_each(&timer_th.waiting, wth, sched.waiting_reason.node) { ccan_list_for_each(&timer_th.waiting, w, node) {
if ((wth->sched.waiting_reason.flags & thread_sched_waiting_timeout) && if ((w->flags & thread_sched_waiting_timeout) &&
wth->sched.waiting_reason.data.timeout < abs) { w->data.timeout < abs) {
prev_wth = wth; prev_w = w;
} }
else { else {
break; break;
} }
} }
if (prev_wth) { if (prev_w) {
ccan_list_add_after(&timer_th.waiting, &prev_wth->sched.waiting_reason.node, &th->sched.waiting_reason.node); ccan_list_add_after(&timer_th.waiting, &prev_w->node, &th->sched.waiting_reason.node);
} }
else { else {
ccan_list_add(&timer_th.waiting, &th->sched.waiting_reason.node); ccan_list_add(&timer_th.waiting, &th->sched.waiting_reason.node);