offsetof(type, foo.bar) is (arguably) a GCCism
TL;DR see http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2031.htm

Suppose we have:

    struct X { struct Y { z_t z; } y; } x;

then you _can't_ portably write offsetof(struct X, y.z). ISO C99 section 7.17 says nothing about such a situation. At least clang warns that this is an extension to the language (-Wextended-offsetof).

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61560 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
parent 8dc0c7c035
commit 5471bf9cc2
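For illustration, a minimal standalone sketch of the construct in question. struct X/struct Y follow the commit message; the two-step offsetof sum is one common portable workaround and is not part of this commit:

    #include <stddef.h>
    #include <stdio.h>

    typedef int z_t;                          /* stand-in for the z_t above */
    struct X { struct Y { z_t z; } y; } x;

    int main(void)
    {
        /* Extension (clang: -Wextended-offsetof); ISO C99 7.17 does not
         * clearly allow a nested member designator here:
         *
         *     size_t off = offsetof(struct X, y.z);
         */

        /* Portable alternative: sum single-level offsets. */
        size_t off = offsetof(struct X, y) + offsetof(struct Y, z);
        printf("offset of x.y.z: %zu\n", off);
        return 0;
    }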
thread_pthread.c
@@ -1221,11 +1221,13 @@ static void
 ubf_wakeup_all_threads(void)
 {
     rb_thread_t *th;
+    native_thread_data_t *dat;

     if (!ubf_threads_empty()) {
        native_mutex_lock(&ubf_list_lock);
-       list_for_each(&ubf_list_head, th,
-                     native_thread_data.ubf_list) {
+       list_for_each(&ubf_list_head, dat, ubf_list) {
+           th = (rb_thread_t *)(
+               ((char *)dat) - offsetof(rb_thread_t, native_thread_data));
            ubf_wakeup_thread(th);
        }
        native_mutex_unlock(&ubf_list_lock);
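The new loop body recovers the enclosing rb_thread_t from the native_thread_data_t pointer by hand, using only a single-level offsetof. A self-contained sketch of that pointer arithmetic (struct outer/struct inner and outer_of are illustrative names, not Ruby's):

    #include <stddef.h>

    struct inner { int i; };
    struct outer { long before; struct inner member; };

    /* Recover the containing struct from a pointer to one of its members.
     * Only offsetof(struct outer, member) is needed -- a single top-level
     * designator, which is unambiguously ISO C99. */
    static struct outer *outer_of(struct inner *p)
    {
        return (struct outer *)((char *)p - offsetof(struct outer, member));
    }

This is why the loop now iterates over dat, whose ubf_list member is top-level, instead of passing the nested designator native_thread_data.ubf_list to list_for_each.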
variable.c (12 changes)
@@ -1846,10 +1846,8 @@ struct autoload_state {
     VALUE result;
     ID id;
     VALUE thread;
-    union {
     struct list_node node;
     struct list_head head;
-    } waitq;
 };

 struct autoload_data_i {
@@ -2102,11 +2100,11 @@ autoload_reset(VALUE arg)
     if (need_wakeups) {
        struct autoload_state *cur = 0, *nxt;

-       list_for_each_safe(&state->waitq.head, cur, nxt, waitq.node) {
+       list_for_each_safe(&state->head, cur, nxt, node) {
            VALUE th = cur->thread;

            cur->thread = Qfalse;
-           list_del_init(&cur->waitq.node); /* idempotent */
+           list_del_init(&cur->node); /* idempotent */

            /*
             * cur is stored on the stack of cur->waiting_th,
@@ -2141,7 +2139,7 @@ autoload_sleep_done(VALUE arg)
     struct autoload_state *state = (struct autoload_state *)arg;

     if (state->thread != Qfalse && rb_thread_to_be_killed(state->thread)) {
-       list_del(&state->waitq.node); /* idempotent after list_del_init */
+       list_del(&state->node); /* idempotent after list_del_init */
     }

     return Qfalse;
@@ -2177,13 +2175,13 @@ rb_autoload_load(VALUE mod, ID id)
         * autoload_reset will wake up any threads added to this
         * iff the GVL is released during autoload_require
         */
-       list_head_init(&state.waitq.head);
+       list_head_init(&state.head);
     }
     else if (state.thread == ele->state->thread) {
        return Qfalse;
    }
    else {
-       list_add_tail(&ele->state->waitq.head, &state.waitq.node);
+       list_add_tail(&ele->state->head, &state.node);

        rb_ensure(autoload_sleep, (VALUE)&state,
                  autoload_sleep_done, (VALUE)&state);
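The variable.c side is the same story: ccan-style list macros expand their member argument into an offsetof on the containing type, so the old waitq.node argument produced the extended, nested form. A simplified sketch of why flattening the union helps (list_entry_ is a hypothetical reduction of such a macro, not the actual ccan API):

    #include <stddef.h>

    /* Recover the container from a node pointer, the way list iteration
     * macros do internally. */
    #define list_entry_(nodep, type, member) \
        ((type *)((char *)(nodep) - offsetof(type, member)))

    /* Old layout: list_entry_(n, struct autoload_state, waitq.node)
     *   expands to offsetof(struct autoload_state, waitq.node) -- the
     *   extended, nested designator.
     * New layout: list_entry_(n, struct autoload_state, node)
     *   expands to a plain single-member offsetof, valid ISO C99. */

With node and head as direct members of struct autoload_state, every offsetof generated by the list macros stays at a single level.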