Prefix ccan headers (#4568)

* Prefixed ccan headers

* Remove unprefixed names in ccan/build_assert

* Remove unprefixed names in ccan/check_type

* Remove unprefixed names in ccan/container_of

* Remove unprefixed names in ccan/list

Co-authored-by: Samuel Williams <samuel.williams@oriontransfer.co.nz>
Nobuyoshi Nakada 2022-03-30 16:36:31 +09:00 committed by GitHub
parent 8d27d00af5
commit 42a0bed351
Notes: git 2022-03-30 16:37:01 +09:00
Merged-By: ioquatix <samuel@codeotaku.com>
22 changed files with 567 additions and 565 deletions


@@ -3,7 +3,7 @@
 #define CCAN_BUILD_ASSERT_H
 /**
- * BUILD_ASSERT - assert a build-time dependency.
+ * CCAN_BUILD_ASSERT - assert a build-time dependency.
  * @cond: the compile-time condition which must be true.
  *
  * Your compile will fail if the condition isn't true, or can't be evaluated
@@ -15,15 +15,15 @@
  * static char *foo_to_char(struct foo *foo)
  * {
  *     // This code needs string to be at start of foo.
- *     BUILD_ASSERT(offsetof(struct foo, string) == 0);
+ *     CCAN_BUILD_ASSERT(offsetof(struct foo, string) == 0);
  *     return (char *)foo;
  * }
  */
-#define BUILD_ASSERT(cond) \
+#define CCAN_BUILD_ASSERT(cond) \
     do { (void) sizeof(char [1 - 2*!(cond)]); } while(0)
 /**
- * BUILD_ASSERT_OR_ZERO - assert a build-time dependency, as an expression.
+ * CCAN_BUILD_ASSERT_OR_ZERO - assert a build-time dependency, as an expression.
  * @cond: the compile-time condition which must be true.
  *
  * Your compile will fail if the condition isn't true, or can't be evaluated
@@ -32,9 +32,9 @@
  * Example:
  *     #define foo_to_char(foo) \
  *         ((char *)(foo) \
- *          + BUILD_ASSERT_OR_ZERO(offsetof(struct foo, string) == 0))
+ *          + CCAN_BUILD_ASSERT_OR_ZERO(offsetof(struct foo, string) == 0))
  */
-#define BUILD_ASSERT_OR_ZERO(cond) \
+#define CCAN_BUILD_ASSERT_OR_ZERO(cond) \
     (sizeof(char [1 - 2*!(cond)]) - 1)
 #endif /* CCAN_BUILD_ASSERT_H */
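
For reference, a minimal caller of the newly prefixed macros could look like the following sketch (hypothetical code, not part of this diff; it mirrors the examples in the header's own comments):

    #include <stddef.h>
    #include "ccan/build_assert/build_assert.h"

    struct foo {
        char string[16];
        int x;
    };

    static char *foo_to_char(struct foo *foo)
    {
        /* Fails to compile if `string` is ever moved away from offset 0. */
        CCAN_BUILD_ASSERT(offsetof(struct foo, string) == 0);
        return (char *)foo;
    }

    /* Expression form: contributes 0 to the arithmetic, or breaks the build. */
    #define foo_to_char_expr(foo) \
        ((char *)(foo) + CCAN_BUILD_ASSERT_OR_ZERO(offsetof(struct foo, string) == 0))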


@@ -3,7 +3,7 @@
 #define CCAN_CHECK_TYPE_H
 /**
- * check_type - issue a warning or build failure if type is not correct.
+ * ccan_check_type - issue a warning or build failure if type is not correct.
  * @expr: the expression whose type we should check (not evaluated).
  * @type: the exact type we expect the expression to be.
  *
@@ -11,7 +11,7 @@
  * argument is of the expected type. No type promotion of the expression is
  * done: an unsigned int is not the same as an int!
  *
- * check_type() always evaluates to 0.
+ * ccan_check_type() always evaluates to 0.
  *
  * If your compiler does not support typeof, then the best we can do is fail
  * to compile if the sizes of the types are unequal (a less complete check).
@@ -19,11 +19,11 @@
  * Example:
  *     // They should always pass a 64-bit value to _set_some_value!
  *     #define set_some_value(expr) \
- *         _set_some_value((check_type((expr), uint64_t), (expr)))
+ *         _set_some_value((ccan_check_type((expr), uint64_t), (expr)))
  */
 /**
- * check_types_match - issue a warning or build failure if types are not same.
+ * ccan_check_types_match - issue a warning or build failure if types are not same.
  * @expr1: the first expression (not evaluated).
  * @expr2: the second expression (not evaluated).
  *
@@ -31,7 +31,7 @@
  * arguments are of identical types. No type promotion of the expressions is
  * done: an unsigned int is not the same as an int!
  *
- * check_types_match() always evaluates to 0.
+ * ccan_check_types_match() always evaluates to 0.
  *
  * If your compiler does not support typeof, then the best we can do is fail
  * to compile if the sizes of the types are unequal (a less complete check).
@@ -39,25 +39,25 @@
  * Example:
  *     // Do subtraction to get to enclosing type, but make sure that
  *     // pointer is of correct type for that member.
- *     #define container_of(mbr_ptr, encl_type, mbr) \
- *         (check_types_match((mbr_ptr), &((encl_type *)0)->mbr), \
+ *     #define ccan_container_of(mbr_ptr, encl_type, mbr) \
+ *         (ccan_check_types_match((mbr_ptr), &((encl_type *)0)->mbr), \
  *         ((encl_type *) \
  *          ((char *)(mbr_ptr) - offsetof(enclosing_type, mbr))))
  */
 #if HAVE_TYPEOF
-#define check_type(expr, type) \
+#define ccan_check_type(expr, type) \
     ((typeof(expr) *)0 != (type *)0)
-#define check_types_match(expr1, expr2) \
+#define ccan_check_types_match(expr1, expr2) \
     ((typeof(expr1) *)0 != (typeof(expr2) *)0)
 #else
 #include "ccan/build_assert/build_assert.h"
 /* Without typeof, we can only test the sizes. */
-#define check_type(expr, type) \
-    BUILD_ASSERT_OR_ZERO(sizeof(expr) == sizeof(type))
-#define check_types_match(expr1, expr2) \
-    BUILD_ASSERT_OR_ZERO(sizeof(expr1) == sizeof(expr2))
+#define ccan_check_type(expr, type) \
+    CCAN_BUILD_ASSERT_OR_ZERO(sizeof(expr) == sizeof(type))
+#define ccan_check_types_match(expr1, expr2) \
+    CCAN_BUILD_ASSERT_OR_ZERO(sizeof(expr1) == sizeof(expr2))
 #endif /* HAVE_TYPEOF */
 #endif /* CCAN_CHECK_TYPE_H */
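
A hypothetical caller of the prefixed type-check macros (not part of this diff; both macros still evaluate to 0, so they slot into a comma expression exactly as before):

    #include <stdint.h>
    #include "ccan/check_type/check_type.h"

    static void _set_some_value(uint64_t v) { (void)v; }

    /* Warns or fails the build unless the caller passes exactly a uint64_t. */
    #define set_some_value(expr) \
        _set_some_value((ccan_check_type((expr), uint64_t), (expr)))

    int main(void)
    {
        uint64_t v = 42;
        set_some_value(v);                   /* OK: exact type match */
        (void)ccan_check_types_match(v, v);  /* evaluates to 0 */
        return 0;
    }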


@@ -4,7 +4,7 @@
 #include "ccan/check_type/check_type.h"
 /**
- * container_of - get pointer to enclosing structure
+ * ccan_container_of - get pointer to enclosing structure
  * @member_ptr: pointer to the structure member
  * @containing_type: the type this member is within
  * @member: the name of this member within the structure.
@@ -24,18 +24,18 @@
  *
  * static struct info *foo_to_info(struct foo *foo)
  * {
- *     return container_of(foo, struct info, my_foo);
+ *     return ccan_container_of(foo, struct info, my_foo);
  * }
  */
-#define container_of(member_ptr, containing_type, member) \
+#define ccan_container_of(member_ptr, containing_type, member) \
     ((containing_type *) \
      ((char *)(member_ptr) \
-      - container_off(containing_type, member)) \
-     + check_types_match(*(member_ptr), ((containing_type *)0)->member))
+      - ccan_container_off(containing_type, member)) \
+     + ccan_check_types_match(*(member_ptr), ((containing_type *)0)->member))
 /**
- * container_of_or_null - get pointer to enclosing structure, or NULL
+ * ccan_container_of_or_null - get pointer to enclosing structure, or NULL
  * @member_ptr: pointer to the structure member
  * @containing_type: the type this member is within
  * @member: the name of this member within the structure.
@@ -56,21 +56,21 @@
  *
  * static struct info *foo_to_info_allowing_null(struct foo *foo)
  * {
- *     return container_of_or_null(foo, struct info, my_foo);
+ *     return ccan_container_of_or_null(foo, struct info, my_foo);
  * }
  */
 static inline char *container_of_or_null_(void *member_ptr, size_t offset)
 {
     return member_ptr ? (char *)member_ptr - offset : NULL;
 }
-#define container_of_or_null(member_ptr, containing_type, member) \
+#define ccan_container_of_or_null(member_ptr, containing_type, member) \
     ((containing_type *) \
-     container_of_or_null_(member_ptr, \
-                           container_off(containing_type, member)) \
-     + check_types_match(*(member_ptr), ((containing_type *)0)->member))
+     ccan_container_of_or_null_(member_ptr, \
+                                ccan_container_off(containing_type, member)) \
+     + ccan_check_types_match(*(member_ptr), ((containing_type *)0)->member))
 /**
- * container_off - get offset to enclosing structure
+ * ccan_container_off - get offset to enclosing structure
  * @containing_type: the type this member is within
  * @member: the name of this member within the structure.
  *
@@ -89,15 +89,15 @@ static inline char *container_of_or_null_(void *member_ptr, size_t offset)
  *
  * static struct info *foo_to_info(struct foo *foo)
  * {
- *     size_t off = container_off(struct info, my_foo);
+ *     size_t off = ccan_container_off(struct info, my_foo);
  *     return (void *)((char *)foo - off);
  * }
  */
-#define container_off(containing_type, member) \
+#define ccan_container_off(containing_type, member) \
     offsetof(containing_type, member)
 /**
- * container_of_var - get pointer to enclosing structure using a variable
+ * ccan_container_of_var - get pointer to enclosing structure using a variable
  * @member_ptr: pointer to the structure member
  * @container_var: a pointer of same type as this member's container
  * @member: the name of this member within the structure.
@@ -108,21 +108,21 @@ static inline char *container_of_or_null_(void *member_ptr, size_t offset)
  * Example:
  * static struct info *foo_to_i(struct foo *foo)
  * {
- *     struct info *i = container_of_var(foo, i, my_foo);
+ *     struct info *i = ccan_container_of_var(foo, i, my_foo);
  *     return i;
  * }
  */
 #if HAVE_TYPEOF
-#define container_of_var(member_ptr, container_var, member) \
-    container_of(member_ptr, typeof(*container_var), member)
+#define ccan_container_of_var(member_ptr, container_var, member) \
+    ccan_container_of(member_ptr, typeof(*container_var), member)
 #else
-#define container_of_var(member_ptr, container_var, member) \
+#define ccan_container_of_var(member_ptr, container_var, member) \
     ((void *)((char *)(member_ptr) - \
-              container_off_var(container_var, member)))
+              ccan_container_off_var(container_var, member)))
 #endif
 /**
- * container_off_var - get offset of a field in enclosing structure
+ * ccan_container_off_var - get offset of a field in enclosing structure
  * @container_var: a pointer to a container structure
  * @member: the name of a member within the structure.
  *
@@ -132,10 +132,10 @@ static inline char *container_of_or_null_(void *member_ptr, size_t offset)
  *
  */
 #if HAVE_TYPEOF
-#define container_off_var(var, member) \
-    container_off(typeof(*var), member)
+#define ccan_container_off_var(var, member) \
+    ccan_container_off(typeof(*var), member)
 #else
-#define container_off_var(var, member) \
+#define ccan_container_off_var(var, member) \
     ((const char *)&(var)->member - (const char *)(var))
 #endif
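
A hypothetical caller of the prefixed container_of macro (not part of this diff), following the foo/info example from the header comments:

    #include <stddef.h>
    #include "ccan/container_of/container_of.h"

    struct foo {
        int a, b;
    };

    struct info {
        int some_other_field;
        struct foo my_foo;
    };

    static struct info *foo_to_info(struct foo *foo)
    {
        /* Walk back from the member pointer to its enclosing struct info;
         * the embedded type check rejects pointers of the wrong type. */
        return ccan_container_of(foo, struct info, my_foo);
    }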

File diff suppressed because it is too large.


@@ -2,15 +2,16 @@
 #ifndef CCAN_STR_H
 #define CCAN_STR_H
 /**
- * stringify - Turn expression into a string literal
+ * ccan_stringify - Turn expression into a string literal
  * @expr: any C expression
  *
  * Example:
  *     #define PRINT_COND_IF_FALSE(cond) \
- *         ((cond) || printf("%s is false!", stringify(cond)))
+ *         ((cond) || printf("%s is false!", ccan_stringify(cond)))
  */
-#define stringify(expr) stringify_1(expr)
+#define stringify(expr) ccan_stringify_1(expr)
+#define ccan_stringify(expr) ccan_stringify_1(expr)
 /* Double-indirection required to stringify expansions */
-#define stringify_1(expr) #expr
+#define ccan_stringify_1(expr) #expr
 #endif /* CCAN_STR_H */
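
Unlike the other headers, this hunk keeps the unprefixed stringify() as an alias next to the new ccan_stringify(). A hypothetical usage sketch (the "ccan/str/str.h" include path is an assumption, not shown in the diff):

    #include <stdio.h>
    #include "ccan/str/str.h"  /* assumed path for the CCAN_STR_H header */

    #define ANSWER 42

    int main(void)
    {
        /* Both forms go through ccan_stringify_1(), so macro arguments are
         * expanded before being turned into string literals. */
        printf("%s\n", ccan_stringify(ANSWER));  /* prints "42" */
        printf("%s\n", stringify(ANSWER));       /* same result via the kept alias */
        return 0;
    }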

gc.c

@@ -673,7 +673,7 @@ typedef struct mark_stack {
 typedef struct rb_heap_struct {
     struct heap_page *free_pages;
-    struct list_head pages;
+    struct ccan_list_head pages;
     struct heap_page *sweeping_page; /* iterator for .pages */
     struct heap_page *compact_cursor;
     uintptr_t compact_cursor_index;
@@ -918,7 +918,7 @@ struct heap_page {
     struct heap_page *free_next;
     uintptr_t start;
     RVALUE *freelist;
-    struct list_node page_node;
+    struct ccan_list_node page_node;
     bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
     /* the following three bitmaps are cleared at the beginning of full GC */
@@ -1423,7 +1423,7 @@ check_rvalue_consistency_force(const VALUE obj, int terminate)
         struct heap_page *page = NULL;
         for (int i = 0; i < SIZE_POOL_COUNT; i++) {
             rb_size_pool_t *size_pool = &size_pools[i];
-            list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
+            ccan_list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
                 if (page->start <= (uintptr_t)obj &&
                     (uintptr_t)obj < (page->start + (page->total_slots * size_pool->slot_size))) {
                     fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
@@ -1768,8 +1768,8 @@ rb_objspace_alloc(void)
         size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;
-        list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
-        list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
+        ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
+        ccan_list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
     }
     dont_gc_on();
@@ -1941,7 +1941,7 @@ heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *pa
 static void
 heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
 {
-    list_del(&page->page_node);
+    ccan_list_del(&page->page_node);
     heap->total_pages--;
     heap->total_slots -= page->total_slots;
 }
@@ -1964,7 +1964,7 @@ heap_pages_free_unused_pages(rb_objspace_t *objspace)
     bool has_pages_in_tomb_heap = FALSE;
     for (i = 0; i < SIZE_POOL_COUNT; i++) {
-        if (!list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
+        if (!ccan_list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
             has_pages_in_tomb_heap = TRUE;
             break;
         }
@@ -2102,7 +2102,7 @@ heap_page_resurrect(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
 {
     struct heap_page *page = 0, *next;
-    list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
+    ccan_list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
         asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
         if (page->freelist != NULL) {
             heap_unlink_page(objspace, &size_pool->tomb_heap, page);
@@ -2142,7 +2142,7 @@ heap_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *hea
     /* Adding to eden heap during incremental sweeping is forbidden */
     GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
     page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
-    list_add_tail(&heap->pages, &page->page_node);
+    ccan_list_add_tail(&heap->pages, &page->page_node);
     heap->total_pages++;
     heap->total_slots += page->total_slots;
 }
@@ -3610,7 +3610,7 @@ objspace_each_objects_try(VALUE arg)
         * an infinite loop. */
        struct heap_page *page = 0;
        size_t pages_count = 0;
-       list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
+       ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
            pages[pages_count] = page;
            pages_count++;
        }
@@ -3624,7 +3624,7 @@ objspace_each_objects_try(VALUE arg)
        size_t pages_count = data->pages_counts[i];
        struct heap_page **pages = data->pages[i];
-       struct heap_page *page = list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
+       struct heap_page *page = ccan_list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
        for (size_t i = 0; i < pages_count; i++) {
            /* If we have reached the end of the linked list then there are no
             * more pages, so break. */
@@ -3641,7 +3641,7 @@ objspace_each_objects_try(VALUE arg)
                break;
            }
-           page = list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
+           page = ccan_list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
        }
    }
@@ -5028,7 +5028,7 @@ try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page,
         struct heap_page * next;
-        next = list_prev(&heap->pages, cursor, page_node);
+        next = ccan_list_prev(&heap->pages, cursor, page_node);
         /* Protect the current cursor since it probably has T_MOVED slots. */
         lock_page_body(objspace, GET_PAGE_BODY(cursor->start));
@@ -5055,7 +5055,7 @@ gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
     while (cursor) {
         unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
-        cursor = list_next(&heap->pages, cursor, page_node);
+        cursor = ccan_list_next(&heap->pages, cursor, page_node);
     }
 }
@@ -5610,7 +5610,7 @@ heap_page_freelist_append(struct heap_page *page, RVALUE *freelist)
 static void
 gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
 {
-    heap->sweeping_page = list_top(&heap->pages, struct heap_page, page_node);
+    heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
     heap->free_pages = NULL;
 #if GC_ENABLE_INCREMENTAL_MARK
     heap->pooled_pages = NULL;
@@ -5636,7 +5636,7 @@ gc_sweep_start(rb_objspace_t *objspace)
     }
     rb_ractor_t *r = NULL;
-    list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
+    ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
         rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
     }
 }
@@ -5763,7 +5763,7 @@ gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *hea
         gc_sweep_page(objspace, size_pool, heap, &ctx);
         int free_slots = ctx.freed_slots + ctx.empty_slots;
-        heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);
+        heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
         if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
             heap_pages_freeable_pages > 0 &&
@@ -5936,11 +5936,11 @@ gc_compact_start(rb_objspace_t *objspace)
     for (int i = 0; i < SIZE_POOL_COUNT; i++) {
         rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
-        list_for_each(&heap->pages, page, page_node) {
+        ccan_list_for_each(&heap->pages, page, page_node) {
             page->flags.before_sweep = TRUE;
         }
-        heap->compact_cursor = list_tail(&heap->pages, struct heap_page, page_node);
+        heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
         heap->compact_cursor_index = 0;
     }
@@ -5986,7 +5986,7 @@ gc_sweep(rb_objspace_t *objspace)
         }
         for (int i = 0; i < SIZE_POOL_COUNT; i++) {
-            list_for_each(&(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages), page, page_node) {
+            ccan_list_for_each(&(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages), page, page_node) {
                 page->flags.before_sweep = TRUE;
             }
         }
@@ -7767,12 +7767,12 @@ gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
 }
 static int
-gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
+gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
 {
     int remembered_old_objects = 0;
     struct heap_page *page = 0;
-    list_for_each(head, page, page_node) {
+    ccan_list_for_each(head, page, page_node) {
         asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
         RVALUE *p = page->freelist;
         while (p) {
@@ -8008,7 +8008,7 @@ gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
 {
     struct heap_page *page = 0;
-    list_for_each(&heap->pages, page, page_node) {
+    ccan_list_for_each(&heap->pages, page, page_node) {
         bits_t *mark_bits = page->mark_bits;
         bits_t *wbun_bits = page->wb_unprotected_bits;
         uintptr_t p = page->start;
@@ -8425,7 +8425,7 @@ rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
 #endif
     gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
-    list_for_each(&heap->pages, page, page_node) {
+    ccan_list_for_each(&heap->pages, page, page_node) {
         if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
             uintptr_t p = page->start;
             bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
@@ -8472,7 +8472,7 @@ rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
 {
     struct heap_page *page = 0;
-    list_for_each(&heap->pages, page, page_node) {
+    ccan_list_for_each(&heap->pages, page, page_node) {
         memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
         memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
         memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
@@ -9586,7 +9586,7 @@ gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
         struct heap_page *page = 0, **page_list = malloc(size);
         size_t i = 0;
-        list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
+        ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
             page_list[i++] = page;
             GC_ASSERT(page);
         }
@@ -9598,10 +9598,10 @@ gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
         ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);
         /* Reset the eden heap */
-        list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
+        ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
         for (i = 0; i < total_pages; i++) {
-            list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
+            ccan_list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
             if (page_list[i]->free_slots != 0) {
                 heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
             }
@@ -10276,7 +10276,7 @@ gc_update_references(rb_objspace_t *objspace)
         rb_size_pool_t *size_pool = &size_pools[i];
         rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
-        list_for_each(&heap->pages, page, page_node) {
+        ccan_list_for_each(&heap->pages, page, page_node) {
             uintptr_t start = (uintptr_t)page->start;
             uintptr_t end = start + (page->total_slots * size_pool->slot_size);
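
The gc.c changes are a mechanical rename from the unprefixed list_* API to the ccan_list_* one. As a standalone illustration of the renamed intrusive-list API, a hypothetical sketch (the "ccan/list/list.h" include path is assumed, and the struct is simplified from struct heap_page):

    #include <stdio.h>
    #include "ccan/list/list.h"

    struct page_like {
        int id;
        struct ccan_list_node page_node;  /* embedded link, as in struct heap_page */
    };

    int main(void)
    {
        struct ccan_list_head pages;
        struct page_like a = { .id = 1 }, b = { .id = 2 }, *p;

        ccan_list_head_init(&pages);               /* was list_head_init() */
        ccan_list_add_tail(&pages, &a.page_node);  /* was list_add_tail() */
        ccan_list_add_tail(&pages, &b.page_node);

        ccan_list_for_each(&pages, p, page_node) { /* was list_for_each() */
            printf("page %d\n", p->id);
        }

        ccan_list_del(&a.page_node);               /* was list_del() */
        printf("empty? %d\n", ccan_list_empty(&pages) ? 1 : 0);  /* 0: b is still linked */
        return 0;
    }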

io.c

@@ -5191,7 +5191,7 @@ static void clear_codeconv(rb_io_t *fptr);
 static void
 fptr_finalize_flush(rb_io_t *fptr, int noraise, int keepgvl,
-                    struct list_head *busy)
+                    struct ccan_list_head *busy)
 {
     VALUE err = Qnil;
     int fd = fptr->fd;
@@ -5233,7 +5233,7 @@ fptr_finalize_flush(rb_io_t *fptr, int noraise, int keepgvl,
         // Ensure waiting_fd users do not hit EBADF.
         if (busy) {
             // Wait for them to exit before we call close().
-            do rb_thread_schedule(); while (!list_empty(busy));
+            do rb_thread_schedule(); while (!ccan_list_empty(busy));
         }
         // Disable for now.
@@ -5378,16 +5378,16 @@ rb_io_memsize(const rb_io_t *fptr)
 # define KEEPGVL FALSE
 #endif
-int rb_notify_fd_close(int fd, struct list_head *);
+int rb_notify_fd_close(int fd, struct ccan_list_head *);
 static rb_io_t *
 io_close_fptr(VALUE io)
 {
     rb_io_t *fptr;
     VALUE write_io;
     rb_io_t *write_fptr;
-    struct list_head busy;
+    struct ccan_list_head busy;
-    list_head_init(&busy);
+    ccan_list_head_init(&busy);
     write_io = GetWriteIO(io);
     if (io != write_io) {
         write_fptr = RFILE(write_io)->fptr;

mjit.c

@@ -117,7 +117,7 @@ mjit_update_references(const rb_iseq_t *iseq)
     // `ISEQ_BODY(iseq)->jit_unit` anymore (because new one replaces that). So we need to check them too.
     // TODO: we should be able to reduce the number of units checked here.
     struct rb_mjit_unit *unit = NULL;
-    list_for_each(&stale_units.head, unit, unode) {
+    ccan_list_for_each(&stale_units.head, unit, unode) {
         if (unit->iseq == iseq) {
             unit->iseq = (rb_iseq_t *)rb_gc_location((VALUE)unit->iseq);
         }
@@ -145,7 +145,7 @@ mjit_free_iseq(const rb_iseq_t *iseq)
     // `ISEQ_BODY(iseq)->jit_unit` anymore (because new one replaces that). So we need to check them too.
     // TODO: we should be able to reduce the number of units checked here.
     struct rb_mjit_unit *unit = NULL;
-    list_for_each(&stale_units.head, unit, unode) {
+    ccan_list_for_each(&stale_units.head, unit, unode) {
         if (unit->iseq == iseq) {
             unit->iseq = NULL;
         }
@@ -161,8 +161,8 @@ free_list(struct rb_mjit_unit_list *list, bool close_handle_p)
 {
     struct rb_mjit_unit *unit = 0, *next;
-    list_for_each_safe(&list->head, unit, next, unode) {
-        list_del(&unit->unode);
+    ccan_list_for_each_safe(&list->head, unit, next, unode) {
+        ccan_list_del(&unit->unode);
         if (!close_handle_p) unit->handle = NULL; /* Skip dlclose in free_unit() */
         if (list == &stale_units) { // `free_unit(unit)` crashes after GC.compact on `stale_units`
@@ -886,7 +886,7 @@ skip_cleaning_object_files(struct rb_mjit_unit_list *list)
     struct rb_mjit_unit *unit = NULL, *next;
     // No mutex for list, assuming MJIT worker does not exist yet since it's immediately after fork.
-    list_for_each_safe(&list->head, unit, next, unode) {
+    ccan_list_for_each_safe(&list->head, unit, next, unode) {
 #if defined(_WIN32) // mswin doesn't reach here either. This is for MinGW.
         if (unit->so_file) unit->so_file = NULL;
 #endif
@@ -930,7 +930,7 @@ mjit_dump_total_calls(void)
 {
     struct rb_mjit_unit *unit;
     fprintf(stderr, "[MJIT_COUNTER] total_calls of active_units:\n");
-    list_for_each(&active_units.head, unit, unode) {
+    ccan_list_for_each(&active_units.head, unit, unode) {
         const rb_iseq_t *iseq = unit->iseq;
         fprintf(stderr, "%8ld: %s@%s:%d\n", ISEQ_BODY(iseq)->total_calls, RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
                 RSTRING_PTR(rb_iseq_path(iseq)), FIX2INT(ISEQ_BODY(iseq)->location.first_lineno));
@@ -1036,7 +1036,7 @@ mjit_mark(void)
             i++;
         }
     }
-    list_for_each(&active_units.head, unit, unode) {
+    ccan_list_for_each(&active_units.head, unit, unode) {
         iseqs[i] = unit->iseq;
         i++;
     }


@@ -152,7 +152,7 @@ typedef intptr_t pid_t;
 // The unit structure that holds metadata of ISeq for MJIT.
 struct rb_mjit_unit {
-    struct list_node unode;
+    struct ccan_list_node unode;
     // Unique order number of unit.
     int id;
     // Dlopen handle of the loaded object file.
@@ -175,7 +175,7 @@ struct rb_mjit_unit {
 // Linked list of struct rb_mjit_unit.
 struct rb_mjit_unit_list {
-    struct list_head head;
+    struct ccan_list_head head;
     int length; // the list length
 };
@@ -206,13 +206,13 @@ bool mjit_call_p = false;
 // Priority queue of iseqs waiting for JIT compilation.
 // This variable is a pointer to head unit of the queue.
-static struct rb_mjit_unit_list unit_queue = { LIST_HEAD_INIT(unit_queue.head) };
+static struct rb_mjit_unit_list unit_queue = { CCAN_LIST_HEAD_INIT(unit_queue.head) };
 // List of units which are successfully compiled.
-static struct rb_mjit_unit_list active_units = { LIST_HEAD_INIT(active_units.head) };
+static struct rb_mjit_unit_list active_units = { CCAN_LIST_HEAD_INIT(active_units.head) };
 // List of compacted so files which will be cleaned up by `free_list()` in `mjit_finish()`.
-static struct rb_mjit_unit_list compact_units = { LIST_HEAD_INIT(compact_units.head) };
+static struct rb_mjit_unit_list compact_units = { CCAN_LIST_HEAD_INIT(compact_units.head) };
 // List of units before recompilation and just waiting for dlclose().
-static struct rb_mjit_unit_list stale_units = { LIST_HEAD_INIT(stale_units.head) };
+static struct rb_mjit_unit_list stale_units = { CCAN_LIST_HEAD_INIT(stale_units.head) };
 // The number of so far processed ISEQs, used to generate unique id.
 static int current_unit_num;
 // A mutex for conitionals and critical sections.
@@ -370,7 +370,7 @@ add_to_list(struct rb_mjit_unit *unit, struct rb_mjit_unit_list *list)
     (void)RB_DEBUG_COUNTER_INC_IF(mjit_length_compact_units, list == &compact_units);
     (void)RB_DEBUG_COUNTER_INC_IF(mjit_length_stale_units, list == &stale_units);
-    list_add_tail(&list->head, &unit->unode);
+    ccan_list_add_tail(&list->head, &unit->unode);
     list->length++;
 }
@@ -384,7 +384,7 @@ remove_from_list(struct rb_mjit_unit *unit, struct rb_mjit_unit_list *list)
     rb_debug_counter_add(RB_DEBUG_COUNTER_mjit_length_stale_units, -1, list == &stale_units);
 #endif
-    list_del(&unit->unode);
+    ccan_list_del(&unit->unode);
     list->length--;
 }
@@ -503,7 +503,7 @@ get_from_list(struct rb_mjit_unit_list *list)
     // Find iseq with max total_calls
     struct rb_mjit_unit *unit = NULL, *next, *best = NULL;
-    list_for_each_safe(&list->head, unit, next, unode) {
+    ccan_list_for_each_safe(&list->head, unit, next, unode) {
         if (unit->iseq == NULL) { // ISeq is GCed.
             remove_from_list(unit, list);
             free_unit(unit);
@@ -977,7 +977,7 @@ compile_compact_jit_code(char* c_file)
     // We need to check again here because we could've waited on GC above
     bool iseq_gced = false;
     struct rb_mjit_unit *child_unit = 0, *next;
-    list_for_each_safe(&active_units.head, child_unit, next, unode) {
+    ccan_list_for_each_safe(&active_units.head, child_unit, next, unode) {
         if (child_unit->iseq == NULL) { // ISeq is GC-ed
             iseq_gced = true;
             verbose(1, "JIT compaction: A method for JIT code u%d is obsoleted. Compaction will be skipped.", child_unit->id);
@@ -1002,7 +1002,7 @@ compile_compact_jit_code(char* c_file)
     // TODO: Consider using a more granular lock after we implement inlining across
     // compacted functions (not done yet).
     bool success = true;
-    list_for_each(&active_units.head, child_unit, unode) {
+    ccan_list_for_each(&active_units.head, child_unit, unode) {
         CRITICAL_SECTION_START(3, "before set_compiling_iseqs");
         success &= set_compiling_iseqs(child_unit->iseq);
         CRITICAL_SECTION_FINISH(3, "after set_compiling_iseqs");
@@ -1080,7 +1080,7 @@ compact_all_jit_code(void)
         remove_so_file(so_file, unit);
         CRITICAL_SECTION_START(3, "in compact_all_jit_code to read list");
-        list_for_each(&active_units.head, cur, unode) {
+        ccan_list_for_each(&active_units.head, cur, unode) {
             void *func;
             char funcname[MAXPATHLEN];
             sprint_funcname(funcname, cur);
@@ -1347,7 +1347,7 @@ unload_units(void)
     // For now, we don't unload units when ISeq is GCed. We should
     // unload such ISeqs first here.
-    list_for_each_safe(&active_units.head, unit, next, unode) {
+    ccan_list_for_each_safe(&active_units.head, unit, next, unode) {
         if (unit->iseq == NULL) { // ISeq is GCed.
             remove_from_list(unit, &active_units);
             free_unit(unit);
@@ -1355,7 +1355,7 @@ unload_units(void)
     }
     // Detect units which are in use and can't be unloaded.
-    list_for_each(&active_units.head, unit, unode) {
+    ccan_list_for_each(&active_units.head, unit, unode) {
         assert(unit->iseq != NULL && unit->handle != NULL);
         unit->used_code_p = false;
     }
@@ -1372,7 +1372,7 @@ unload_units(void)
     while (true) {
         // Calculate the next max total_calls in unit_queue
         long unsigned max_queue_calls = 0;
-        list_for_each(&unit_queue.head, unit, unode) {
+        ccan_list_for_each(&unit_queue.head, unit, unode) {
             if (unit->iseq != NULL && max_queue_calls < ISEQ_BODY(unit->iseq)->total_calls
                 && ISEQ_BODY(unit->iseq)->total_calls < prev_queue_calls) {
                 max_queue_calls = ISEQ_BODY(unit->iseq)->total_calls;
@@ -1381,7 +1381,7 @@ unload_units(void)
         prev_queue_calls = max_queue_calls;
         bool unloaded_p = false;
-        list_for_each_safe(&active_units.head, unit, next, unode) {
+        ccan_list_for_each_safe(&active_units.head, unit, next, unode) {
             if (unit->used_code_p) // We can't unload code on stack.
                 continue;
@@ -1441,7 +1441,7 @@ mjit_worker(void)
         // Wait until a unit becomes available
         CRITICAL_SECTION_START(3, "in worker dequeue");
-        while ((list_empty(&unit_queue.head) || active_units.length >= mjit_opts.max_cache_size) && !stop_worker_p) {
+        while ((ccan_list_empty(&unit_queue.head) || active_units.length >= mjit_opts.max_cache_size) && !stop_worker_p) {
            rb_native_cond_wait(&mjit_worker_wakeup, &mjit_engine_mutex);
            verbose(3, "Getting wakeup from client");
@@ -1449,7 +1449,7 @@ mjit_worker(void)
            if (pending_stale_p) {
                pending_stale_p = false;
                struct rb_mjit_unit *next;
-               list_for_each_safe(&active_units.head, unit, next, unode) {
+               ccan_list_for_each_safe(&active_units.head, unit, next, unode) {
                    if (unit->stale_p) {
                        unit->stale_p = false;
                        remove_from_list(unit, &active_units);
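
The MJIT unit lists also switch their static initializer from LIST_HEAD_INIT to CCAN_LIST_HEAD_INIT. A simplified, hypothetical sketch of that pattern (not the actual MJIT structures; the "ccan/list/list.h" path is assumed):

    #include "ccan/list/list.h"

    struct unit {
        struct ccan_list_node unode;
        int id;
    };

    struct unit_list {
        struct ccan_list_head head;
        int length;
    };

    /* was: { LIST_HEAD_INIT(active_units.head) } */
    static struct unit_list active_units = { CCAN_LIST_HEAD_INIT(active_units.head), 0 };

    static void add_to_list(struct unit *u)
    {
        ccan_list_add_tail(&active_units.head, &u->unode);
        active_units.length++;
    }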


@@ -1076,7 +1076,7 @@ do_waitpid(rb_pid_t pid, int *st, int flags)
 #define WAITPID_LOCK_ONLY ((struct waitpid_state *)-1)
 struct waitpid_state {
-    struct list_node wnode;
+    struct ccan_list_node wnode;
     rb_execution_context_t *ec;
     rb_nativethread_cond_t *cond;
     rb_pid_t ret;
@@ -1110,12 +1110,12 @@ waitpid_signal(struct waitpid_state *w)
 // Used for VM memsize reporting. Returns the size of a list of waitpid_state
 // structs. Defined here because the struct definition lives here as well.
 size_t
-rb_vm_memsize_waiting_list(struct list_head *waiting_list)
+rb_vm_memsize_waiting_list(struct ccan_list_head *waiting_list)
 {
     struct waitpid_state *waitpid = 0;
     size_t size = 0;
-    list_for_each(waiting_list, waitpid, wnode) {
+    ccan_list_for_each(waiting_list, waitpid, wnode) {
         size += sizeof(struct waitpid_state);
     }
@@ -1132,10 +1132,10 @@ sigwait_fd_migrate_sleeper(rb_vm_t *vm)
 {
     struct waitpid_state *w = 0;
-    list_for_each(&vm->waiting_pids, w, wnode) {
+    ccan_list_for_each(&vm->waiting_pids, w, wnode) {
         if (waitpid_signal(w)) return;
     }
-    list_for_each(&vm->waiting_grps, w, wnode) {
+    ccan_list_for_each(&vm->waiting_grps, w, wnode) {
         if (waitpid_signal(w)) return;
     }
 }
@@ -1152,18 +1152,18 @@ rb_sigwait_fd_migrate(rb_vm_t *vm)
 extern volatile unsigned int ruby_nocldwait; /* signal.c */
 /* called by timer thread or thread which acquired sigwait_fd */
 static void
-waitpid_each(struct list_head *head)
+waitpid_each(struct ccan_list_head *head)
 {
     struct waitpid_state *w = 0, *next;
-    list_for_each_safe(head, w, next, wnode) {
+    ccan_list_for_each_safe(head, w, next, wnode) {
         rb_pid_t ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
         if (!ret) continue;
         if (ret == -1) w->errnum = errno;
         w->ret = ret;
-        list_del_init(&w->wnode);
+        ccan_list_del_init(&w->wnode);
         waitpid_signal(w);
     }
 }
@@ -1177,11 +1177,11 @@ ruby_waitpid_all(rb_vm_t *vm)
 #if RUBY_SIGCHLD
     rb_native_mutex_lock(&vm->waitpid_lock);
     waitpid_each(&vm->waiting_pids);
-    if (list_empty(&vm->waiting_pids)) {
+    if (ccan_list_empty(&vm->waiting_pids)) {
         waitpid_each(&vm->waiting_grps);
     }
     /* emulate SA_NOCLDWAIT */
-    if (list_empty(&vm->waiting_pids) && list_empty(&vm->waiting_grps)) {
+    if (ccan_list_empty(&vm->waiting_pids) && ccan_list_empty(&vm->waiting_grps)) {
         while (ruby_nocldwait && do_waitpid(-1, 0, WNOHANG) > 0)
             ; /* keep looping */
     }
@@ -1222,7 +1222,7 @@ ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options,
     assert(!ruby_thread_has_gvl_p() && "must not have GVL");
     waitpid_state_init(&w, pid, options);
-    if (w.pid > 0 || list_empty(&vm->waiting_pids))
+    if (w.pid > 0 || ccan_list_empty(&vm->waiting_pids))
         w.ret = do_waitpid(w.pid, &w.status, w.options | WNOHANG);
     if (w.ret) {
         if (w.ret == -1) w.errnum = errno;
@@ -1231,7 +1231,7 @@ ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options,
         int sigwait_fd = -1;
         w.ec = 0;
-        list_add(w.pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w.wnode);
+        ccan_list_add(w.pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w.wnode);
         do {
             if (sigwait_fd < 0)
                 sigwait_fd = rb_sigwait_fd_get(0);
@@ -1247,7 +1247,7 @@ ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options,
                 rb_native_cond_wait(w.cond, &vm->waitpid_lock);
             }
         } while (!w.ret);
-        list_del(&w.wnode);
+        ccan_list_del(&w.wnode);
         /* we're done, maybe other waitpid callers are not: */
         if (sigwait_fd >= 0) {
@@ -1280,14 +1280,14 @@ waitpid_cleanup(VALUE x)
     struct waitpid_state *w = (struct waitpid_state *)x;
     /*
-     * XXX w->ret is sometimes set but list_del is still needed, here,
-     * Not sure why, so we unconditionally do list_del here:
+     * XXX w->ret is sometimes set but ccan_list_del is still needed, here,
+     * Not sure why, so we unconditionally do ccan_list_del here:
      */
     if (TRUE || w->ret == 0) {
         rb_vm_t *vm = rb_ec_vm_ptr(w->ec);
         rb_native_mutex_lock(&vm->waitpid_lock);
-        list_del(&w->wnode);
+        ccan_list_del(&w->wnode);
         rb_native_mutex_unlock(&vm->waitpid_lock);
     }
@@ -1307,7 +1307,7 @@ waitpid_wait(struct waitpid_state *w)
     */
    rb_native_mutex_lock(&vm->waitpid_lock);
-   if (w->pid > 0 || list_empty(&vm->waiting_pids)) {
+   if (w->pid > 0 || ccan_list_empty(&vm->waiting_pids)) {
        w->ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
    }
@@ -1323,7 +1323,7 @@ waitpid_wait(struct waitpid_state *w)
    if (need_sleep) {
        w->cond = 0;
        /* order matters, favor specified PIDs rather than -1 or 0 */
-       list_add(w->pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w->wnode);
+       ccan_list_add(w->pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w->wnode);
    }
    rb_native_mutex_unlock(&vm->waitpid_lock);
@@ -4229,7 +4229,7 @@ retry_fork_async_signal_safe(struct rb_process_status *status, int *ep,
     if (waitpid_lock) {
         if (pid > 0 && w != WAITPID_LOCK_ONLY) {
             w->pid = pid;
-            list_add(&GET_VM()->waiting_pids, &w->wnode);
+            ccan_list_add(&GET_VM()->waiting_pids, &w->wnode);
         }
         rb_native_mutex_unlock(waitpid_lock);
     }


@@ -202,7 +202,7 @@ ractor_mark(void *ptr)
     if (r->threads.cnt > 0) {
         rb_thread_t *th = 0;
-        list_for_each(&r->threads.set, th, lt_node) {
+        ccan_list_for_each(&r->threads.set, th, lt_node) {
             VM_ASSERT(th != NULL);
             rb_gc_mark(th->self);
         }
@@ -1414,7 +1414,7 @@ vm_insert_ractor0(rb_vm_t *vm, rb_ractor_t *r, bool single_ractor_mode)
     RUBY_DEBUG_LOG("r:%u ractor.cnt:%u++", r->pub.id, vm->ractor.cnt);
     VM_ASSERT(single_ractor_mode || RB_VM_LOCKED_P());
-    list_add_tail(&vm->ractor.set, &r->vmlr_node);
+    ccan_list_add_tail(&vm->ractor.set, &r->vmlr_node);
     vm->ractor.cnt++;
 }
@@ -1483,7 +1483,7 @@ vm_remove_ractor(rb_vm_t *vm, rb_ractor_t *cr)
                        vm->ractor.cnt, vm->ractor.sync.terminate_waiting);
         VM_ASSERT(vm->ractor.cnt > 0);
-        list_del(&cr->vmlr_node);
+        ccan_list_del(&cr->vmlr_node);
         if (vm->ractor.cnt <= 2 && vm->ractor.sync.terminate_waiting) {
             rb_native_cond_signal(&vm->ractor.sync.terminate_cond);
@@ -1550,7 +1550,7 @@ void rb_gvl_init(rb_global_vm_lock_t *gvl);
 void
 rb_ractor_living_threads_init(rb_ractor_t *r)
 {
-    list_head_init(&r->threads.set);
+    ccan_list_head_init(&r->threads.set);
     r->threads.cnt = 0;
     r->threads.blocking_cnt = 0;
 }
@@ -1741,7 +1741,7 @@ rb_ractor_thread_list(rb_ractor_t *r)
     ts = ALLOCA_N(VALUE, r->threads.cnt);
     ts_cnt = 0;
-    list_for_each(&r->threads.set, th, lt_node) {
+    ccan_list_for_each(&r->threads.set, th, lt_node) {
         switch (th->status) {
           case THREAD_RUNNABLE:
           case THREAD_STOPPED:
@@ -1770,7 +1770,7 @@ rb_ractor_living_threads_insert(rb_ractor_t *r, rb_thread_t *th)
     RACTOR_LOCK(r);
     {
         RUBY_DEBUG_LOG("r(%d)->threads.cnt:%d++", r->pub.id, r->threads.cnt);
-        list_add_tail(&r->threads.set, &th->lt_node);
+        ccan_list_add_tail(&r->threads.set, &th->lt_node);
         r->threads.cnt++;
     }
     RACTOR_UNLOCK(r);
@@ -1853,7 +1853,7 @@ rb_ractor_living_threads_remove(rb_ractor_t *cr, rb_thread_t *th)
     else {
         RACTOR_LOCK(cr);
         {
-            list_del(&th->lt_node);
+            ccan_list_del(&th->lt_node);
             cr->threads.cnt--;
         }
         RACTOR_UNLOCK(cr);
@@ -1940,7 +1940,7 @@ ractor_terminal_interrupt_all(rb_vm_t *vm)
     if (vm->ractor.cnt > 1) {
         // send terminate notification to all ractors
         rb_ractor_t *r = 0;
-        list_for_each(&vm->ractor.set, r, vmlr_node) {
+        ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
             if (r != vm->ractor.main_ractor) {
                 rb_ractor_terminate_interrupt_main_thread(r);
             }
@@ -2119,7 +2119,7 @@ rb_ractor_dump(void)
     rb_vm_t *vm = GET_VM();
     rb_ractor_t *r = 0;
-    list_for_each(&vm->ractor.set, r, vmlr_node) {
+    ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
         if (r != vm->ractor.main_ractor) {
             fprintf(stderr, "r:%u (%s)\n", r->pub.id, ractor_status_str(r->status_));
         }


@@ -91,7 +91,7 @@ struct rb_ractor_struct {
     // thread management
     struct {
-        struct list_head set;
+        struct ccan_list_head set;
         unsigned int cnt;
         unsigned int blocking_cnt;
         unsigned int sleeper;
@@ -126,7 +126,7 @@ struct rb_ractor_struct {
         ractor_terminated,
     } status_;
-    struct list_node vmlr_node;
+    struct ccan_list_node vmlr_node;
     // ractor local data


@@ -150,7 +150,7 @@ void rb_sigwait_fd_migrate(rb_vm_t *); /* process.c */
 static volatile int system_working = 1;
 struct waiting_fd {
-    struct list_node wfd_node; /* <=> vm.waiting_fds */
+    struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
     rb_thread_t *th;
     int fd;
 };
@@ -500,7 +500,7 @@ terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
 {
     rb_thread_t *th = 0;
-    list_for_each(&r->threads.set, th, lt_node) {
+    ccan_list_for_each(&r->threads.set, th, lt_node) {
         if (th != main_thread) {
             thread_debug("terminate_all: begin (thid: %"PRI_THREAD_ID", status: %s)\n",
                          thread_id_str(th), thread_status_name(th, TRUE));
@@ -1799,7 +1799,7 @@ rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
     RB_VM_LOCK_ENTER();
     {
-        list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &waiting_fd.wfd_node);
+        ccan_list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &waiting_fd.wfd_node);
     }
     RB_VM_LOCK_LEAVE();
@@ -1814,11 +1814,11 @@ rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
     /*
      * must be deleted before jump
-     * this will delete either from waiting_fds or on-stack LIST_HEAD(busy)
+     * this will delete either from waiting_fds or on-stack CCAN_LIST_HEAD(busy)
      */
     RB_VM_LOCK_ENTER();
     {
-        list_del(&waiting_fd.wfd_node);
+        ccan_list_del(&waiting_fd.wfd_node);
     }
     RB_VM_LOCK_LEAVE();
@@ -2574,20 +2574,20 @@ rb_ec_reset_raised(rb_execution_context_t *ec)
 }
 int
-rb_notify_fd_close(int fd, struct list_head *busy)
+rb_notify_fd_close(int fd, struct ccan_list_head *busy)
 {
     rb_vm_t *vm = GET_THREAD()->vm;
     struct waiting_fd *wfd = 0, *next;
     RB_VM_LOCK_ENTER();
     {
-        list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
+        ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
             if (wfd->fd == fd) {
                 rb_thread_t *th = wfd->th;
                 VALUE err;
-                list_del(&wfd->wfd_node);
-                list_add(busy, &wfd->wfd_node);
+                ccan_list_del(&wfd->wfd_node);
+                ccan_list_add(busy, &wfd->wfd_node);
                 err = th->vm->special_exceptions[ruby_error_stream_closed];
                 rb_threadptr_pending_interrupt_enque(th, err);
@@ -2597,17 +2597,17 @@ rb_notify_fd_close(int fd, struct list_head *busy)
     }
     RB_VM_LOCK_LEAVE();
-    return !list_empty(busy);
+    return !ccan_list_empty(busy);
 }
 void
 rb_thread_fd_close(int fd)
 {
-    struct list_head busy;
-    list_head_init(&busy);
+    struct ccan_list_head busy;
+    ccan_list_head_init(&busy);
     if (rb_notify_fd_close(fd, &busy)) {
-        do rb_thread_schedule(); while (!list_empty(&busy));
+        do rb_thread_schedule(); while (!ccan_list_empty(&busy));
     }
 }
@@ -4353,7 +4353,7 @@ rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
     RB_VM_LOCK_ENTER();
     {
-        list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
+        ccan_list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
     }
     RB_VM_LOCK_LEAVE();
@@ -4404,7 +4404,7 @@ rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
     RB_VM_LOCK_ENTER();
     {
-        list_del(&wfd.wfd_node);
+        ccan_list_del(&wfd.wfd_node);
     }
     RB_VM_LOCK_LEAVE();
@@ -4480,7 +4480,7 @@ select_single_cleanup(VALUE ptr)
 {
     struct select_args *args = (struct select_args *)ptr;
-    list_del(&args->wfd.wfd_node);
+    ccan_list_del(&args->wfd.wfd_node);
     if (args->read) rb_fd_term(args->read);
     if (args->write) rb_fd_term(args->write);
     if (args->except) rb_fd_term(args->except);
@@ -4506,7 +4506,7 @@ rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
     RB_VM_LOCK_ENTER();
     {
-        list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
+        ccan_list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
     }
     RB_VM_LOCK_LEAVE();
@@ -4702,8 +4702,8 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r
     ubf_list_atfork();
     // OK. Only this thread accesses:
-    list_for_each(&vm->ractor.set, r, vmlr_node) {
-        list_for_each(&r->threads.set, i, lt_node) {
+    ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
+        ccan_list_for_each(&r->threads.set, i, lt_node) {
             atfork(i, th);
         }
     }
@@ -4843,7 +4843,7 @@ thgroup_list(VALUE group)
     rb_thread_t *th = 0;
     rb_ractor_t *r = GET_RACTOR();
-    list_for_each(&r->threads.set, th, lt_node) {
+    ccan_list_for_each(&r->threads.set, th, lt_node) {
         if (th->thgroup == group) {
             rb_ary_push(ary, th->self);
         }
@@ -5513,7 +5513,7 @@ debug_deadlock_check(rb_ractor_t *r, VALUE msg)
                 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
                 (void *)GET_THREAD(), (void *)r->threads.main);
-    list_for_each(&r->threads.set, th, lt_node) {
+    ccan_list_for_each(&r->threads.set, th, lt_node) {
         rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
                     "native:%"PRI_THREAD_ID" int:%u",
                     th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);
@@ -5551,13 +5551,13 @@ rb_check_deadlock(rb_ractor_t *r)
     if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
     if (patrol_thread && patrol_thread != GET_THREAD()) return;
-    list_for_each(&r->threads.set, th, lt_node) {
+    ccan_list_for_each(&r->threads.set, th, lt_node) {
         if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
             found = 1;
         }
         else if (th->locking_mutex) {
             rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
-            if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !list_empty(&mutex->waitq))) {
+            if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
                 found = 1;
             }
         }
@@ -5578,12 +5578,12 @@ rb_check_deadlock(rb_ractor_t *r)
     // Used for VM memsize reporting. Returns the size of a list of waiting_fd
     // structs. Defined here because the struct definition lives here as well.
size_t size_t
rb_vm_memsize_waiting_fds(struct list_head *waiting_fds) rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
{ {
struct waiting_fd *waitfd = 0; struct waiting_fd *waitfd = 0;
size_t size = 0; size_t size = 0;
list_for_each(waiting_fds, waitfd, wfd_node) { ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
size += sizeof(struct waiting_fd); size += sizeof(struct waiting_fd);
} }

View File

@ -228,7 +228,7 @@ designate_timer_thread(rb_global_vm_lock_t *gvl)
{ {
native_thread_data_t *last; native_thread_data_t *last;
last = list_tail(&gvl->waitq, native_thread_data_t, node.ubf); last = ccan_list_tail(&gvl->waitq, native_thread_data_t, node.ubf);
if (last) { if (last) {
rb_native_cond_signal(&last->cond.gvlq); rb_native_cond_signal(&last->cond.gvlq);
return TRUE; return TRUE;
@ -289,7 +289,7 @@ gvl_acquire_common(rb_global_vm_lock_t *gvl, rb_thread_t *th)
VM_ASSERT(th->unblock.func == 0 && VM_ASSERT(th->unblock.func == 0 &&
"we must not be in ubf_list and GVL waitq at the same time"); "we must not be in ubf_list and GVL waitq at the same time");
list_add_tail(&gvl->waitq, &nd->node.gvl); ccan_list_add_tail(&gvl->waitq, &nd->node.gvl);
do { do {
if (!gvl->timer) { if (!gvl->timer) {
@ -300,7 +300,7 @@ gvl_acquire_common(rb_global_vm_lock_t *gvl, rb_thread_t *th)
} }
} while (gvl->owner); } while (gvl->owner);
list_del_init(&nd->node.gvl); ccan_list_del_init(&nd->node.gvl);
if (gvl->need_yield) { if (gvl->need_yield) {
gvl->need_yield = 0; gvl->need_yield = 0;
@ -331,7 +331,7 @@ gvl_release_common(rb_global_vm_lock_t *gvl)
{ {
native_thread_data_t *next; native_thread_data_t *next;
gvl->owner = 0; gvl->owner = 0;
next = list_top(&gvl->waitq, native_thread_data_t, node.ubf); next = ccan_list_top(&gvl->waitq, native_thread_data_t, node.ubf);
if (next) rb_native_cond_signal(&next->cond.gvlq); if (next) rb_native_cond_signal(&next->cond.gvlq);
return next; return next;
@ -388,7 +388,7 @@ rb_gvl_init(rb_global_vm_lock_t *gvl)
rb_native_mutex_initialize(&gvl->lock); rb_native_mutex_initialize(&gvl->lock);
rb_native_cond_initialize(&gvl->switch_cond); rb_native_cond_initialize(&gvl->switch_cond);
rb_native_cond_initialize(&gvl->switch_wait_cond); rb_native_cond_initialize(&gvl->switch_wait_cond);
list_head_init(&gvl->waitq); ccan_list_head_init(&gvl->waitq);
gvl->owner = 0; gvl->owner = 0;
gvl->timer = 0; gvl->timer = 0;
gvl->timer_err = ETIMEDOUT; gvl->timer_err = ETIMEDOUT;
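The two GVL hunks above use the pair of lookup macros: ccan_list_tail() picks the most recently queued waiter when designating a timer thread, and ccan_list_top() picks the oldest waiter when releasing the GVL; both return NULL on an empty queue, which is what the if (last) / if (next) checks rely on. A tiny sketch of that behaviour, with an illustrative waiter type rather than native_thread_data_t:

#include <assert.h>
#include <stddef.h>
#include "ccan/list/list.h"

struct waiter {
    int id;
    struct ccan_list_node node;
};

static CCAN_LIST_HEAD(waitq);

int main(void)
{
    struct waiter a = { .id = 1 }, b = { .id = 2 };

    /* Both lookups return NULL while the list is empty. */
    assert(ccan_list_top(&waitq, struct waiter, node) == NULL);
    assert(ccan_list_tail(&waitq, struct waiter, node) == NULL);

    ccan_list_add_tail(&waitq, &a.node);
    ccan_list_add_tail(&waitq, &b.node);

    assert(ccan_list_top(&waitq, struct waiter, node) == &a);   /* oldest */
    assert(ccan_list_tail(&waitq, struct waiter, node) == &b);  /* newest */
    return 0;
}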
@ -690,7 +690,7 @@ native_thread_init(rb_thread_t *th)
th->tid = get_native_thread_id(); th->tid = get_native_thread_id();
#endif #endif
#ifdef USE_UBF_LIST #ifdef USE_UBF_LIST
list_node_init(&nd->node.ubf); ccan_list_node_init(&nd->node.ubf);
#endif #endif
rb_native_cond_initialize(&nd->cond.gvlq); rb_native_cond_initialize(&nd->cond.gvlq);
if (&nd->cond.gvlq != &nd->cond.intr) if (&nd->cond.gvlq != &nd->cond.intr)
@ -1072,19 +1072,19 @@ struct cached_thread_entry {
rb_nativethread_id_t thread_id; rb_nativethread_id_t thread_id;
rb_thread_t *th; rb_thread_t *th;
void *altstack; void *altstack;
struct list_node node; struct ccan_list_node node;
}; };
#if USE_THREAD_CACHE #if USE_THREAD_CACHE
static rb_nativethread_lock_t thread_cache_lock = RB_NATIVETHREAD_LOCK_INIT; static rb_nativethread_lock_t thread_cache_lock = RB_NATIVETHREAD_LOCK_INIT;
static LIST_HEAD(cached_thread_head); static CCAN_LIST_HEAD(cached_thread_head);
# if defined(HAVE_WORKING_FORK) # if defined(HAVE_WORKING_FORK)
static void static void
thread_cache_reset(void) thread_cache_reset(void)
{ {
rb_native_mutex_initialize(&thread_cache_lock); rb_native_mutex_initialize(&thread_cache_lock);
list_head_init(&cached_thread_head); ccan_list_head_init(&cached_thread_head);
} }
# endif # endif
@ -1111,12 +1111,12 @@ register_cached_thread_and_wait(void *altstack)
rb_native_mutex_lock(&thread_cache_lock); rb_native_mutex_lock(&thread_cache_lock);
{ {
list_add(&cached_thread_head, &entry.node); ccan_list_add(&cached_thread_head, &entry.node);
native_cond_timedwait(&entry.cond, &thread_cache_lock, &end); native_cond_timedwait(&entry.cond, &thread_cache_lock, &end);
if (entry.th == NULL) { /* unused */ if (entry.th == NULL) { /* unused */
list_del(&entry.node); ccan_list_del(&entry.node);
} }
} }
rb_native_mutex_unlock(&thread_cache_lock); rb_native_mutex_unlock(&thread_cache_lock);
@ -1141,7 +1141,7 @@ use_cached_thread(rb_thread_t *th)
struct cached_thread_entry *entry; struct cached_thread_entry *entry;
rb_native_mutex_lock(&thread_cache_lock); rb_native_mutex_lock(&thread_cache_lock);
entry = list_pop(&cached_thread_head, struct cached_thread_entry, node); entry = ccan_list_pop(&cached_thread_head, struct cached_thread_entry, node);
if (entry) { if (entry) {
entry->th = th; entry->th = th;
/* th->thread_id must be set before signal for Thread#name= */ /* th->thread_id must be set before signal for Thread#name= */
@ -1162,7 +1162,7 @@ clear_thread_cache_altstack(void)
struct cached_thread_entry *entry; struct cached_thread_entry *entry;
rb_native_mutex_lock(&thread_cache_lock); rb_native_mutex_lock(&thread_cache_lock);
list_for_each(&cached_thread_head, entry, node) { ccan_list_for_each(&cached_thread_head, entry, node) {
void MAYBE_UNUSED(*altstack) = entry->altstack; void MAYBE_UNUSED(*altstack) = entry->altstack;
entry->altstack = 0; entry->altstack = 0;
RB_ALTSTACK_FREE(altstack); RB_ALTSTACK_FREE(altstack);
@ -1305,13 +1305,13 @@ native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
} }
#ifdef USE_UBF_LIST #ifdef USE_UBF_LIST
static LIST_HEAD(ubf_list_head); static CCAN_LIST_HEAD(ubf_list_head);
static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT; static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;
static void static void
ubf_list_atfork(void) ubf_list_atfork(void)
{ {
list_head_init(&ubf_list_head); ccan_list_head_init(&ubf_list_head);
rb_native_mutex_initialize(&ubf_list_lock); rb_native_mutex_initialize(&ubf_list_lock);
} }
@ -1319,11 +1319,11 @@ ubf_list_atfork(void)
static void static void
register_ubf_list(rb_thread_t *th) register_ubf_list(rb_thread_t *th)
{ {
struct list_node *node = &th->native_thread_data.node.ubf; struct ccan_list_node *node = &th->native_thread_data.node.ubf;
if (list_empty((struct list_head*)node)) { if (ccan_list_empty((struct ccan_list_head*)node)) {
rb_native_mutex_lock(&ubf_list_lock); rb_native_mutex_lock(&ubf_list_lock);
list_add(&ubf_list_head, node); ccan_list_add(&ubf_list_head, node);
rb_native_mutex_unlock(&ubf_list_lock); rb_native_mutex_unlock(&ubf_list_lock);
} }
} }
@ -1332,15 +1332,15 @@ register_ubf_list(rb_thread_t *th)
static void static void
unregister_ubf_list(rb_thread_t *th) unregister_ubf_list(rb_thread_t *th)
{ {
struct list_node *node = &th->native_thread_data.node.ubf; struct ccan_list_node *node = &th->native_thread_data.node.ubf;
/* we can't allow re-entry into ubf_list_head */ /* we can't allow re-entry into ubf_list_head */
VM_ASSERT(th->unblock.func == 0); VM_ASSERT(th->unblock.func == 0);
if (!list_empty((struct list_head*)node)) { if (!ccan_list_empty((struct ccan_list_head*)node)) {
rb_native_mutex_lock(&ubf_list_lock); rb_native_mutex_lock(&ubf_list_lock);
list_del_init(node); ccan_list_del_init(node);
if (list_empty(&ubf_list_head) && !rb_signal_buff_size()) { if (ccan_list_empty(&ubf_list_head) && !rb_signal_buff_size()) {
ubf_timer_disarm(); ubf_timer_disarm();
} }
rb_native_mutex_unlock(&ubf_list_lock); rb_native_mutex_unlock(&ubf_list_lock);
@ -1397,7 +1397,7 @@ ubf_select(void *ptr)
static int static int
ubf_threads_empty(void) ubf_threads_empty(void)
{ {
return list_empty(&ubf_list_head); return ccan_list_empty(&ubf_list_head);
} }
static void static void
@ -1408,8 +1408,8 @@ ubf_wakeup_all_threads(void)
if (!ubf_threads_empty()) { if (!ubf_threads_empty()) {
rb_native_mutex_lock(&ubf_list_lock); rb_native_mutex_lock(&ubf_list_lock);
list_for_each(&ubf_list_head, dat, node.ubf) { ccan_list_for_each(&ubf_list_head, dat, node.ubf) {
th = container_of(dat, rb_thread_t, native_thread_data); th = ccan_container_of(dat, rb_thread_t, native_thread_data);
ubf_wakeup_thread(th); ubf_wakeup_thread(th);
} }
rb_native_mutex_unlock(&ubf_list_lock); rb_native_mutex_unlock(&ubf_list_lock);
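register_ubf_list()/unregister_ubf_list() above depend on a ccan/list detail worth spelling out: a node prepared with ccan_list_node_init() points at itself, so casting it to a ccan_list_head and asking ccan_list_empty() answers whether the node is currently linked, and ubf_wakeup_all_threads() then maps the iterated native_thread_data_t back to its enclosing rb_thread_t with ccan_container_of(). A small sketch of both tricks under the same assumptions as before (illustrative struct worker, not Ruby code):

#include <assert.h>
#include "ccan/list/list.h"

struct worker {
    int id;
    struct ccan_list_node node;   /* embedded link, like native_thread_data_t.node.ubf */
};

static CCAN_LIST_HEAD(workers);

int main(void)
{
    struct worker w = { .id = 1 };

    /* An initialized, unlinked node points at itself, so viewing it as a
     * one-entry head lets ccan_list_empty() answer "am I on a list?". */
    ccan_list_node_init(&w.node);
    assert(ccan_list_empty((struct ccan_list_head *)&w.node));

    ccan_list_add(&workers, &w.node);
    assert(!ccan_list_empty((struct ccan_list_head *)&w.node));

    /* The list macros hand back the containing struct directly ... */
    struct worker *top = ccan_list_top(&workers, struct worker, node);
    assert(top == &w);

    /* ... and ccan_container_of() is the same conversion done by hand:
     * from a pointer to an embedded member back to the enclosing struct. */
    assert(ccan_container_of(&w.node, struct worker, node) == &w);

    ccan_list_del_init(&w.node);  /* safe to test or delete again afterwards */
    assert(ccan_list_empty((struct ccan_list_head *)&w.node));
    return 0;
}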

View File

@ -19,8 +19,8 @@
typedef struct native_thread_data_struct { typedef struct native_thread_data_struct {
union { union {
struct list_node ubf; struct ccan_list_node ubf;
struct list_node gvl; struct ccan_list_node gvl;
} node; } node;
#if defined(__GLIBC__) || defined(__FreeBSD__) #if defined(__GLIBC__) || defined(__FreeBSD__)
union union
@ -58,7 +58,7 @@ typedef struct rb_global_vm_lock_struct {
* switching between contended/uncontended GVL won't reset the * switching between contended/uncontended GVL won't reset the
* timer. * timer.
*/ */
struct list_head waitq; /* <=> native_thread_data_t.node.ubf */ struct ccan_list_head waitq; /* <=> native_thread_data_t.node.ubf */
const struct rb_thread_struct *timer; const struct rb_thread_struct *timer;
int timer_err; int timer_err;

View File

@ -8,7 +8,7 @@ static VALUE rb_eClosedQueueError;
typedef struct rb_mutex_struct { typedef struct rb_mutex_struct {
rb_fiber_t *fiber; rb_fiber_t *fiber;
struct rb_mutex_struct *next_mutex; struct rb_mutex_struct *next_mutex;
struct list_head waitq; /* protected by GVL */ struct ccan_list_head waitq; /* protected by GVL */
} rb_mutex_t; } rb_mutex_t;
/* sync_waiter is always on-stack */ /* sync_waiter is always on-stack */
@ -16,18 +16,18 @@ struct sync_waiter {
VALUE self; VALUE self;
rb_thread_t *th; rb_thread_t *th;
rb_fiber_t *fiber; rb_fiber_t *fiber;
struct list_node node; struct ccan_list_node node;
}; };
#define MUTEX_ALLOW_TRAP FL_USER1 #define MUTEX_ALLOW_TRAP FL_USER1
static void static void
sync_wakeup(struct list_head *head, long max) sync_wakeup(struct ccan_list_head *head, long max)
{ {
struct sync_waiter *cur = 0, *next; struct sync_waiter *cur = 0, *next;
list_for_each_safe(head, cur, next, node) { ccan_list_for_each_safe(head, cur, next, node) {
list_del_init(&cur->node); ccan_list_del_init(&cur->node);
if (cur->th->status != THREAD_KILLED) { if (cur->th->status != THREAD_KILLED) {
@ -45,13 +45,13 @@ sync_wakeup(struct list_head *head, long max)
} }
static void static void
wakeup_one(struct list_head *head) wakeup_one(struct ccan_list_head *head)
{ {
sync_wakeup(head, 1); sync_wakeup(head, 1);
} }
static void static void
wakeup_all(struct list_head *head) wakeup_all(struct ccan_list_head *head)
{ {
sync_wakeup(head, LONG_MAX); sync_wakeup(head, LONG_MAX);
} }
@ -95,7 +95,7 @@ rb_mutex_num_waiting(rb_mutex_t *mutex)
struct sync_waiter *w = 0; struct sync_waiter *w = 0;
size_t n = 0; size_t n = 0;
list_for_each(&mutex->waitq, w, node) { ccan_list_for_each(&mutex->waitq, w, node) {
n++; n++;
} }
@ -152,7 +152,7 @@ mutex_alloc(VALUE klass)
obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex); obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
list_head_init(&mutex->waitq); ccan_list_head_init(&mutex->waitq);
return obj; return obj;
} }
@ -269,7 +269,7 @@ static VALUE
delete_from_waitq(VALUE value) delete_from_waitq(VALUE value)
{ {
struct sync_waiter *sync_waiter = (void *)value; struct sync_waiter *sync_waiter = (void *)value;
list_del(&sync_waiter->node); ccan_list_del(&sync_waiter->node);
return Qnil; return Qnil;
} }
@ -302,7 +302,7 @@ do_mutex_lock(VALUE self, int interruptible_p)
.fiber = fiber .fiber = fiber
}; };
list_add_tail(&mutex->waitq, &sync_waiter.node); ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
rb_ensure(call_rb_fiber_scheduler_block, self, delete_from_waitq, (VALUE)&sync_waiter); rb_ensure(call_rb_fiber_scheduler_block, self, delete_from_waitq, (VALUE)&sync_waiter);
@ -335,11 +335,11 @@ do_mutex_lock(VALUE self, int interruptible_p)
.fiber = fiber .fiber = fiber
}; };
list_add_tail(&mutex->waitq, &sync_waiter.node); ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
native_sleep(th, timeout); /* release GVL */ native_sleep(th, timeout); /* release GVL */
list_del(&sync_waiter.node); ccan_list_del(&sync_waiter.node);
if (!mutex->fiber) { if (!mutex->fiber) {
mutex->fiber = fiber; mutex->fiber = fiber;
@ -427,8 +427,8 @@ rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
struct sync_waiter *cur = 0, *next; struct sync_waiter *cur = 0, *next;
mutex->fiber = 0; mutex->fiber = 0;
list_for_each_safe(&mutex->waitq, cur, next, node) { ccan_list_for_each_safe(&mutex->waitq, cur, next, node) {
list_del_init(&cur->node); ccan_list_del_init(&cur->node);
if (cur->th->scheduler != Qnil && rb_fiberptr_blocking(cur->fiber) == 0) { if (cur->th->scheduler != Qnil && rb_fiberptr_blocking(cur->fiber) == 0) {
rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber)); rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
@ -491,7 +491,7 @@ rb_mutex_abandon_locking_mutex(rb_thread_t *th)
if (th->locking_mutex) { if (th->locking_mutex) {
rb_mutex_t *mutex = mutex_ptr(th->locking_mutex); rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
list_head_init(&mutex->waitq); ccan_list_head_init(&mutex->waitq);
th->locking_mutex = Qfalse; th->locking_mutex = Qfalse;
} }
} }
@ -506,7 +506,7 @@ rb_mutex_abandon_all(rb_mutex_t *mutexes)
mutexes = mutex->next_mutex; mutexes = mutex->next_mutex;
mutex->fiber = 0; mutex->fiber = 0;
mutex->next_mutex = 0; mutex->next_mutex = 0;
list_head_init(&mutex->waitq); ccan_list_head_init(&mutex->waitq);
} }
} }
#endif #endif
@ -631,7 +631,7 @@ void rb_mutex_allow_trap(VALUE self, int val)
#define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq) #define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
PACKED_STRUCT_UNALIGNED(struct rb_queue { PACKED_STRUCT_UNALIGNED(struct rb_queue {
struct list_head waitq; struct ccan_list_head waitq;
rb_serial_t fork_gen; rb_serial_t fork_gen;
const VALUE que; const VALUE que;
int num_waiting; int num_waiting;
@ -642,7 +642,7 @@ PACKED_STRUCT_UNALIGNED(struct rb_queue {
PACKED_STRUCT_UNALIGNED(struct rb_szqueue { PACKED_STRUCT_UNALIGNED(struct rb_szqueue {
struct rb_queue q; struct rb_queue q;
int num_waiting_push; int num_waiting_push;
struct list_head pushq; struct ccan_list_head pushq;
long max; long max;
}); });
@ -674,7 +674,7 @@ queue_alloc(VALUE klass)
struct rb_queue *q; struct rb_queue *q;
obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q); obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
list_head_init(queue_waitq(q)); ccan_list_head_init(queue_waitq(q));
return obj; return obj;
} }
@ -688,7 +688,7 @@ queue_fork_check(struct rb_queue *q)
} }
/* forked children can't reach into parent thread stacks */ /* forked children can't reach into parent thread stacks */
q->fork_gen = fork_gen; q->fork_gen = fork_gen;
list_head_init(queue_waitq(q)); ccan_list_head_init(queue_waitq(q));
q->num_waiting = 0; q->num_waiting = 0;
return 1; return 1;
} }
@ -732,8 +732,8 @@ szqueue_alloc(VALUE klass)
struct rb_szqueue *sq; struct rb_szqueue *sq;
VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue, VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
&szqueue_data_type, sq); &szqueue_data_type, sq);
list_head_init(szqueue_waitq(sq)); ccan_list_head_init(szqueue_waitq(sq));
list_head_init(szqueue_pushq(sq)); ccan_list_head_init(szqueue_pushq(sq));
return obj; return obj;
} }
@ -744,7 +744,7 @@ szqueue_ptr(VALUE obj)
TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq); TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
if (queue_fork_check(&sq->q)) { if (queue_fork_check(&sq->q)) {
list_head_init(szqueue_pushq(sq)); ccan_list_head_init(szqueue_pushq(sq));
sq->num_waiting_push = 0; sq->num_waiting_push = 0;
} }
@ -869,7 +869,7 @@ rb_queue_initialize(int argc, VALUE *argv, VALUE self)
initial = rb_to_array(initial); initial = rb_to_array(initial);
} }
RB_OBJ_WRITE(self, &q->que, ary_buf_new()); RB_OBJ_WRITE(self, &q->que, ary_buf_new());
list_head_init(queue_waitq(q)); ccan_list_head_init(queue_waitq(q));
if (argc == 1) { if (argc == 1) {
rb_ary_concat(q->que, initial); rb_ary_concat(q->que, initial);
} }
@ -983,7 +983,7 @@ queue_sleep_done(VALUE p)
{ {
struct queue_waiter *qw = (struct queue_waiter *)p; struct queue_waiter *qw = (struct queue_waiter *)p;
list_del(&qw->w.node); ccan_list_del(&qw->w.node);
qw->as.q->num_waiting--; qw->as.q->num_waiting--;
return Qfalse; return Qfalse;
@ -994,7 +994,7 @@ szqueue_sleep_done(VALUE p)
{ {
struct queue_waiter *qw = (struct queue_waiter *)p; struct queue_waiter *qw = (struct queue_waiter *)p;
list_del(&qw->w.node); ccan_list_del(&qw->w.node);
qw->as.sq->num_waiting_push--; qw->as.sq->num_waiting_push--;
return Qfalse; return Qfalse;
@ -1023,9 +1023,9 @@ queue_do_pop(VALUE self, struct rb_queue *q, int should_block)
.as = {.q = q} .as = {.q = q}
}; };
struct list_head *waitq = queue_waitq(q); struct ccan_list_head *waitq = queue_waitq(q);
list_add_tail(waitq, &queue_waiter.w.node); ccan_list_add_tail(waitq, &queue_waiter.w.node);
queue_waiter.as.q->num_waiting++; queue_waiter.as.q->num_waiting++;
rb_ensure(queue_sleep, self, queue_sleep_done, (VALUE)&queue_waiter); rb_ensure(queue_sleep, self, queue_sleep_done, (VALUE)&queue_waiter);
@ -1152,8 +1152,8 @@ rb_szqueue_initialize(VALUE self, VALUE vmax)
} }
RB_OBJ_WRITE(self, &sq->q.que, ary_buf_new()); RB_OBJ_WRITE(self, &sq->q.que, ary_buf_new());
list_head_init(szqueue_waitq(sq)); ccan_list_head_init(szqueue_waitq(sq));
list_head_init(szqueue_pushq(sq)); ccan_list_head_init(szqueue_pushq(sq));
sq->max = max; sq->max = max;
return self; return self;
@ -1266,9 +1266,9 @@ rb_szqueue_push(int argc, VALUE *argv, VALUE self)
.as = {.sq = sq} .as = {.sq = sq}
}; };
struct list_head *pushq = szqueue_pushq(sq); struct ccan_list_head *pushq = szqueue_pushq(sq);
list_add_tail(pushq, &queue_waiter.w.node); ccan_list_add_tail(pushq, &queue_waiter.w.node);
sq->num_waiting_push++; sq->num_waiting_push++;
rb_ensure(queue_sleep, self, szqueue_sleep_done, (VALUE)&queue_waiter); rb_ensure(queue_sleep, self, szqueue_sleep_done, (VALUE)&queue_waiter);
@ -1381,7 +1381,7 @@ rb_szqueue_empty_p(VALUE self)
/* ConditionalVariable */ /* ConditionalVariable */
struct rb_condvar { struct rb_condvar {
struct list_head waitq; struct ccan_list_head waitq;
rb_serial_t fork_gen; rb_serial_t fork_gen;
}; };
@ -1436,7 +1436,7 @@ condvar_ptr(VALUE self)
/* forked children can't reach into parent thread stacks */ /* forked children can't reach into parent thread stacks */
if (cv->fork_gen != fork_gen) { if (cv->fork_gen != fork_gen) {
cv->fork_gen = fork_gen; cv->fork_gen = fork_gen;
list_head_init(&cv->waitq); ccan_list_head_init(&cv->waitq);
} }
return cv; return cv;
@ -1449,7 +1449,7 @@ condvar_alloc(VALUE klass)
VALUE obj; VALUE obj;
obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv); obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
list_head_init(&cv->waitq); ccan_list_head_init(&cv->waitq);
return obj; return obj;
} }
@ -1464,7 +1464,7 @@ static VALUE
rb_condvar_initialize(VALUE self) rb_condvar_initialize(VALUE self)
{ {
struct rb_condvar *cv = condvar_ptr(self); struct rb_condvar *cv = condvar_ptr(self);
list_head_init(&cv->waitq); ccan_list_head_init(&cv->waitq);
return self; return self;
} }
@ -1510,7 +1510,7 @@ rb_condvar_wait(int argc, VALUE *argv, VALUE self)
.fiber = ec->fiber_ptr .fiber = ec->fiber_ptr
}; };
list_add_tail(&cv->waitq, &sync_waiter.node); ccan_list_add_tail(&cv->waitq, &sync_waiter.node);
return rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&sync_waiter); return rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&sync_waiter);
} }
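The sync_waiter nodes queued above live on the waiting caller's stack: they are appended with ccan_list_add_tail() before blocking and must be unlinked on every exit path (delete_from_waitq via rb_ensure, or the explicit ccan_list_del() after native_sleep), while sync_wakeup() removes them with ccan_list_del_init(). A condensed sketch of that discipline with illustrative names, no fibers or GVL involved:

#include <stdio.h>
#include "ccan/list/list.h"

struct waiter {
    int id;
    struct ccan_list_node node;   /* lives on the waiter's own stack */
};

static CCAN_LIST_HEAD(waitq);

/* Wake up to `max` waiters, unlinking each before handing it back. */
static void wake(long max)
{
    struct waiter *cur = 0, *next;

    ccan_list_for_each_safe(&waitq, cur, next, node) {
        ccan_list_del_init(&cur->node);   /* idempotent w.r.t. a later del */
        printf("woke waiter %d\n", cur->id);
        if (--max == 0)
            break;
    }
}

int main(void)
{
    struct waiter a = { .id = 1 }, b = { .id = 2 };

    ccan_list_add_tail(&waitq, &a.node);  /* enqueue before "blocking" */
    ccan_list_add_tail(&waitq, &b.node);

    wake(1);                              /* wakes a only */

    /* Every exit path must unlink what it queued (the rb_ensure part). */
    ccan_list_del(&b.node);
    return 0;
}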

View File

@ -2104,7 +2104,7 @@ autoload_data(VALUE mod, ID id)
} }
struct autoload_const { struct autoload_const {
struct list_node cnode; /* <=> autoload_data_i.constants */ struct ccan_list_node cnode; /* <=> autoload_data_i.constants */
VALUE mod; VALUE mod;
VALUE ad; /* autoload_data_i */ VALUE ad; /* autoload_data_i */
VALUE value; VALUE value;
@ -2119,14 +2119,14 @@ struct autoload_state {
struct autoload_const *ac; struct autoload_const *ac;
VALUE result; VALUE result;
VALUE thread; VALUE thread;
struct list_head waitq; struct ccan_list_head waitq;
}; };
struct autoload_data_i { struct autoload_data_i {
VALUE feature; VALUE feature;
struct autoload_state *state; /* points to on-stack struct */ struct autoload_state *state; /* points to on-stack struct */
rb_serial_t fork_gen; rb_serial_t fork_gen;
struct list_head constants; /* <=> autoload_const.cnode */ struct ccan_list_head constants; /* <=> autoload_const.cnode */
}; };
static void static void
@ -2144,7 +2144,7 @@ autoload_i_mark(void *ptr)
rb_gc_mark_movable(p->feature); rb_gc_mark_movable(p->feature);
/* allow GC to free us if no modules refer to this via autoload_const.ad */ /* allow GC to free us if no modules refer to this via autoload_const.ad */
if (list_empty(&p->constants)) { if (ccan_list_empty(&p->constants)) {
rb_hash_delete(autoload_featuremap, p->feature); rb_hash_delete(autoload_featuremap, p->feature);
} }
} }
@ -2155,7 +2155,7 @@ autoload_i_free(void *ptr)
struct autoload_data_i *p = ptr; struct autoload_data_i *p = ptr;
/* we may leak some memory at VM shutdown time, no big deal */ /* we may leak some memory at VM shutdown time, no big deal */
if (list_empty(&p->constants)) { if (ccan_list_empty(&p->constants)) {
xfree(p); xfree(p);
} }
} }
@ -2198,7 +2198,7 @@ static void
autoload_c_free(void *ptr) autoload_c_free(void *ptr)
{ {
struct autoload_const *ac = ptr; struct autoload_const *ac = ptr;
list_del(&ac->cnode); ccan_list_del(&ac->cnode);
xfree(ac); xfree(ac);
} }
@ -2288,7 +2288,7 @@ rb_autoload_str(VALUE mod, ID id, VALUE file)
&autoload_data_i_type, ele); &autoload_data_i_type, ele);
ele->feature = file; ele->feature = file;
ele->state = 0; ele->state = 0;
list_head_init(&ele->constants); ccan_list_head_init(&ele->constants);
rb_hash_aset(autoload_featuremap, file, ad); rb_hash_aset(autoload_featuremap, file, ad);
} }
else { else {
@ -2304,7 +2304,7 @@ rb_autoload_str(VALUE mod, ID id, VALUE file)
ac->value = Qundef; ac->value = Qundef;
ac->flag = CONST_PUBLIC; ac->flag = CONST_PUBLIC;
ac->ad = ad; ac->ad = ad;
list_add_tail(&ele->constants, &ac->cnode); ccan_list_add_tail(&ele->constants, &ac->cnode);
st_insert(tbl, (st_data_t)id, (st_data_t)acv); st_insert(tbl, (st_data_t)id, (st_data_t)acv);
} }
} }
@ -2325,7 +2325,7 @@ autoload_delete(VALUE mod, ID id)
ele = get_autoload_data((VALUE)load, &ac); ele = get_autoload_data((VALUE)load, &ac);
VM_ASSERT(ele); VM_ASSERT(ele);
if (ele) { if (ele) {
VM_ASSERT(!list_empty(&ele->constants)); VM_ASSERT(!ccan_list_empty(&ele->constants));
} }
/* /*
@ -2333,7 +2333,7 @@ autoload_delete(VALUE mod, ID id)
* with parallel autoload. Using list_del_init here so list_del * with parallel autoload. Using list_del_init here so list_del
* works in autoload_c_free * works in autoload_c_free
*/ */
list_del_init(&ac->cnode); ccan_list_del_init(&ac->cnode);
if (tbl->num_entries == 0) { if (tbl->num_entries == 0) {
n = autoload; n = autoload;
@ -2480,7 +2480,7 @@ autoload_reset(VALUE arg)
if (RTEST(state->result)) { if (RTEST(state->result)) {
struct autoload_const *next; struct autoload_const *next;
list_for_each_safe(&ele->constants, ac, next, cnode) { ccan_list_for_each_safe(&ele->constants, ac, next, cnode) {
if (ac->value != Qundef) { if (ac->value != Qundef) {
autoload_const_set(ac); autoload_const_set(ac);
} }
@ -2491,11 +2491,11 @@ autoload_reset(VALUE arg)
if (need_wakeups) { if (need_wakeups) {
struct autoload_state *cur = 0, *nxt; struct autoload_state *cur = 0, *nxt;
list_for_each_safe(&state->waitq, cur, nxt, waitq.n) { ccan_list_for_each_safe(&state->waitq, cur, nxt, waitq.n) {
VALUE th = cur->thread; VALUE th = cur->thread;
cur->thread = Qfalse; cur->thread = Qfalse;
list_del_init(&cur->waitq.n); /* idempotent */ ccan_list_del_init(&cur->waitq.n); /* idempotent */
/* /*
* cur is stored on the stack of cur->waiting_th, * cur is stored on the stack of cur->waiting_th,
@ -2530,7 +2530,7 @@ autoload_sleep_done(VALUE arg)
struct autoload_state *state = (struct autoload_state *)arg; struct autoload_state *state = (struct autoload_state *)arg;
if (state->thread != Qfalse && rb_thread_to_be_killed(state->thread)) { if (state->thread != Qfalse && rb_thread_to_be_killed(state->thread)) {
list_del(&state->waitq.n); /* idempotent after list_del_init */ ccan_list_del(&state->waitq.n); /* idempotent after list_del_init */
} }
return Qfalse; return Qfalse;
@ -2575,13 +2575,13 @@ rb_autoload_load(VALUE mod, ID id)
* autoload_reset will wake up any threads added to this * autoload_reset will wake up any threads added to this
* if and only if the GVL is released during autoload_require * if and only if the GVL is released during autoload_require
*/ */
list_head_init(&state.waitq); ccan_list_head_init(&state.waitq);
} }
else if (state.thread == ele->state->thread) { else if (state.thread == ele->state->thread) {
return Qfalse; return Qfalse;
} }
else { else {
list_add_tail(&ele->state->waitq, &state.waitq.n); ccan_list_add_tail(&ele->state->waitq, &state.waitq.n);
rb_ensure(autoload_sleep, (VALUE)&state, rb_ensure(autoload_sleep, (VALUE)&state,
autoload_sleep_done, (VALUE)&state); autoload_sleep_done, (VALUE)&state);
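The autoload hunks lean on the property the original comments call out: ccan_list_del_init() leaves the node pointing back at itself, so a later ccan_list_del() on the same node (autoload_sleep_done(), autoload_c_free()) is a harmless no-op instead of touching stale neighbours. A tiny sketch of that sequence, with an illustrative entry type:

#include <assert.h>
#include "ccan/list/list.h"

struct entry {
    struct ccan_list_node node;
};

static CCAN_LIST_HEAD(q);

int main(void)
{
    struct entry e;

    ccan_list_add_tail(&q, &e.node);

    ccan_list_del_init(&e.node);  /* unlink and re-point the node at itself */
    assert(ccan_list_empty(&q));

    /* A plain ccan_list_del() on a node that was merely unlinked would touch
     * stale neighbours, but after del_init it only relinks the node to
     * itself, so this second delete is effectively a no-op. */
    ccan_list_del(&e.node);
    return 0;
}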

vm.c
View File

@ -2634,12 +2634,12 @@ rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx)
if (ptr) { if (ptr) {
rb_vm_t *vm = ptr; rb_vm_t *vm = ptr;
rb_ractor_t *r = 0; rb_ractor_t *r = 0;
list_for_each(&vm->ractor.set, r, vmlr_node) { ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) || VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
rb_ractor_status_p(r, ractor_running)); rb_ractor_status_p(r, ractor_running));
if (r->threads.cnt > 0) { if (r->threads.cnt > 0) {
rb_thread_t *th = 0; rb_thread_t *th = 0;
list_for_each(&r->threads.set, th, lt_node) { ccan_list_for_each(&r->threads.set, th, lt_node) {
VM_ASSERT(th != NULL); VM_ASSERT(th != NULL);
rb_execution_context_t * ec = th->ec; rb_execution_context_t * ec = th->ec;
if (ec->vm_stack) { if (ec->vm_stack) {
@ -2676,7 +2676,7 @@ rb_vm_mark(void *ptr)
long i, len; long i, len;
const VALUE *obj_ary; const VALUE *obj_ary;
list_for_each(&vm->ractor.set, r, vmlr_node) { ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
// ractor.set only contains blocking or running ractors // ractor.set only contains blocking or running ractors
VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) || VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
rb_ractor_status_p(r, ractor_running)); rb_ractor_status_p(r, ractor_running));
@ -2808,10 +2808,10 @@ ruby_vm_destruct(rb_vm_t *vm)
return 0; return 0;
} }
size_t rb_vm_memsize_waiting_list(struct list_head *waiting_list); // process.c size_t rb_vm_memsize_waiting_list(struct ccan_list_head *waiting_list); // process.c
size_t rb_vm_memsize_waiting_fds(struct list_head *waiting_fds); // thread.c size_t rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds); // thread.c
size_t rb_vm_memsize_postponed_job_buffer(void); // vm_trace.c size_t rb_vm_memsize_postponed_job_buffer(void); // vm_trace.c
size_t rb_vm_memsize_workqueue(struct list_head *workqueue); // vm_trace.c size_t rb_vm_memsize_workqueue(struct ccan_list_head *workqueue); // vm_trace.c
// Used for VM memsize reporting. Returns the size of the at_exit list by // Used for VM memsize reporting. Returns the size of the at_exit list by
// looping through the linked list and adding up the size of the structs. // looping through the linked list and adding up the size of the structs.
@ -2862,7 +2862,7 @@ vm_memsize(const void *ptr)
); );
// TODO // TODO
// struct { struct list_head set; } ractor; // struct { struct ccan_list_head set; } ractor;
// void *main_altstack; #ifdef USE_SIGALTSTACK // void *main_altstack; #ifdef USE_SIGALTSTACK
// struct rb_objspace *objspace; // struct rb_objspace *objspace;
} }

View File

@ -628,7 +628,7 @@ typedef struct rb_vm_struct {
VALUE self; VALUE self;
struct { struct {
struct list_head set; struct ccan_list_head set;
unsigned int cnt; unsigned int cnt;
unsigned int blocking_cnt; unsigned int blocking_cnt;
@ -658,9 +658,9 @@ typedef struct rb_vm_struct {
rb_serial_t fork_gen; rb_serial_t fork_gen;
rb_nativethread_lock_t waitpid_lock; rb_nativethread_lock_t waitpid_lock;
struct list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */ struct ccan_list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
struct list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */ struct ccan_list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
struct list_head waiting_fds; /* <=> struct waiting_fd */ struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */
/* set in single-threaded processes only: */ /* set in single-threaded processes only: */
volatile int ubf_async_safe; volatile int ubf_async_safe;
@ -701,7 +701,7 @@ typedef struct rb_vm_struct {
int src_encoding_index; int src_encoding_index;
/* workqueue (thread-safe, NOT async-signal-safe) */ /* workqueue (thread-safe, NOT async-signal-safe) */
struct list_head workqueue; /* <=> rb_workqueue_job.jnode */ struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
rb_nativethread_lock_t workqueue_lock; rb_nativethread_lock_t workqueue_lock;
VALUE orig_progname, progname; VALUE orig_progname, progname;
@ -998,7 +998,7 @@ typedef struct rb_ractor_struct rb_ractor_t;
#endif #endif
typedef struct rb_thread_struct { typedef struct rb_thread_struct {
struct list_node lt_node; // managed by a ractor struct ccan_list_node lt_node; // managed by a ractor
VALUE self; VALUE self;
rb_ractor_t *ractor; rb_ractor_t *ractor;
rb_vm_t *vm; rb_vm_t *vm;
@ -1769,11 +1769,11 @@ void rb_thread_wakeup_timer_thread(int);
static inline void static inline void
rb_vm_living_threads_init(rb_vm_t *vm) rb_vm_living_threads_init(rb_vm_t *vm)
{ {
list_head_init(&vm->waiting_fds); ccan_list_head_init(&vm->waiting_fds);
list_head_init(&vm->waiting_pids); ccan_list_head_init(&vm->waiting_pids);
list_head_init(&vm->workqueue); ccan_list_head_init(&vm->workqueue);
list_head_init(&vm->waiting_grps); ccan_list_head_init(&vm->waiting_grps);
list_head_init(&vm->ractor.set); ccan_list_head_init(&vm->ractor.set);
} }
typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE); typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
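rb_vm_living_threads_init() shows the run-time initializer, while the static lists earlier in the series (cached_thread_head, ubf_list_head) use the compile-time CCAN_LIST_HEAD() definition; both spellings come from the same prefixed header. A short sketch contrasting the two forms, with hypothetical struct names:

#include <assert.h>
#include "ccan/list/list.h"

struct job {
    struct ccan_list_node jnode;
};

/* Compile-time form: defines and initializes a head in one go. */
static CCAN_LIST_HEAD(static_queue);

/* Run-time form: for heads embedded in other structures. */
struct vm_like {
    struct ccan_list_head workqueue;
};

int main(void)
{
    struct vm_like vm;
    struct job j;

    ccan_list_head_init(&vm.workqueue);   /* must run before first use */

    ccan_list_add_tail(&static_queue, &j.jnode);
    assert(!ccan_list_empty(&static_queue));
    assert(ccan_list_empty(&vm.workqueue));
    return 0;
}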

View File

@ -1188,7 +1188,7 @@ rb_vmdebug_stack_dump_all_threads(void)
rb_ractor_t *r = GET_RACTOR(); rb_ractor_t *r = GET_RACTOR();
// TODO: now it only shows current ractor // TODO: now it only shows current ractor
list_for_each(&r->threads.set, th, lt_node) { ccan_list_for_each(&r->threads.set, th, lt_node) {
#ifdef NON_SCALAR_THREAD_ID #ifdef NON_SCALAR_THREAD_ID
rb_thread_id_string_t buf; rb_thread_id_string_t buf;
ruby_fill_thread_id_string(th->thread_id, buf); ruby_fill_thread_id_string(th->thread_id, buf);

View File

@ -254,7 +254,7 @@ rb_vm_barrier(void)
// send signal // send signal
rb_ractor_t *r = 0; rb_ractor_t *r = 0;
list_for_each(&vm->ractor.set, r, vmlr_node) { ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
if (r != cr) { if (r != cr) {
rb_ractor_vm_barrier_interrupt_running_thread(r); rb_ractor_vm_barrier_interrupt_running_thread(r);
} }
@ -272,7 +272,7 @@ rb_vm_barrier(void)
vm->ractor.sync.barrier_waiting = false; vm->ractor.sync.barrier_waiting = false;
vm->ractor.sync.barrier_cnt++; vm->ractor.sync.barrier_cnt++;
list_for_each(&vm->ractor.set, r, vmlr_node) { ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
rb_native_cond_signal(&r->barrier_wait_cond); rb_native_cond_signal(&r->barrier_wait_cond);
} }
} }

View File

@ -1594,19 +1594,19 @@ typedef struct rb_postponed_job_struct {
#define MAX_POSTPONED_JOB_SPECIAL_ADDITION 24 #define MAX_POSTPONED_JOB_SPECIAL_ADDITION 24
struct rb_workqueue_job { struct rb_workqueue_job {
struct list_node jnode; /* <=> vm->workqueue */ struct ccan_list_node jnode; /* <=> vm->workqueue */
rb_postponed_job_t job; rb_postponed_job_t job;
}; };
// Used for VM memsize reporting. Returns the size of a list of rb_workqueue_job // Used for VM memsize reporting. Returns the size of a list of rb_workqueue_job
// structs. Defined here because the struct definition lives here as well. // structs. Defined here because the struct definition lives here as well.
size_t size_t
rb_vm_memsize_workqueue(struct list_head *workqueue) rb_vm_memsize_workqueue(struct ccan_list_head *workqueue)
{ {
struct rb_workqueue_job *work = 0; struct rb_workqueue_job *work = 0;
size_t size = 0; size_t size = 0;
list_for_each(workqueue, work, jnode) { ccan_list_for_each(workqueue, work, jnode) {
size += sizeof(struct rb_workqueue_job); size += sizeof(struct rb_workqueue_job);
} }
@ -1732,7 +1732,7 @@ rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
wq_job->job.data = data; wq_job->job.data = data;
rb_nativethread_lock_lock(&vm->workqueue_lock); rb_nativethread_lock_lock(&vm->workqueue_lock);
list_add_tail(&vm->workqueue, &wq_job->jnode); ccan_list_add_tail(&vm->workqueue, &wq_job->jnode);
rb_nativethread_lock_unlock(&vm->workqueue_lock); rb_nativethread_lock_unlock(&vm->workqueue_lock);
// TODO: current implementation affects only main ractor // TODO: current implementation affects only main ractor
@ -1748,12 +1748,12 @@ rb_postponed_job_flush(rb_vm_t *vm)
const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK|TRAP_INTERRUPT_MASK; const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK|TRAP_INTERRUPT_MASK;
volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask; volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
VALUE volatile saved_errno = ec->errinfo; VALUE volatile saved_errno = ec->errinfo;
struct list_head tmp; struct ccan_list_head tmp;
list_head_init(&tmp); ccan_list_head_init(&tmp);
rb_nativethread_lock_lock(&vm->workqueue_lock); rb_nativethread_lock_lock(&vm->workqueue_lock);
list_append_list(&tmp, &vm->workqueue); ccan_list_append_list(&tmp, &vm->workqueue);
rb_nativethread_lock_unlock(&vm->workqueue_lock); rb_nativethread_lock_unlock(&vm->workqueue_lock);
ec->errinfo = Qnil; ec->errinfo = Qnil;
@ -1771,7 +1771,7 @@ rb_postponed_job_flush(rb_vm_t *vm)
(*pjob->func)(pjob->data); (*pjob->func)(pjob->data);
} }
} }
while ((wq_job = list_pop(&tmp, struct rb_workqueue_job, jnode))) { while ((wq_job = ccan_list_pop(&tmp, struct rb_workqueue_job, jnode))) {
rb_postponed_job_t pjob = wq_job->job; rb_postponed_job_t pjob = wq_job->job;
free(wq_job); free(wq_job);
@ -1785,9 +1785,9 @@ rb_postponed_job_flush(rb_vm_t *vm)
ec->errinfo = saved_errno; ec->errinfo = saved_errno;
/* don't leak memory if a job threw an exception */ /* don't leak memory if a job threw an exception */
if (!list_empty(&tmp)) { if (!ccan_list_empty(&tmp)) {
rb_nativethread_lock_lock(&vm->workqueue_lock); rb_nativethread_lock_lock(&vm->workqueue_lock);
list_prepend_list(&vm->workqueue, &tmp); ccan_list_prepend_list(&vm->workqueue, &tmp);
rb_nativethread_lock_unlock(&vm->workqueue_lock); rb_nativethread_lock_unlock(&vm->workqueue_lock);
RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC()); RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
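rb_postponed_job_flush() drains the workqueue by splicing it onto a local head with ccan_list_append_list() while the lock is held, popping entries with ccan_list_pop() once the lock is dropped, and splicing any leftovers back to the front with ccan_list_prepend_list() if a job raised. A condensed sketch of that splice/drain/restore shape, with an illustrative job type and the locking elided:

#include <stdio.h>
#include <stdlib.h>
#include "ccan/list/list.h"

struct job {
    struct ccan_list_node jnode;
    int id;
};

static CCAN_LIST_HEAD(workqueue);

static void enqueue(int id)
{
    struct job *j = malloc(sizeof(*j));
    j->id = id;
    ccan_list_add_tail(&workqueue, &j->jnode);
}

int main(void)
{
    struct ccan_list_head tmp;
    struct job *j;

    enqueue(1);
    enqueue(2);

    /* Splice everything onto a private head (done under the lock in Ruby). */
    ccan_list_head_init(&tmp);
    ccan_list_append_list(&tmp, &workqueue);

    /* Drain without the lock held. */
    while ((j = ccan_list_pop(&tmp, struct job, jnode))) {
        printf("running job %d\n", j->id);
        free(j);
    }

    /* If a job had raised, leftovers would be pushed back to the front. */
    if (!ccan_list_empty(&tmp))
        ccan_list_prepend_list(&workqueue, &tmp);
    return 0;
}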