Fix thrashing of major GC when size pool is small
If a size pool is small, then `min_free_slots < heap_init_slots` is true, which means that `min_free_slots` will be set to `heap_init_slots`. This causes `swept_slots < min_free_slots` to be true in a later if statement, and that if statement could trigger a major GC, which could cause major GC thrashing.
This commit is contained in:
parent
c72a748b27
commit
80e56d1438
Notes:
git
2022-12-20 16:33:14 +00:00
18
gc.c
18
gc.c
@ -5844,18 +5844,13 @@ gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
|
|||||||
size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
|
size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
|
||||||
|
|
||||||
size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
|
size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
|
||||||
/* Some size pools may have very few pages (or even no pages). These size pools
|
|
||||||
* should still have allocatable pages. */
|
|
||||||
if (min_free_slots < gc_params.heap_init_slots) {
|
|
||||||
min_free_slots = gc_params.heap_init_slots;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* If we don't have enough slots and we have pages on the tomb heap, move
|
/* If we don't have enough slots and we have pages on the tomb heap, move
|
||||||
* pages from the tomb heap to the eden heap. This may prevent page
|
* pages from the tomb heap to the eden heap. This may prevent page
|
||||||
* creation thrashing (frequently allocating and deallocting pages) and
|
* creation thrashing (frequently allocating and deallocting pages) and
|
||||||
* GC thrashing (running GC more frequently than required). */
|
* GC thrashing (running GC more frequently than required). */
|
||||||
struct heap_page *resurrected_page;
|
struct heap_page *resurrected_page;
|
||||||
while (swept_slots < min_free_slots &&
|
while ((swept_slots < min_free_slots || swept_slots < gc_params.heap_init_slots) &&
|
||||||
(resurrected_page = heap_page_resurrect(objspace, size_pool))) {
|
(resurrected_page = heap_page_resurrect(objspace, size_pool))) {
|
||||||
swept_slots += resurrected_page->free_slots;
|
swept_slots += resurrected_page->free_slots;
|
||||||
|
|
||||||
@ -5863,6 +5858,17 @@ gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
|
|||||||
heap_add_freepage(heap, resurrected_page);
|
heap_add_freepage(heap, resurrected_page);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Some size pools may have very few pages (or even no pages). These size pools
|
||||||
|
* should still have allocatable pages. */
|
||||||
|
if (min_free_slots < gc_params.heap_init_slots && swept_slots < gc_params.heap_init_slots) {
|
||||||
|
int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
|
||||||
|
size_t extra_slots = gc_params.heap_init_slots - swept_slots;
|
||||||
|
size_t extend_page_count = CEILDIV(extra_slots * multiple, HEAP_PAGE_OBJ_LIMIT);
|
||||||
|
if (extend_page_count > size_pool->allocatable_pages) {
|
||||||
|
size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (swept_slots < min_free_slots) {
|
if (swept_slots < min_free_slots) {
|
||||||
bool grow_heap = is_full_marking(objspace);
|
bool grow_heap = is_full_marking(objspace);
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user