MDEV-36646: innodb_buffer_pool_size change aborted

A SET GLOBAL innodb_buffer_pool_size=... statement that shrinks the
buffer pool could fail for no good reason when the buffer pool contains
many pages that could actually be evicted.

buf_flush_LRU_list_batch(): Keep evicting as long as the buffer pool
is being shrunk, for at most innodb_lru_scan_depth extra blocks.
Disregard the flush limit for pages that are marked as freed in files.

buf_flush_LRU_to_withdraw(): Update the to_withdraw target during
buf_flush_LRU_list_batch().

buf_pool_t::will_be_withdrawn(): Also allow ptr=nullptr (the condition
will not hold for it).
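
To see the combined effect of these changes, here is a standalone toy
model (not InnoDB code; the values, the variable names and the buf_pool
bookkeeping are simplified assumptions) of the resulting eviction
policy: the withdraw target is capped at the LRU scan depth, and the
LRU batch keeps evicting for as long as that target is nonzero.

  // Toy model only: stands in for buf_flush_LRU_list_batch() bookkeeping.
  #include <algorithm>
  #include <cstddef>
  #include <cstdio>

  int main()
  {
    std::size_t lru_scan_depth= 1024; // stands in for innodb_lru_scan_depth
    std::size_t to_withdraw= 5000;    // blocks still to withdraw for shrinking
    std::size_t free_len= 0;          // stands in for UT_LIST_GET_LEN(buf_pool.free)

    std::size_t free_limit= lru_scan_depth;
    // Cap the extra eviction work at innodb_lru_scan_depth.
    to_withdraw= std::min(to_withdraw, free_limit);

    std::size_t evicted= 0;
    // The "|| to_withdraw" condition keeps the batch evicting while the
    // shrink still needs blocks, for at most free_limit extra iterations.
    while (free_len < free_limit || to_withdraw)
    {
      ++evicted, ++free_len;
      if (to_withdraw)
        // In InnoDB this is decremented only when the evicted block resides
        // in the area being withdrawn (see buf_flush_LRU_to_withdraw()).
        --to_withdraw;
    }
    std::printf("evicted %zu blocks\n", evicted);
    return 0;
  }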

This fixes a regression that was introduced in
commit b6923420f326ac030e4f3ef89a2acddb45eccb30 (MDEV-29445)
and caught by the test innodb.temp_truncate_freed in MariaDB Server 11.4.

Tested by: Thirunarayanan Balathandayuthapani
Reviewed by: Thirunarayanan Balathandayuthapani
Author: Marko Mäkelä
Date:   2025-04-23 15:42:12 +03:00
parent 1a044437a3
commit f1a8b7fe95
3 changed files with 36 additions and 8 deletions


@@ -1696,8 +1696,7 @@ ATTRIBUTE_COLD buf_pool_t::shrink_status buf_pool_t::shrink(size_t size)
       continue;
     }
-    if (UNIV_LIKELY_NULL(b->zip.data) &&
-        will_be_withdrawn(b->zip.data, size))
+    if (UNIV_UNLIKELY(will_be_withdrawn(b->zip.data, size)))
     {
       block= buf_buddy_shrink(b, block);
       ut_ad(mach_read_from_4(b->zip.data + FIL_PAGE_OFFSET) == id.page_no());


@@ -1212,6 +1212,21 @@ static void buf_flush_discard_page(buf_page_t *bpage) noexcept
   buf_LRU_free_page(bpage, true);
 }
 
+/** Adjust to_withdraw during buf_pool_t::shrink() */
+ATTRIBUTE_COLD static size_t buf_flush_LRU_to_withdraw(size_t to_withdraw,
+                                                       const buf_page_t &bpage)
+  noexcept
+{
+  mysql_mutex_assert_owner(&buf_pool.mutex);
+  if (!buf_pool.is_shrinking())
+    return 0;
+  const size_t size{buf_pool.size_in_bytes_requested};
+  if (buf_pool.will_be_withdrawn(bpage.frame, size) ||
+      buf_pool.will_be_withdrawn(bpage.zip.data, size))
+    to_withdraw--;
+  return to_withdraw;
+}
+
 /** Flush dirty blocks from the end buf_pool.LRU,
 and move clean blocks to buf_pool.free.
 @param max    maximum number of blocks to flush
@@ -1222,7 +1237,9 @@ static void buf_flush_LRU_list_batch(ulint max, flush_counters_t *n,
 {
   size_t scanned= 0;
   mysql_mutex_assert_owner(&buf_pool.mutex);
-  size_t free_limit{buf_pool.LRU_scan_depth + to_withdraw};
+  size_t free_limit{buf_pool.LRU_scan_depth};
+  if (UNIV_UNLIKELY(to_withdraw > free_limit))
+    to_withdraw= free_limit;
   const auto neighbors= UT_LIST_GET_LEN(buf_pool.LRU) < BUF_LRU_OLD_MIN_LEN
     ? 0 : buf_pool.flush_neighbors;
   fil_space_t *space= nullptr;
@@ -1246,6 +1263,7 @@ static void buf_flush_LRU_list_batch(ulint max, flush_counters_t *n,
        bpage &&
        ((UT_LIST_GET_LEN(buf_pool.LRU) > buf_lru_min_len &&
          UT_LIST_GET_LEN(buf_pool.free) < free_limit) ||
+        to_withdraw ||
         recv_recovery_is_on());
        ++scanned, bpage= buf_pool.lru_hp.get())
   {
@@ -1261,6 +1279,8 @@ static void buf_flush_LRU_list_batch(ulint max, flush_counters_t *n,
     if (state != buf_page_t::FREED &&
         (state >= buf_page_t::READ_FIX || (~buf_page_t::LRU_MASK & state)))
       continue;
+    if (UNIV_UNLIKELY(to_withdraw != 0))
+      to_withdraw= buf_flush_LRU_to_withdraw(to_withdraw, *bpage);
     buf_LRU_free_page(bpage, true);
     ++n->evicted;
     if (UNIV_LIKELY(scanned & 31))
@@ -1332,23 +1352,32 @@ static void buf_flush_LRU_list_batch(ulint max, flush_counters_t *n,
       continue;
     }
+    if (state < buf_page_t::UNFIXED)
+      goto flush;
     if (n->flushed >= max && !recv_recovery_is_on())
     {
       bpage->lock.u_unlock(true);
       break;
     }
-    if (neighbors && space->is_rotational() &&
+    if (neighbors && space->is_rotational() && UNIV_LIKELY(!to_withdraw) &&
         /* Skip neighbourhood flush from LRU list if we haven't yet reached
         half of the free page target. */
         UT_LIST_GET_LEN(buf_pool.free) * 2 >= free_limit)
       n->flushed+= buf_flush_try_neighbors(space, page_id, bpage,
                                            neighbors == 1,
                                            n->flushed, max);
-    else if (bpage->flush(space))
-      ++n->flushed;
-    else
-      continue;
+    else
+    {
+    flush:
+      if (UNIV_UNLIKELY(to_withdraw != 0))
+        to_withdraw= buf_flush_LRU_to_withdraw(to_withdraw, *bpage);
+      if (bpage->flush(space))
+        ++n->flushed;
+      else
+        continue;
+    }
     goto reacquire_mutex;
   }


@@ -1288,7 +1288,7 @@ public:
   bool will_be_withdrawn(const byte *ptr, size_t size) const noexcept
   {
     const char *p= reinterpret_cast<const char*>(ptr);
-    ut_ad(p >= memory);
+    ut_ad(!p || p >= memory);
     ut_ad(p < memory + size_in_bytes_max);
     return p >= memory + size;
   }