MDEV-27700 ASAN: heap-use-after-free in btr_search_drop_page_hash_index()

Reason:
=======
Race condition between btr_search_drop_page_hash_index() and
btr_search_lazy_free(). One thread resizes the buffer pool, clears
the adaptive hash index (ahi) on all pages in the buffer pool, and
frees the index and table while removing the last reference. At the
same time, another thread accesses index->heap in
btr_search_drop_page_hash_index().
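
To make the ordering problem concrete, here is a standalone C++17 sketch of
the two sides of the race. Every name in it is an invented stand-in for this
illustration (Index for dict_index_t, Block for buf_block_t, ahi_latch for one
btr_search_latches[] slot), and a shared_ptr replaces the raw block->index
pointer so that the sketch itself stays well-defined:

// Standalone model of the race window; all names are illustrative
// stand-ins, not MariaDB code.
#include <cstdio>
#include <memory>
#include <mutex>
#include <shared_mutex>

struct Index {
  bool freed = false;   // analogue of dict_index_t::freed()
  int  heap  = 42;      // stands in for index->heap
};

struct Block {
  std::shared_ptr<Index> index;  // the real code holds a raw pointer here
};

std::shared_mutex ahi_latch;     // stand-in for one AHI latch part

// Buffer-pool-resize / lazy-free side: under the exclusive latch it clears
// the block's AHI reference and drops the last index reference, after which
// the index memory (including its heap) is gone.
void lazy_free_side(Block& block) {
  std::unique_lock<std::shared_mutex> x(ahi_latch);
  if (block.index) {
    block.index->freed = true;
    block.index.reset();
  }
}

// Reader side with the old ordering: block->index is fetched and freed() is
// checked *before* any AHI latch is taken.  With a raw pointer the lazy-free
// side may complete inside that window, so the later access to index->heap
// dangles.  (The shared_ptr keeps this model itself well-defined.)
int reader_old_ordering(Block& block) {
  std::shared_ptr<Index> index = block.index;        // unlatched read
  bool is_freed = index && index->freed;             // unlatched check
  if (is_freed) ahi_latch.lock(); else ahi_latch.lock_shared();
  int value = index ? index->heap : -1;              // dangles in the real code
  if (is_freed) ahi_latch.unlock(); else ahi_latch.unlock_shared();
  return value;
}

int main() {
  Block block{std::make_shared<Index>()};
  std::printf("heap value: %d\n", reader_old_ordering(block));
  lazy_free_side(block);
}

In the real code block->index is a plain pointer, so once the lazy free has
run, the subsequent index->heap access is exactly the heap-use-after-free
that ASAN reports.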

Solution:
=========
Acquire the respective ahi latch before checking index->freed().

btr_search_drop_page_hash_index(): Added a new parameter
garbage_collect to indicate that ahi entries should be dropped only
if the index is marked as freed.

btr_search_check_marked_free_index(): Acquire the respective ahi
latch and return true if the block's index was freed.
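
A minimal sketch of the corrected latch-then-check sequence follows, again
with invented stand-in types rather than the real InnoDB ones; the
authoritative change is the btr_search_drop_page_hash_index() hunk in the
diff below:

#include <shared_mutex>

struct Index {
  bool freed_flag = false;
  bool freed() const { return freed_flag; }
};

struct Block {
  Index* index = nullptr;
};

std::shared_mutex ahi_latch;   // stand-in for one AHI latch part

// Sketch of the fixed ordering: the latch is taken *before* block->index
// and freed() are examined.
void drop_page_hash_index(Block* block, bool garbage_collect) {
retry:
  ahi_latch.lock_shared();                 // latch first ...
  Index* index = block->index;             // ... then inspect the block
  bool is_freed = index && index->freed();
  if (is_freed) {
    // Upgrade to the exclusive latch; it was released in between, so
    // re-validate block->index and start over if it changed.
    ahi_latch.unlock_shared();
    ahi_latch.lock();
    if (index != block->index) {
      ahi_latch.unlock();
      goto retry;
    }
  } else if (garbage_collect) {
    // Caller only wants to reap entries of indexes marked as freed.
    ahi_latch.unlock_shared();
    return;
  }
  // ... drop the adaptive hash entries while still holding the latch ...
  if (is_freed) ahi_latch.unlock(); else ahi_latch.unlock_shared();
}

int main() {
  Block block;
  drop_page_hash_index(&block, /*garbage_collect=*/true);
}

The early return on garbage_collect is what lets buf_defer_drop_ahi() reap
only pages whose index is already marked as freed, without disturbing live
ahi entries.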
Thirunarayanan Balathandayuthapani 2022-08-16 15:31:49 +05:30
parent fd0cd4801a
commit c7f8cfc9e7
5 changed files with 63 additions and 22 deletions

@@ -721,8 +721,9 @@ void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
 	bool	blob)
 {
 	ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
-#ifdef BTR_CUR_HASH_ADAPT
-	if (block->index && !block->index->freed()) {
+#if defined BTR_CUR_HASH_ADAPT && defined UNIV_DEBUG
+	if (block->index
+	    && !btr_search_check_marked_free_index(block)) {
 		ut_ad(!blob);
 		ut_ad(page_is_leaf(block->frame));
 	}

@@ -1102,8 +1102,11 @@ fail_and_release_page:
 			index page for which we know that
 			block->buf_fix_count == 0 or it is an index page which
 			has already been removed from the buf_pool->page_hash
-			i.e.: it is in state BUF_BLOCK_REMOVE_HASH */
-void btr_search_drop_page_hash_index(buf_block_t* block)
+			i.e.: it is in state BUF_BLOCK_REMOVE_HASH
+@param[in]	garbage_collect	drop ahi only if the index is marked
+				as freed */
+void btr_search_drop_page_hash_index(buf_block_t* block,
+				     bool garbage_collect)
 {
 	ulint		n_fields;
 	ulint		n_bytes;
@@ -1149,13 +1152,21 @@ retry:
 		% btr_ahi_parts;
 	latch = btr_search_latches[ahi_slot];
 
+	rw_lock_s_lock(latch);
+
 	dict_index_t* index = block->index;
 
 	bool is_freed = index && index->freed();
 	if (is_freed) {
+		rw_lock_s_unlock(latch);
 		rw_lock_x_lock(latch);
-	} else {
-		rw_lock_s_lock(latch);
+		if (index != block->index) {
+			rw_lock_x_unlock(latch);
+			goto retry;
+		}
+	} else if (garbage_collect) {
+		rw_lock_s_unlock(latch);
+		return;
 	}
 
 	assert_block_ahi_valid(block);
@@ -2220,5 +2231,22 @@ btr_search_validate()
 	return(true);
 }
 
+#ifdef UNIV_DEBUG
+bool btr_search_check_marked_free_index(const buf_block_t *block)
+{
+  const index_id_t index_id= btr_page_get_index_id(block->frame);
+
+  rw_lock_t *ahi_latch= btr_get_search_latch(
+                          index_id, block->page.id.space());
+
+  rw_lock_s_lock(ahi_latch);
+
+  bool is_freed= block->index && block->index->freed();
+
+  rw_lock_s_unlock(ahi_latch);
+
+  return is_freed;
+}
+#endif /* UNIV_DEBUG */
 #endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
 #endif /* BTR_CUR_HASH_ADAPT */

@@ -3935,18 +3935,14 @@ static void buf_defer_drop_ahi(buf_block_t *block, mtr_memo_type_t fix_type)
     /* Temporarily release our S-latch. */
     rw_lock_s_unlock(&block->lock);
     rw_lock_x_lock(&block->lock);
-    if (dict_index_t *index= block->index)
-      if (index->freed())
-        btr_search_drop_page_hash_index(block);
+    btr_search_drop_page_hash_index(block, true);
     rw_lock_x_unlock(&block->lock);
     rw_lock_s_lock(&block->lock);
     break;
   case MTR_MEMO_PAGE_SX_FIX:
     rw_lock_sx_unlock(&block->lock);
     rw_lock_x_lock(&block->lock);
-    if (dict_index_t *index= block->index)
-      if (index->freed())
-        btr_search_drop_page_hash_index(block);
+    btr_search_drop_page_hash_index(block, true);
     rw_lock_x_unlock(&block->lock);
     rw_lock_sx_lock(&block->lock);
     break;
@@ -3993,8 +3989,7 @@ static buf_block_t* buf_page_mtr_lock(buf_block_t *block,
 #ifdef BTR_CUR_HASH_ADAPT
   {
-    dict_index_t *index= block->index;
-    if (index && index->freed())
+    if (block->index)
       buf_defer_drop_ahi(block, fix_type);
   }
 #endif /* BTR_CUR_HASH_ADAPT */
@@ -4916,7 +4911,7 @@ buf_page_get_known_nowait(
 # ifdef BTR_CUR_HASH_ADAPT
 	ut_ad(!block->page.file_page_was_freed
-	      || (block->index && block->index->freed()));
+	      || btr_search_check_marked_free_index(block));
 # else /* BTR_CUR_HASH_ADAPT */
 	ut_ad(!block->page.file_page_was_freed);
 # endif /* BTR_CUR_HASH_ADAPT */

@@ -99,8 +99,11 @@ btr_search_move_or_delete_hash_entries(
 			index page for which we know that
 			block->buf_fix_count == 0 or it is an index page which
 			has already been removed from the buf_pool->page_hash
-			i.e.: it is in state BUF_BLOCK_REMOVE_HASH */
-void btr_search_drop_page_hash_index(buf_block_t* block);
+			i.e.: it is in state BUF_BLOCK_REMOVE_HASH
+@param[in]	garbage_collect	drop ahi only if the index is marked
+				as freed */
+void btr_search_drop_page_hash_index(buf_block_t* block,
+				     bool garbage_collect= false);
 
 /** Drop possible adaptive hash index entries when a page is evicted
 from the buffer pool or freed in a file, or the index is being dropped.
@@ -173,16 +176,25 @@ A table is selected from an array of tables using pair of index-id, space-id.
 @param[in]	index	index handler
 @return hash table */
 static inline hash_table_t* btr_get_search_table(const dict_index_t* index);
+
+#ifdef UNIV_DEBUG
+/** @return if the index is marked as freed */
+bool btr_search_check_marked_free_index(const buf_block_t *block);
+#endif /* UNIV_DEBUG */
+
 #else /* BTR_CUR_HASH_ADAPT */
 # define btr_search_sys_create(size)
 # define btr_search_sys_free()
-# define btr_search_drop_page_hash_index(block)
+# define btr_search_drop_page_hash_index(block, garbage_collect)
 # define btr_search_s_lock_all(index)
 # define btr_search_s_unlock_all(index)
 # define btr_search_info_update(index, cursor)
 # define btr_search_move_or_delete_hash_entries(new_block, block)
 # define btr_search_update_hash_on_insert(cursor, ahi_latch)
 # define btr_search_update_hash_on_delete(cursor)
+#ifdef UNIV_DEBUG
+# define btr_search_check_marked_free_index(block)
+#endif /* UNIV_DEBUG */
 #endif /* BTR_CUR_HASH_ADAPT */
 
 #ifdef BTR_CUR_ADAPT

@@ -158,6 +158,14 @@ static inline bool btr_search_own_any()
 }
 #endif /* UNIV_DEBUG */
 
+static inline rw_lock_t* btr_get_search_latch(
+	index_id_t index_id, ulint space_id)
+{
+	ulint	ifold = ut_fold_ulint_pair(ulint(index_id), space_id);
+
+	return(btr_search_latches[ifold % btr_ahi_parts]);
+}
+
 /** Get the adaptive hash search index latch for a b-tree.
 @param[in]	index	b-tree index
 @return latch */
@@ -167,10 +175,7 @@ static inline rw_lock_t* btr_get_search_latch(const dict_index_t* index)
 	ut_ad(!index->table->space
 	      || index->table->space->id == index->table->space_id);
 
-	ulint	ifold = ut_fold_ulint_pair(ulint(index->id),
-					   index->table->space_id);
-
-	return(btr_search_latches[ifold % btr_ahi_parts]);
+	return btr_get_search_latch(index->id, index->table->space_id);
 }
 
 /** Get the hash-table based on index attributes.