MDEV-16515 InnoDB: Failing assertion: ++retries < 10000 in file dict0dict.cc

buf_LRU_drop_page_hash_for_tablespace(): Return whether any adaptive hash
index entries existed. If yes, the caller should keep retrying to drop the
adaptive hash index.

row_import_for_mysql(), row_truncate_table_for_mysql(),
row_drop_table_for_mysql(): Ensure that the adaptive hash index was
entirely dropped for the table.
parent c09a8b5b36 · commit c4eb4bcef6
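The fix replaces a single fire-and-forget call with a retry loop in each caller. Below is a minimal standalone sketch of that control flow; drop_page_hash_for_tablespace() and interrupted() are hypothetical stand-ins for buf_LRU_drop_page_hash_for_tablespace() and the trx_is_interrupted()/srv_shutdown_state checks, not the real InnoDB API.

```cpp
// Standalone model of the caller-side retry loop this commit introduces.
#include <atomic>
#include <cstdio>

// Pretend AHI entry count for one tablespace; concurrent lookups could
// keep adding entries, which is why one drop pass is not always enough.
static std::atomic<int> ahi_entries{3};
static std::atomic<bool> shutting_down{false};

// Drops one batch and reports whether anything existed, so the caller
// knows whether to retry (this models the new bool return value).
static bool drop_page_hash_for_tablespace()
{
	if (ahi_entries.load() == 0) {
		return false;	// nothing was dropped; the AHI is clean
	}
	--ahi_entries;
	return true;		// something existed; the caller should retry
}

static bool interrupted()
{
	return shutting_down.load();
}

int main()
{
	// The loop added to row_import_for_mysql(),
	// row_truncate_table_for_mysql() and row_drop_table_for_mysql():
	// retry until a pass drops nothing, but bail out cleanly on
	// interruption instead of asserting after 10000 tries.
	while (drop_page_hash_for_tablespace()) {
		if (interrupted()) {
			std::puts("DB_INTERRUPTED");
			return 1;
		}
	}
	std::puts("adaptive hash index fully dropped");
	return 0;
}
```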
@@ -356,9 +356,10 @@ next_page:
 	ut_free(page_arr);
 }
 
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out]	table	table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 {
 	for (dict_index_t* index = dict_table_get_first_index(table);
 	     index != NULL;
@@ -369,13 +370,15 @@ UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 		}
 	}
 
-	return;
+	return false;
 drop_ahi:
 	ulint id = table->space;
 	for (ulint i = 0; i < srv_buf_pool_instances; i++) {
 		buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
 						      id);
 	}
+
+	return true;
 }
 
 /******************************************************************//**
@@ -2719,12 +2719,11 @@ dict_index_remove_from_cache_low(
 	zero. See also: dict_table_can_be_evicted() */
 
 	do {
-		if (!btr_search_info_get_ref_count(info)) {
+		if (!btr_search_info_get_ref_count(info)
+		    || !buf_LRU_drop_page_hash_for_tablespace(table)) {
 			break;
 		}
 
-		buf_LRU_drop_page_hash_for_tablespace(table);
-
 		ut_a(++retries < 10000);
 	} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
 
@@ -53,9 +53,11 @@ These are low-level functions
 /** Minimum LRU list length for which the LRU_old pointer is defined */
 #define BUF_LRU_OLD_MIN_LEN	512	/* 8 megabytes of 16k pages */
 
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out]	table	table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+	MY_ATTRIBUTE((warn_unused_result,nonnull));
 
 /** Empty the flush list for all pages belonging to a tablespace.
 @param[in]	id		tablespace identifier
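The MY_ATTRIBUTE((warn_unused_result)) annotation turns the old fire-and-forget usage into a compile-time warning instead of a silent bug. A small illustration using GCC/Clang attribute syntax directly; try_drop() is a hypothetical stand-in, not InnoDB code:

```cpp
// Why warn_unused_result helps: once the function returns bool, a
// caller that discards the result (the old pattern) gets a warning.
#include <cstdio>

__attribute__((warn_unused_result))
static bool try_drop()
{
	return false;
}

int main()
{
	// try_drop();		// -Wunused-result: return value ignored
	while (try_drop()) {	// OK: the result drives the retry loop
	}
	std::puts("done");
	return 0;
}
```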
@@ -3983,6 +3983,23 @@ row_import_for_mysql(
 	DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
 			err = DB_TOO_MANY_CONCURRENT_TRXS;);
 
+	/* On DISCARD TABLESPACE, we did not drop any adaptive hash
+	index entries. If we replaced the discarded tablespace with a
+	smaller one here, there could still be some adaptive hash
+	index entries that point to cached garbage pages in the buffer
+	pool, because PageConverter::operator() only evicted those
+	pages that were replaced by the imported pages. We must
+	discard all remaining adaptive hash index entries, because the
+	adaptive hash index must be a subset of the table contents;
+	false positives are not tolerated. */
+	while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+		if (trx_is_interrupted(trx)
+		    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+			err = DB_INTERRUPTED;
+			break;
+		}
+	}
+
 	if (err != DB_SUCCESS) {
 		char table_name[MAX_FULL_NAME_LEN + 1];
 
@@ -4000,17 +4017,6 @@ row_import_for_mysql(
 		return(row_import_cleanup(prebuilt, trx, err));
 	}
 
-	/* On DISCARD TABLESPACE, we did not drop any adaptive hash
-	index entries. If we replaced the discarded tablespace with a
-	smaller one here, there could still be some adaptive hash
-	index entries that point to cached garbage pages in the buffer
-	pool, because PageConverter::operator() only evicted those
-	pages that were replaced by the imported pages. We must
-	discard all remaining adaptive hash index entries, because the
-	adaptive hash index must be a subset of the table contents;
-	false positives are not tolerated. */
-	buf_LRU_drop_page_hash_for_tablespace(table);
-
 	row_mysql_lock_data_dictionary(trx);
 
 	/* If the table is stored in a remote tablespace, we need to
@@ -3516,7 +3516,13 @@ row_truncate_table_for_mysql(
 		fil_space_release(space);
 	}
 
-	buf_LRU_drop_page_hash_for_tablespace(table);
+	while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+		if (trx_is_interrupted(trx)
+		    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+			err = DB_INTERRUPTED;
+			goto funct_exit;
+		}
+	}
 
 	if (flags != ULINT_UNDEFINED
 	    && fil_discard_tablespace(space_id) == DB_SUCCESS) {
@@ -4172,6 +4178,27 @@ row_drop_table_for_mysql(
 
 	ut_a(!lock_table_has_locks(table));
 
+	if (table->space != TRX_SYS_SPACE) {
+		/* On DISCARD TABLESPACE, we would not drop the
+		adaptive hash index entries. If the tablespace is
+		missing here, delete-marking the record in SYS_INDEXES
+		would not free any pages in the buffer pool. Thus,
+		dict_index_remove_from_cache() would hang due to
+		adaptive hash index entries existing in the buffer
+		pool. To prevent this hang, and also to guarantee
+		that btr_search_drop_page_hash_when_freed() will avoid
+		calling btr_search_drop_page_hash_index() while we
+		hold the InnoDB dictionary lock, we will drop any
+		adaptive hash index entries upfront. */
+		while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+			if (trx_is_interrupted(trx)
+			    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+				err = DB_INTERRUPTED;
+				goto funct_exit;
+			}
+		}
+	}
+
 	switch (trx_get_dict_operation(trx)) {
 	case TRX_DICT_OP_NONE:
 		trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
@@ -4211,21 +4238,6 @@ row_drop_table_for_mysql(
 		rw_lock_x_unlock(dict_index_get_lock(index));
 	}
 
-	if (table->space != TRX_SYS_SPACE) {
-		/* On DISCARD TABLESPACE, we would not drop the
-		adaptive hash index entries. If the tablespace is
-		missing here, delete-marking the record in SYS_INDEXES
-		would not free any pages in the buffer pool. Thus,
-		dict_index_remove_from_cache() would hang due to
-		adaptive hash index entries existing in the buffer
-		pool. To prevent this hang, and also to guarantee
-		that btr_search_drop_page_hash_when_freed() will avoid
-		calling btr_search_drop_page_hash_index() while we
-		hold the InnoDB dictionary lock, we will drop any
-		adaptive hash index entries upfront. */
-		buf_LRU_drop_page_hash_for_tablespace(table);
-	}
-
 	/* We use the private SQL parser of Innobase to generate the
 	query graphs needed in deleting the dictionary data from system
 	tables in Innobase. Deleting a row from SYS_INDEXES table also
The remaining hunks apply the same change to a second, parallel copy of the code (apparently the XtraDB sources, where btr_search_info_get_ref_count() takes an additional index argument):

@@ -354,9 +354,10 @@ next_page:
 	ut_free(page_arr);
 }
 
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out]	table	table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 {
 	for (dict_index_t* index = dict_table_get_first_index(table);
 	     index != NULL;
@@ -367,13 +368,15 @@ UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 		}
 	}
 
-	return;
+	return false;
drop_ahi:
 	ulint id = table->space;
 	for (ulint i = 0; i < srv_buf_pool_instances; i++) {
 		buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
 						      id);
 	}
+
+	return true;
 }
 
 /******************************************************************//**
@@ -2729,11 +2729,11 @@ dict_index_remove_from_cache_low(
 	zero. See also: dict_table_can_be_evicted() */
 
 	do {
-		if (!btr_search_info_get_ref_count(info, index)) {
+		if (!btr_search_info_get_ref_count(info, index)
+		    || !buf_LRU_drop_page_hash_for_tablespace(table)) {
 			break;
 		}
 
-		buf_LRU_drop_page_hash_for_tablespace(table);
 		ut_a(++retries < 10000);
 	} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
 
@@ -55,9 +55,11 @@ These are low-level functions
 /** Minimum LRU list length for which the LRU_old pointer is defined */
 #define BUF_LRU_OLD_MIN_LEN	512	/* 8 megabytes of 16k pages */
 
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out]	table	table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+	MY_ATTRIBUTE((warn_unused_result,nonnull));
 
 /** Empty the flush list for all pages belonging to a tablespace.
 @param[in]	id		tablespace identifier
@@ -3982,6 +3982,23 @@ row_import_for_mysql(
 	DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
 			err = DB_TOO_MANY_CONCURRENT_TRXS;);
 
+	/* On DISCARD TABLESPACE, we did not drop any adaptive hash
+	index entries. If we replaced the discarded tablespace with a
+	smaller one here, there could still be some adaptive hash
+	index entries that point to cached garbage pages in the buffer
+	pool, because PageConverter::operator() only evicted those
+	pages that were replaced by the imported pages. We must
+	discard all remaining adaptive hash index entries, because the
+	adaptive hash index must be a subset of the table contents;
+	false positives are not tolerated. */
+	while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+		if (trx_is_interrupted(trx)
+		    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+			err = DB_INTERRUPTED;
+			break;
+		}
+	}
+
 	if (err != DB_SUCCESS) {
 		char table_name[MAX_FULL_NAME_LEN + 1];
 
@@ -3999,17 +4016,6 @@ row_import_for_mysql(
 		return(row_import_cleanup(prebuilt, trx, err));
 	}
 
-	/* On DISCARD TABLESPACE, we did not drop any adaptive hash
-	index entries. If we replaced the discarded tablespace with a
-	smaller one here, there could still be some adaptive hash
-	index entries that point to cached garbage pages in the buffer
-	pool, because PageConverter::operator() only evicted those
-	pages that were replaced by the imported pages. We must
-	discard all remaining adaptive hash index entries, because the
-	adaptive hash index must be a subset of the table contents;
-	false positives are not tolerated. */
-	buf_LRU_drop_page_hash_for_tablespace(table);
-
 	row_mysql_lock_data_dictionary(trx);
 
 	/* If the table is stored in a remote tablespace, we need to
@@ -3540,7 +3540,13 @@ row_truncate_table_for_mysql(
 		fil_space_release(space);
 	}
 
-	buf_LRU_drop_page_hash_for_tablespace(table);
+	while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+		if (trx_is_interrupted(trx)
+		    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+			err = DB_INTERRUPTED;
+			goto funct_exit;
+		}
+	}
 
 	if (flags != ULINT_UNDEFINED
 	    && fil_discard_tablespace(space_id) == DB_SUCCESS) {
@@ -4202,6 +4208,27 @@ row_drop_table_for_mysql(
 
 	ut_a(!lock_table_has_locks(table));
 
+	if (table->space != TRX_SYS_SPACE) {
+		/* On DISCARD TABLESPACE, we would not drop the
+		adaptive hash index entries. If the tablespace is
+		missing here, delete-marking the record in SYS_INDEXES
+		would not free any pages in the buffer pool. Thus,
+		dict_index_remove_from_cache() would hang due to
+		adaptive hash index entries existing in the buffer
+		pool. To prevent this hang, and also to guarantee
+		that btr_search_drop_page_hash_when_freed() will avoid
+		calling btr_search_drop_page_hash_index() while we
+		hold the InnoDB dictionary lock, we will drop any
+		adaptive hash index entries upfront. */
+		while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+			if (trx_is_interrupted(trx)
+			    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+				err = DB_INTERRUPTED;
+				goto funct_exit;
+			}
+		}
+	}
+
 	switch (trx_get_dict_operation(trx)) {
 	case TRX_DICT_OP_NONE:
 		trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
@@ -4241,21 +4268,6 @@ row_drop_table_for_mysql(
 		rw_lock_x_unlock(dict_index_get_lock(index));
 	}
 
-	if (table->space != TRX_SYS_SPACE) {
-		/* On DISCARD TABLESPACE, we would not drop the
-		adaptive hash index entries. If the tablespace is
-		missing here, delete-marking the record in SYS_INDEXES
-		would not free any pages in the buffer pool. Thus,
-		dict_index_remove_from_cache() would hang due to
-		adaptive hash index entries existing in the buffer
-		pool. To prevent this hang, and also to guarantee
-		that btr_search_drop_page_hash_when_freed() will avoid
-		calling btr_search_drop_page_hash_index() while we
-		hold the InnoDB dictionary lock, we will drop any
-		adaptive hash index entries upfront. */
-		buf_LRU_drop_page_hash_for_tablespace(table);
-	}
-
 	/* We use the private SQL parser of Innobase to generate the
 	query graphs needed in deleting the dictionary data from system
 	tables in Innobase. Deleting a row from SYS_INDEXES table also