From 3009b5439dd2fc88b1e255b3ee57d32333829fb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Mon, 10 Feb 2025 09:19:23 +0200 Subject: [PATCH 1/7] MDEV-35941 : galera_bf_abort_lock_table fails with wait for metadata lock Problem was missing case from wsrep_handle_mdl_conflict. Test case was trying to confirm that LOCK TABLE thread is not BF-aborted. However as case was missing it was BF-aborted. Test case passed because BF-aborting takes time and used wait condition might see expected thread status before it was BF-aborted. Test naturally failed if BF-aborting was done early enough. Fix is to add missing case for SQLCOM_LOCK_TABLES to wsrep_handle_mdl_conflict. Note that using LOCK TABLE is still not recomended on cluster because it could cause cluster hang. Signed-off-by: Julius Goryavsky --- .../suite/galera/t/galera_bf_abort_lock_table.cnf | 5 ----- .../suite/galera/t/galera_bf_abort_lock_table.test | 5 ++--- sql/wsrep_mysqld.cc | 13 +++++++++---- 3 files changed, 11 insertions(+), 12 deletions(-) delete mode 100644 mysql-test/suite/galera/t/galera_bf_abort_lock_table.cnf diff --git a/mysql-test/suite/galera/t/galera_bf_abort_lock_table.cnf b/mysql-test/suite/galera/t/galera_bf_abort_lock_table.cnf deleted file mode 100644 index 033e6f8b99a..00000000000 --- a/mysql-test/suite/galera/t/galera_bf_abort_lock_table.cnf +++ /dev/null @@ -1,5 +0,0 @@ -!include ../galera_2nodes.cnf - -[mysqld.1] -wsrep-debug=1 -loose-galera-bf-abort-lock-table=1 diff --git a/mysql-test/suite/galera/t/galera_bf_abort_lock_table.test b/mysql-test/suite/galera/t/galera_bf_abort_lock_table.test index 06009712c7b..fe8aea9f248 100644 --- a/mysql-test/suite/galera/t/galera_bf_abort_lock_table.test +++ b/mysql-test/suite/galera/t/galera_bf_abort_lock_table.test @@ -1,6 +1,5 @@ --source include/galera_cluster.inc --source include/have_innodb.inc ---source include/force_restart.inc # # Test that a local LOCK TABLE will NOT be broken by an incoming remote transaction against that table @@ -20,13 +19,13 @@ INSERT INTO t1 VALUES (2); SET SESSION wsrep_sync_wait = 0; --let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'Waiting for table metadata lock' --let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST ---source include/wait_condition_with_debug_and_kill.inc +--source include/wait_condition_with_debug.inc UNLOCK TABLES; --let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'Waiting for table metadata lock' --let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST ---source include/wait_condition_with_debug_and_kill.inc +--source include/wait_condition_with_debug.inc COMMIT; SELECT COUNT(*) = 1 FROM t1; diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 4e240432939..de40c24bebe 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -3191,11 +3191,9 @@ void wsrep_to_isolation_end(THD *thd) @param requestor_ctx The MDL context of the requestor @param ticket MDL ticket for the requested lock + @param key The key of the object (data) being protected - @retval TRUE Lock request can be granted - @retval FALSE Lock request cannot be granted */ - void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, const MDL_ticket *ticket, const MDL_key *key) @@ -3268,14 +3266,21 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, (granted_thd->system_thread != NON_SYSTEM_THREAD && 
granted_thd->mdl_context.has_explicit_locks())) { - WSREP_DEBUG("BF thread waiting for FLUSH"); + WSREP_DEBUG("BF thread waiting for %s", + granted_thd->lex->sql_command == SQLCOM_FLUSH ? "FLUSH" : "BACKUP"); ticket->wsrep_report(wsrep_debug); + if (granted_thd->current_backup_stage != BACKUP_FINISHED && wsrep_check_mode(WSREP_MODE_BF_MARIABACKUP)) { wsrep_abort_thd(request_thd, granted_thd, 1); } } + else if (granted_thd->lex->sql_command == SQLCOM_LOCK_TABLES) + { + WSREP_DEBUG("BF thread waiting for LOCK TABLES"); + ticket->wsrep_report(wsrep_debug); + } else if (request_thd->lex->sql_command == SQLCOM_DROP_TABLE) { WSREP_DEBUG("DROP caused BF abort, conf %s", From 44e1f7238aab19a4cb530d4e2ad84b394b633f75 Mon Sep 17 00:00:00 2001 From: Julius Goryavsky Date: Wed, 12 Feb 2025 01:29:09 +0100 Subject: [PATCH 2/7] MDEV-35941 addendum: additional corrections for mtr tests --- mysql-test/suite/galera/r/mysql-wsrep#198.result | 3 +++ mysql-test/suite/galera/t/galera_bf_abort_lock_table.test | 4 ++-- mysql-test/suite/galera/t/mysql-wsrep#198.cnf | 2 -- mysql-test/suite/galera/t/mysql-wsrep#198.test | 8 ++++++-- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/mysql-test/suite/galera/r/mysql-wsrep#198.result b/mysql-test/suite/galera/r/mysql-wsrep#198.result index 5b569ffae27..7759c4f1982 100644 --- a/mysql-test/suite/galera/r/mysql-wsrep#198.result +++ b/mysql-test/suite/galera/r/mysql-wsrep#198.result @@ -31,3 +31,6 @@ test.t1 repair note The storage engine for the table doesn't support repair test.t2 repair note The storage engine for the table doesn't support repair DROP TABLE t1; DROP TABLE t2; +connection node_1; +disconnect node_2a; +disconnect node_2b; diff --git a/mysql-test/suite/galera/t/galera_bf_abort_lock_table.test b/mysql-test/suite/galera/t/galera_bf_abort_lock_table.test index fe8aea9f248..71c3a7198f2 100644 --- a/mysql-test/suite/galera/t/galera_bf_abort_lock_table.test +++ b/mysql-test/suite/galera/t/galera_bf_abort_lock_table.test @@ -17,13 +17,13 @@ INSERT INTO t1 VALUES (2); --connection node_2 SET SESSION wsrep_sync_wait = 0; ---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'Waiting for table metadata lock' +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'Waiting to execute in isolation%'); --let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST --source include/wait_condition_with_debug.inc UNLOCK TABLES; ---let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'Waiting for table metadata lock' +--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'Waiting to execute in isolation%'); --let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST --source include/wait_condition_with_debug.inc diff --git a/mysql-test/suite/galera/t/mysql-wsrep#198.cnf b/mysql-test/suite/galera/t/mysql-wsrep#198.cnf index bbeb0e31c31..4c62448fe3d 100644 --- a/mysql-test/suite/galera/t/mysql-wsrep#198.cnf +++ b/mysql-test/suite/galera/t/mysql-wsrep#198.cnf @@ -2,5 +2,3 @@ [mysqld] log-bin -wsrep-debug=1 -loose-mysql-wsrep198=1 diff --git a/mysql-test/suite/galera/t/mysql-wsrep#198.test b/mysql-test/suite/galera/t/mysql-wsrep#198.test index 
2c89f859328..dceae6e17e9 100644 --- a/mysql-test/suite/galera/t/mysql-wsrep#198.test +++ b/mysql-test/suite/galera/t/mysql-wsrep#198.test @@ -1,6 +1,5 @@ --source include/galera_cluster.inc --source include/have_innodb.inc ---source include/force_restart.inc CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB; CREATE TABLE t2 (id INT PRIMARY KEY) ENGINE=InnoDB; @@ -21,7 +20,7 @@ LOCK TABLE t2 WRITE; --connection node_2 SET SESSION wsrep_sync_wait = 0; ---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'Waiting for table metadata lock' +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'Waiting to execute in isolation%'; --let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST --source include/wait_condition_with_debug_and_kill.inc @@ -39,3 +38,8 @@ UNLOCK TABLES; DROP TABLE t1; DROP TABLE t2; + +--connection node_1 + +--disconnect node_2a +--disconnect node_2b From c07e355c40379d37127082f5f5d419783ed778a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 12 Feb 2025 10:14:10 +0200 Subject: [PATCH 3/7] MDEV-36015: unrepresentable value in row_parse_int() row_parse_int(): Refactor the code and define the function static in one compilation unit. For any negative values, we must return 0. row_search_get_max_rec(), row_search_max_autoinc(): Moved to the same compilation unit with row_parse_int(). We also remove a work-around of an internal compiler error when targeting ARMv8 on GCC 4.8.5, a compiler that is no longer supported. Reviewed by: Debarun Banerjee --- .../suite/innodb/r/autoinc_persist.result | 37 +++++- .../suite/innodb/t/autoinc_persist.test | 21 +++- storage/innobase/include/row0row.h | 16 --- storage/innobase/include/row0row.inl | 49 -------- storage/innobase/include/row0sel.h | 5 +- storage/innobase/row/row0ins.cc | 109 ++++++++++++++++-- storage/innobase/row/row0sel.cc | 108 ----------------- 7 files changed, 151 insertions(+), 194 deletions(-) diff --git a/mysql-test/suite/innodb/r/autoinc_persist.result b/mysql-test/suite/innodb/r/autoinc_persist.result index 93594db3f05..59d708e5364 100644 --- a/mysql-test/suite/innodb/r/autoinc_persist.result +++ b/mysql-test/suite/innodb/r/autoinc_persist.result @@ -190,8 +190,7 @@ a 100000000000 100000000006 CREATE TABLE t11(a FLOAT AUTO_INCREMENT KEY) ENGINE = InnoDB; -INSERT INTO t11 VALUES(0), (0), (0), (0), (-1), (-10), (0), -(20), (30), (31); +INSERT INTO t11 VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31); SELECT * FROM t11; a -10 @@ -204,9 +203,22 @@ a 20 30 31 +CREATE TABLE t11u(a FLOAT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t11u VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31); +ERROR 22003: Out of range value for column 'a' at row 5 +INSERT INTO t11u VALUES(0), (0), (0), (0), (0), (20), (30), (31); +SELECT * FROM t11u; +a +11 +12 +13 +14 +15 +20 +30 +31 CREATE TABLE t12(a DOUBLE AUTO_INCREMENT KEY) ENGINE = InnoDB; -INSERT INTO t12 VALUES(0), (0), (0), (0), (-1), (-10), (0), -(20), (30), (31); +INSERT INTO t12 VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31); SELECT * FROM t12; a -10 @@ -219,6 +231,20 @@ a 20 30 31 +CREATE TABLE t12u(a DOUBLE UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t12u VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31); +ERROR 22003: Out of range value for column 'a' at row 5 +INSERT INTO t12u VALUES(0), (0), (0), (0), (0), (20), (30), 
(31); +SELECT * FROM t12u; +a +11 +12 +13 +14 +15 +20 +30 +31 # Scenario 1: Normal restart, to test if the counters are persisted # Scenario 2: Delete some values, to test the counters should not be the # one which is the largest in current table @@ -981,4 +1007,5 @@ a b 10 1 2 2 3 4 -DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t30, t32, t33; +DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t11u, t12u, +t30, t32, t33; diff --git a/mysql-test/suite/innodb/t/autoinc_persist.test b/mysql-test/suite/innodb/t/autoinc_persist.test index a2832bd5187..5364c933f57 100644 --- a/mysql-test/suite/innodb/t/autoinc_persist.test +++ b/mysql-test/suite/innodb/t/autoinc_persist.test @@ -82,15 +82,25 @@ INSERT INTO t10 VALUES(0), (0), (0), (0), (8), (10), (0), SELECT * FROM t10; CREATE TABLE t11(a FLOAT AUTO_INCREMENT KEY) ENGINE = InnoDB; -INSERT INTO t11 VALUES(0), (0), (0), (0), (-1), (-10), (0), -(20), (30), (31); +INSERT INTO t11 VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31); SELECT * FROM t11; +CREATE TABLE t11u(a FLOAT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +--error ER_WARN_DATA_OUT_OF_RANGE +INSERT INTO t11u VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31); +INSERT INTO t11u VALUES(0), (0), (0), (0), (0), (20), (30), (31); +SELECT * FROM t11u; + CREATE TABLE t12(a DOUBLE AUTO_INCREMENT KEY) ENGINE = InnoDB; -INSERT INTO t12 VALUES(0), (0), (0), (0), (-1), (-10), (0), -(20), (30), (31); +INSERT INTO t12 VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31); SELECT * FROM t12; +CREATE TABLE t12u(a DOUBLE UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +--error ER_WARN_DATA_OUT_OF_RANGE +INSERT INTO t12u VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31); +INSERT INTO t12u VALUES(0), (0), (0), (0), (0), (20), (30), (31); +SELECT * FROM t12u; + --echo # Scenario 1: Normal restart, to test if the counters are persisted --echo # Scenario 2: Delete some values, to test the counters should not be the --echo # one which is the largest in current table @@ -556,4 +566,5 @@ INSERT INTO t33 VALUES(3, NULL); SELECT MAX(b) AS `Expect 4` FROM t33; SELECT * FROM t33; -DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t30, t32, t33; +DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t11u, t12u, +t30, t32, t33; diff --git a/storage/innobase/include/row0row.h b/storage/innobase/include/row0row.h index 7056c77f2e6..63427d597fc 100644 --- a/storage/innobase/include/row0row.h +++ b/storage/innobase/include/row0row.h @@ -328,22 +328,6 @@ row_get_clust_rec( mtr_t* mtr) /*!< in: mtr */ MY_ATTRIBUTE((nonnull, warn_unused_result)); -/** Parse the integer data from specified data, which could be -DATA_INT, DATA_FLOAT or DATA_DOUBLE. 
If the value is less than 0 -and the type is not unsigned then we reset the value to 0 -@param[in] data data to read -@param[in] len length of data -@param[in] mtype mtype of data -@param[in] unsigned_type if the data is unsigned -@return the integer value from the data */ -inline -ib_uint64_t -row_parse_int( - const byte* data, - ulint len, - ulint mtype, - bool unsigned_type); - /** Result of row_search_index_entry */ enum row_search_result { ROW_FOUND = 0, /*!< the record was found */ diff --git a/storage/innobase/include/row0row.inl b/storage/innobase/include/row0row.inl index e89adb581f4..0ccf00b9b45 100644 --- a/storage/innobase/include/row0row.inl +++ b/storage/innobase/include/row0row.inl @@ -170,52 +170,3 @@ row_build_row_ref_fast( } } } - -/** Parse the integer data from specified data, which could be -DATA_INT, DATA_FLOAT or DATA_DOUBLE. If the value is less than 0 -and the type is not unsigned then we reset the value to 0 -@param[in] data data to read -@param[in] len length of data -@param[in] mtype mtype of data -@param[in] unsigned_type if the data is unsigned -@return the integer value from the data */ -ib_uint64_t -row_parse_int( - const byte* data, - ulint len, - ulint mtype, - bool unsigned_type) -{ - ib_uint64_t value = 0; - - switch (mtype) { - case DATA_INT: - - ut_a(len <= sizeof value); - value = mach_read_int_type(data, len, unsigned_type); - break; - - case DATA_FLOAT: - - ut_a(len == sizeof(float)); - value = static_cast(mach_float_read(data)); - break; - - case DATA_DOUBLE: - - ut_a(len == sizeof(double)); - value = static_cast(mach_double_read(data)); - break; - - default: - ut_error; - - } - - if (!unsigned_type && static_cast(value) < 0) { - value = 0; - } - - return(value); -} - diff --git a/storage/innobase/include/row0sel.h b/storage/innobase/include/row0sel.h index 54e4a1d283f..35e3cbe6631 100644 --- a/storage/innobase/include/row0sel.h +++ b/storage/innobase/include/row0sel.h @@ -182,9 +182,8 @@ dberr_t row_check_index(row_prebuilt_t *prebuilt, ulint *n_rows) @param[in] index index starting with an AUTO_INCREMENT column @return the largest AUTO_INCREMENT value @retval 0 if no records were found */ -ib_uint64_t -row_search_max_autoinc(dict_index_t* index) - MY_ATTRIBUTE((nonnull, warn_unused_result)); +uint64_t row_search_max_autoinc(dict_index_t *index) noexcept + MY_ATTRIBUTE((nonnull, warn_unused_result)); /** A structure for caching column values for prefetched rows */ struct sel_buf_t{ diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index a03415e218a..adc852725dd 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -2560,12 +2560,44 @@ row_ins_index_entry_big_rec( return(error); } -#if defined __aarch64__&&defined __GNUC__&&__GNUC__==4&&!defined __clang__ -/* Avoid GCC 4.8.5 internal compiler error due to srw_mutex::wr_unlock(). -We would only need this for row_ins_clust_index_entry_low(), -but GCC 4.8.5 does not support pop_options. */ -# pragma GCC optimize ("O0") -#endif +/** Parse the integer data from specified data, which could be +DATA_INT, DATA_FLOAT or DATA_DOUBLE. 
If the value is less than 0 +and the type is not unsigned then we reset the value to 0 +@param data data to read +@param len length of data +@param mtype main type of the column +@param prtype precise type of the column +@return the integer value from the data +@retval 0 if the value is negative or the type or length invalid */ +static uint64_t row_parse_int(const byte *data, size_t len, + ulint mtype, ulint prtype) noexcept +{ + switch (mtype) { + case DATA_FLOAT: + if (len != sizeof(float)) + return 0; + { + float f= mach_float_read(data); + return f <= 0.0 ? 0 : uint64_t(f); + } + case DATA_DOUBLE: + if (len != sizeof(double)) + return 0; + { + double d= mach_double_read(data); + return d <= 0.0 ? 0 : uint64_t(d); + } + case DATA_INT: + if (len == 0 || len > 8) + return 0; + const ibool unsigned_type{prtype & DATA_UNSIGNED}; + uint64_t value= mach_read_int_type(data, len, unsigned_type); + return !unsigned_type && int64_t(value) < 0 ? 0 : value; + } + + ut_ad("invalid type" == 0); + return 0; +} /***************************************************************//** Tries to insert an entry into a clustered index, ignoring foreign key @@ -2652,8 +2684,7 @@ row_ins_clust_index_entry_low( dfield->data), dfield->len, dfield->type.mtype, - dfield->type.prtype - & DATA_UNSIGNED); + dfield->type.prtype); if (auto_inc && mode != BTR_MODIFY_TREE) { mode = btr_latch_mode( @@ -3810,3 +3841,65 @@ error_handling: return(thr); } + +/** Read the AUTOINC column from an index record +@param index index of the record +@param rec the record +@return value read from the first column +@retval 0 if the value would be NULL or negative */ +static uint64_t row_read_autoinc(const dict_index_t &index, const rec_t *rec) + noexcept +{ + const dict_field_t &field= index.fields[0]; + ut_ad(!DATA_BIG_COL(field.col)); + ut_ad(!(rec_get_info_bits(rec, index.table->not_redundant()) & + (REC_INFO_MIN_REC_FLAG | REC_INFO_DELETED_FLAG))); + mem_heap_t *heap= nullptr; + rec_offs offsets_[REC_OFFS_HEADER_SIZE + 2]; + rec_offs_init(offsets_); + rec_offs *offsets= rec_get_offsets(rec, &index, offsets_, + index.n_core_fields, 1, &heap); + ut_ad(!heap); + + size_t len; + ut_d(size_t first_offset=) rec_get_nth_field_offs(offsets, 0, &len); + ut_ad(!first_offset); + return row_parse_int(rec, len, field.col->mtype, field.col->prtype); +} + +/** Get the maximum and non-delete-marked record in an index. 
+@param index index B-tree +@param mtr mini-transaction (may be committed and restarted) +@return maximum record, page s-latched in mtr +@retval nullptr if there are no records, or if all of them are delete-marked */ +static +const rec_t *row_search_get_max_rec(dict_index_t *index, mtr_t *mtr) noexcept +{ + btr_pcur_t pcur; + /* Open at the high/right end (false), and init cursor */ + if (pcur.open_leaf(false, index, BTR_SEARCH_LEAF, mtr) != DB_SUCCESS) + return nullptr; + + do + { + const page_t *page= btr_pcur_get_page(&pcur); + const rec_t *rec= page_find_rec_max_not_deleted(page); + if (page_rec_is_user_rec_low(rec - page)) + return rec; + btr_pcur_move_before_first_on_page(&pcur); + } + while (btr_pcur_move_to_prev(&pcur, mtr)); + + return nullptr; +} + +uint64_t row_search_max_autoinc(dict_index_t *index) noexcept +{ + uint64_t value= 0; + mtr_t mtr; + mtr.start(); + if (const rec_t *rec= row_search_get_max_rec(index, &mtr)) + value= row_read_autoinc(*index, rec); + mtr.commit(); + return value; +} diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 38b79af8da4..5200c10fe18 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -6856,111 +6856,3 @@ next_rec: goto rec_loop; } - -/*******************************************************************//** -Read the AUTOINC column from the current row. If the value is less than -0 and the type is not unsigned then we reset the value to 0. -@return value read from the column */ -static -ib_uint64_t -row_search_autoinc_read_column( -/*===========================*/ - dict_index_t* index, /*!< in: index to read from */ - const rec_t* rec, /*!< in: current rec */ - ulint col_no, /*!< in: column number */ - ulint mtype, /*!< in: column main type */ - ibool unsigned_type) /*!< in: signed or unsigned flag */ -{ - ulint len; - const byte* data; - ib_uint64_t value; - mem_heap_t* heap = NULL; - rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; - rec_offs* offsets = offsets_; - - rec_offs_init(offsets_); - ut_ad(page_rec_is_leaf(rec)); - - offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, - col_no + 1, &heap); - - if (rec_offs_nth_sql_null(offsets, col_no)) { - /* There is no non-NULL value in the auto-increment column. */ - value = 0; - goto func_exit; - } - - data = rec_get_nth_field(rec, offsets, col_no, &len); - - value = row_parse_int(data, len, mtype, unsigned_type); - -func_exit: - if (UNIV_LIKELY_NULL(heap)) { - mem_heap_free(heap); - } - - return(value); -} - -/** Get the maximum and non-delete-marked record in an index. 
-@param[in] index index tree -@param[in,out] mtr mini-transaction (may be committed and restarted) -@return maximum record, page s-latched in mtr -@retval NULL if there are no records, or if all of them are delete-marked */ -static -const rec_t* -row_search_get_max_rec( - dict_index_t* index, - mtr_t* mtr) -{ - btr_pcur_t pcur; - const rec_t* rec; - /* Open at the high/right end (false), and init cursor */ - if (pcur.open_leaf(false, index, BTR_SEARCH_LEAF, mtr) != DB_SUCCESS) { - return nullptr; - } - - do { - const page_t* page; - - page = btr_pcur_get_page(&pcur); - rec = page_find_rec_max_not_deleted(page); - - if (page_rec_is_user_rec(rec)) { - break; - } else { - rec = NULL; - } - btr_pcur_move_before_first_on_page(&pcur); - } while (btr_pcur_move_to_prev(&pcur, mtr)); - - ut_ad(!rec - || !(rec_get_info_bits(rec, dict_table_is_comp(index->table)) - & (REC_INFO_MIN_REC_FLAG | REC_INFO_DELETED_FLAG))); - return(rec); -} - -/** Read the max AUTOINC value from an index. -@param[in] index index starting with an AUTO_INCREMENT column -@return the largest AUTO_INCREMENT value -@retval 0 if no records were found */ -ib_uint64_t -row_search_max_autoinc(dict_index_t* index) -{ - const dict_field_t* dfield = dict_index_get_nth_field(index, 0); - - ib_uint64_t value = 0; - - mtr_t mtr; - mtr.start(); - - if (const rec_t* rec = row_search_get_max_rec(index, &mtr)) { - value = row_search_autoinc_read_column( - index, rec, 0, - dfield->col->mtype, - dfield->col->prtype & DATA_UNSIGNED); - } - - mtr.commit(); - return(value); -} From 7587b0ec84e01b4fc977d5c9bb3f5be8f12e3d7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 12 Feb 2025 14:24:19 +0200 Subject: [PATCH 4/7] MDEV-36061 Incorrect error handling on DDL with FULLTEXT INDEX row_create_index_for_mysql(): Tolerate DB_LOCK_TABLE_FULL better. fts_create_one_common_table(), fts_create_one_index_table(): Do not corrupt the error state of a non-active transaction object. fts_config_set_value(): Only run another statement if there was no error yet. 
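
To illustrate the fts_config_set_value() part of the fix, here is a
minimal standalone sketch (plain C++, not the InnoDB code; dberr,
run_update() and run_insert() are stand-ins for the real calls): the
follow-up INSERT may only run when the preceding UPDATE succeeded,
otherwise the first error would be silently replaced by the result of
the second statement.

#include <iostream>

// Stand-in for InnoDB's dberr_t error codes.
enum class dberr { SUCCESS, DEADLOCK };

// Pretend the UPDATE hit a deadlock and updated nothing.
static dberr run_update() { return dberr::DEADLOCK; }
// The INSERT itself would succeed and thus mask the earlier error.
static dberr run_insert() { return dberr::SUCCESS; }

static dberr set_config_value()
{
  dberr error = run_update();
  unsigned long n_rows_updated = 0;  // no row was updated

  // The old logic checked only n_rows_updated == 0 and ran the INSERT
  // even after a failure; the fixed logic keeps the first error.
  if (error == dberr::SUCCESS && n_rows_updated == 0)
    error = run_insert();

  return error;
}

int main()
{
  std::cout << (set_config_value() == dberr::DEADLOCK
                ? "deadlock is reported\n"
                : "the error was masked\n");
}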
--- mysql-test/suite/innodb_fts/r/index_table.result | 3 +++ mysql-test/suite/innodb_fts/t/index_table.test | 6 ++++++ storage/innobase/fts/fts0config.cc | 2 +- storage/innobase/fts/fts0fts.cc | 13 +++++++++---- storage/innobase/row/row0mysql.cc | 4 +--- 5 files changed, 20 insertions(+), 8 deletions(-) diff --git a/mysql-test/suite/innodb_fts/r/index_table.result b/mysql-test/suite/innodb_fts/r/index_table.result index 570e367a7d4..909a889db42 100644 --- a/mysql-test/suite/innodb_fts/r/index_table.result +++ b/mysql-test/suite/innodb_fts/r/index_table.result @@ -5,6 +5,9 @@ id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, title VARCHAR(200), content TEXT ) ENGINE= InnoDB; +SET STATEMENT debug_dbug='+d,innodb_report_deadlock' FOR +CREATE FULLTEXT INDEX idx ON articles (title, content); +ERROR HY000: Got error 11 "Resource temporarily unavailable" from storage engine InnoDB CREATE FULLTEXT INDEX idx ON articles (title, content); INSERT INTO articles (title, content) VALUES ('MySQL Tutorial','DBMS stands for MySQL DataBase ...'), diff --git a/mysql-test/suite/innodb_fts/t/index_table.test b/mysql-test/suite/innodb_fts/t/index_table.test index 4b484877be1..89c09053230 100644 --- a/mysql-test/suite/innodb_fts/t/index_table.test +++ b/mysql-test/suite/innodb_fts/t/index_table.test @@ -3,6 +3,9 @@ -- source include/have_innodb.inc -- source include/have_debug.inc +--disable_query_log +call mtr.add_suppression("InnoDB: \\(Deadlock\\) writing `use_stopword'"); +--enable_query_log SET @optimize=@@GLOBAL.INNODB_OPTIMIZE_FULLTEXT_ONLY; SET GLOBAL INNODB_OPTIMIZE_FULLTEXT_ONLY=1; @@ -14,6 +17,9 @@ CREATE TABLE articles ( content TEXT ) ENGINE= InnoDB; +--error ER_GET_ERRNO +SET STATEMENT debug_dbug='+d,innodb_report_deadlock' FOR +CREATE FULLTEXT INDEX idx ON articles (title, content); CREATE FULLTEXT INDEX idx ON articles (title, content); INSERT INTO articles (title, content) VALUES diff --git a/storage/innobase/fts/fts0config.cc b/storage/innobase/fts/fts0config.cc index 4566224e171..524f648676e 100644 --- a/storage/innobase/fts/fts0config.cc +++ b/storage/innobase/fts/fts0config.cc @@ -231,7 +231,7 @@ fts_config_set_value( n_rows_updated = trx->undo_no - undo_no; /* Check if we need to do an insert. 
*/ - if (n_rows_updated == 0) { + if (error == DB_SUCCESS && n_rows_updated == 0) { info = pars_info_create(); pars_info_bind_varchar_literal( diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 83ae2827230..a878905ace4 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -37,6 +37,7 @@ Full Text Search interface #include "fts0plugin.h" #include "dict0stats.h" #include "btr0pcur.h" +#include "log.h" static const ulint FTS_MAX_ID_LEN = 32; @@ -1870,8 +1871,10 @@ fts_create_one_common_table( } } - ib::warn() << "Failed to create FTS common table " << fts_table_name; - trx->error_state = error; + ut_ad(trx->state == TRX_STATE_NOT_STARTED + || trx->error_state == error); + sql_print_warning("InnoDB: Failed to create FTS common table %s: %s", + fts_table_name, ut_strerr(error)); return NULL; } @@ -2055,8 +2058,10 @@ fts_create_one_index_table( } } - ib::warn() << "Failed to create FTS index table " << table_name; - trx->error_state = error; + ut_ad(trx->state == TRX_STATE_NOT_STARTED + || trx->error_state == error); + sql_print_warning("InnoDB: Failed to create FTS index table %s: %s", + table_name, ut_strerr(error)); return NULL; } diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index c72e71bf047..1db5ec04ff1 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -2181,11 +2181,9 @@ row_create_index_for_mysql( index = node->index; - ut_ad(!index == (err != DB_SUCCESS)); - que_graph_free((que_t*) que_node_get_parent(thr)); - if (index && (index->type & DICT_FTS)) { + if (err == DB_SUCCESS && (index->type & DICT_FTS)) { err = fts_create_index_tables(trx, index, table->id); } From f1d7e0c17e33f77278e6226dd94aeb30fc856bf0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 13 Feb 2025 12:18:03 +0200 Subject: [PATCH 5/7] MDEV-35436 dict_stats_fetch_from_ps() unnecessarily holds exclusive dict_sys.latch dict_stats_fetch_from_ps(): Acquire dict_sys.latch as few times as possible, and release dict_sys.latch after invoking pars_sql(), so that we will not be unnecessarily holding dict_sys.latch while possibly waiting for data to be read into the buffer pool. 
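
As a way to see the change in isolation, the following standalone C++
sketch models the new control flow (it is not the InnoDB code;
dict_sys_latch, parse() and execute() merely stand in for dict_sys.latch,
pars_sql() and que_run_threads()): the latch is held only while the
statement is parsed, and the potentially slow execution happens after it
has been released.

#include <iostream>
#include <mutex>
#include <string>

std::mutex dict_sys_latch;              // stand-in for dict_sys.latch

struct que_graph { std::string sql; };  // stand-in for que_t

// Parsing must happen under the latch, like pars_sql().
static que_graph parse(const std::string &sql)
{
  return que_graph{sql};
}

// Execution may block on I/O, so it must not hold the latch,
// like que_run_threads() after this change.
static void execute(const que_graph &g)
{
  std::cout << "executing: " << g.sql << '\n';
}

int main()
{
  que_graph g = [] {
    std::lock_guard<std::mutex> latch(dict_sys_latch);
    return parse("PROCEDURE FETCH_STATS () IS ... END;");
  }();                                  // latch released here
  execute(g);                           // slow work outside the latch
}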
--- storage/innobase/dict/dict0stats.cc | 165 ++++++++++++++-------------- storage/innobase/pars/pars0pars.cc | 5 - 2 files changed, 85 insertions(+), 85 deletions(-) diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index 0bd899ccd32..4d1e28b912a 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -3485,9 +3485,7 @@ dict_stats_fetch_from_ps( dict_table_t* table) /*!< in/out: table */ { index_fetch_t index_fetch_arg; - trx_t* trx; pars_info_t* pinfo; - dberr_t ret; char db_utf8[MAX_DB_UTF8_LEN]; char table_utf8[MAX_TABLE_UTF8_LEN]; @@ -3501,34 +3499,36 @@ dict_stats_fetch_from_ps( MDL_ticket *mdl_table = nullptr, *mdl_index = nullptr; dict_table_t* table_stats = dict_table_open_on_name( TABLE_STATS_NAME, false, DICT_ERR_IGNORE_NONE); - if (table_stats) { - dict_sys.freeze(SRW_LOCK_CALL); - table_stats = dict_acquire_mdl_shared(table_stats, thd, - &mdl_table); - dict_sys.unfreeze(); + if (!table_stats) { + return DB_STATS_DO_NOT_EXIST; } + dict_table_t* index_stats = dict_table_open_on_name( + INDEX_STATS_NAME, false, DICT_ERR_IGNORE_NONE); + if (!index_stats) { + dict_table_close(table_stats); + return DB_STATS_DO_NOT_EXIST; + } + + dict_sys.freeze(SRW_LOCK_CALL); + table_stats = dict_acquire_mdl_shared(table_stats, thd, + &mdl_table); if (!table_stats || strcmp(table_stats->name.m_name, TABLE_STATS_NAME)) { release_and_exit: if (table_stats) { - dict_table_close(table_stats, false, thd, mdl_table); + dict_table_close(table_stats, true, thd, mdl_table); } + if (index_stats) { + dict_table_close(index_stats, true, thd, mdl_index); + } + dict_sys.unfreeze(); return DB_STATS_DO_NOT_EXIST; } - dict_table_t* index_stats = dict_table_open_on_name( - INDEX_STATS_NAME, false, DICT_ERR_IGNORE_NONE); - if (index_stats) { - dict_sys.freeze(SRW_LOCK_CALL); - index_stats = dict_acquire_mdl_shared(index_stats, thd, - &mdl_index); - dict_sys.unfreeze(); - } - if (!index_stats) { - goto release_and_exit; - } - if (strcmp(index_stats->name.m_name, INDEX_STATS_NAME)) { - dict_table_close(index_stats, false, thd, mdl_index); + index_stats = dict_acquire_mdl_shared(index_stats, thd, + &mdl_index); + if (!index_stats + || strcmp(index_stats->name.m_name, INDEX_STATS_NAME)) { goto release_and_exit; } @@ -3536,10 +3536,6 @@ release_and_exit: DEBUG_SYNC(thd, "dict_stats_mdl_acquired"); #endif /* ENABLED_DEBUG_SYNC */ - trx = trx_create(); - - trx_start_internal_read_only(trx); - dict_fs2utf8(table->name.m_name, db_utf8, sizeof(db_utf8), table_utf8, sizeof(table_utf8)); @@ -3560,76 +3556,85 @@ release_and_exit: "fetch_index_stats_step", dict_stats_fetch_index_stats_step, &index_fetch_arg); - dict_sys.lock(SRW_LOCK_CALL); /* FIXME: remove this */ - ret = que_eval_sql(pinfo, - "PROCEDURE FETCH_STATS () IS\n" - "found INT;\n" - "DECLARE FUNCTION fetch_table_stats_step;\n" - "DECLARE FUNCTION fetch_index_stats_step;\n" - "DECLARE CURSOR table_stats_cur IS\n" - " SELECT\n" - /* if you change the selected fields, be - sure to adjust - dict_stats_fetch_table_stats_step() */ - " n_rows,\n" - " clustered_index_size,\n" - " sum_of_other_index_sizes\n" - " FROM \"" TABLE_STATS_NAME "\"\n" - " WHERE\n" - " database_name = :database_name AND\n" - " table_name = :table_name;\n" - "DECLARE CURSOR index_stats_cur IS\n" - " SELECT\n" - /* if you change the selected fields, be - sure to adjust - dict_stats_fetch_index_stats_step() */ - " index_name,\n" - " stat_name,\n" - " stat_value,\n" - " sample_size\n" - " FROM \"" INDEX_STATS_NAME "\"\n" - " 
WHERE\n" - " database_name = :database_name AND\n" - " table_name = :table_name;\n" + dict_sys.unfreeze(); + dict_sys.lock(SRW_LOCK_CALL); + que_t* graph = pars_sql( + pinfo, + "PROCEDURE FETCH_STATS () IS\n" + "found INT;\n" + "DECLARE FUNCTION fetch_table_stats_step;\n" + "DECLARE FUNCTION fetch_index_stats_step;\n" + "DECLARE CURSOR table_stats_cur IS\n" + " SELECT\n" + /* if you change the selected fields, be + sure to adjust + dict_stats_fetch_table_stats_step() */ + " n_rows,\n" + " clustered_index_size,\n" + " sum_of_other_index_sizes\n" + " FROM \"" TABLE_STATS_NAME "\"\n" + " WHERE\n" + " database_name = :database_name AND\n" + " table_name = :table_name;\n" + "DECLARE CURSOR index_stats_cur IS\n" + " SELECT\n" + /* if you change the selected fields, be + sure to adjust + dict_stats_fetch_index_stats_step() */ + " index_name,\n" + " stat_name,\n" + " stat_value,\n" + " sample_size\n" + " FROM \"" INDEX_STATS_NAME "\"\n" + " WHERE\n" + " database_name = :database_name AND\n" + " table_name = :table_name;\n" - "BEGIN\n" + "BEGIN\n" - "OPEN table_stats_cur;\n" - "FETCH table_stats_cur INTO\n" - " fetch_table_stats_step();\n" - "IF (SQL % NOTFOUND) THEN\n" - " CLOSE table_stats_cur;\n" - " RETURN;\n" - "END IF;\n" - "CLOSE table_stats_cur;\n" + "OPEN table_stats_cur;\n" + "FETCH table_stats_cur INTO\n" + " fetch_table_stats_step();\n" + "IF (SQL % NOTFOUND) THEN\n" + " CLOSE table_stats_cur;\n" + " RETURN;\n" + "END IF;\n" + "CLOSE table_stats_cur;\n" - "OPEN index_stats_cur;\n" - "found := 1;\n" - "WHILE found = 1 LOOP\n" - " FETCH index_stats_cur INTO\n" - " fetch_index_stats_step();\n" - " IF (SQL % NOTFOUND) THEN\n" - " found := 0;\n" - " END IF;\n" - "END LOOP;\n" - "CLOSE index_stats_cur;\n" + "OPEN index_stats_cur;\n" + "found := 1;\n" + "WHILE found = 1 LOOP\n" + " FETCH index_stats_cur INTO\n" + " fetch_index_stats_step();\n" + " IF (SQL % NOTFOUND) THEN\n" + " found := 0;\n" + " END IF;\n" + "END LOOP;\n" + "CLOSE index_stats_cur;\n" - "END;", trx); - /* pinfo is freed by que_eval_sql() */ + "END;"); dict_sys.unlock(); + trx_t* trx = trx_create(); + trx->graph = nullptr; + graph->trx = trx; + + trx_start_internal_read_only(trx); + que_run_threads(que_fork_start_command(graph)); + que_graph_free(graph); + dict_table_close(table_stats, false, thd, mdl_table); dict_table_close(index_stats, false, thd, mdl_index); trx_commit_for_mysql(trx); - + dberr_t ret = trx->error_state; trx->free(); if (!index_fetch_arg.stats_were_modified) { - return(DB_STATS_DO_NOT_EXIST); + return DB_STATS_DO_NOT_EXIST; } - return(ret); + return ret; } /*********************************************************************//** diff --git a/storage/innobase/pars/pars0pars.cc b/storage/innobase/pars/pars0pars.cc index 51bcc9540fc..fff60b0bd46 100644 --- a/storage/innobase/pars/pars0pars.cc +++ b/storage/innobase/pars/pars0pars.cc @@ -783,11 +783,6 @@ pars_retrieve_table_list_defs( { ulint count = 0; - if (sym_node == NULL) { - - return(count); - } - while (sym_node) { pars_retrieve_table_def(sym_node); From a20c8fabe703d951c26a35f649e1a0c303a27610 Mon Sep 17 00:00:00 2001 From: Kristian Nielsen Date: Wed, 19 Feb 2025 10:43:36 +0000 Subject: [PATCH 6/7] Fix sporadic failure of rpl.rpl_parallel_innodb_lock_conflict Make sure the table mysql.gtid_slave_pos is altered to InnoDB before starting parallel replication. The parallel replication of the suppression insertion in the test case was trying to update the GTID position in parallel with the ALTER TABLE, which could occasionally deadlock on the MDL lock. 
Reviewed-by: Monty Signed-off-by: Kristian Nielsen --- .../rpl_parallel_innodb_lock_conflict.result | 7 +++---- .../rpl/r/rpl_parallel_innodb_lock_conflict.result | 7 +++---- .../rpl/t/rpl_parallel_innodb_lock_conflict.test | 12 +++++------- 3 files changed, 11 insertions(+), 15 deletions(-) diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_innodb_lock_conflict.result b/mysql-test/suite/binlog_encryption/rpl_parallel_innodb_lock_conflict.result index 1411db16af6..a3d87641622 100644 --- a/mysql-test/suite/binlog_encryption/rpl_parallel_innodb_lock_conflict.result +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_innodb_lock_conflict.result @@ -1,16 +1,15 @@ ***MDEV-5914: Parallel replication deadlock due to InnoDB lock conflicts *** include/master-slave.inc [connection master] -connection server_2; -SET sql_log_bin=0; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CALL mtr.add_suppression("InnoDB: Transaction was aborted due to "); CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); -SET sql_log_bin=1; +connection server_2; SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; include/stop_slave.inc SET GLOBAL slave_parallel_threads=10; CHANGE MASTER TO master_use_gtid=slave_pos; connection server_1; -ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; diff --git a/mysql-test/suite/rpl/r/rpl_parallel_innodb_lock_conflict.result b/mysql-test/suite/rpl/r/rpl_parallel_innodb_lock_conflict.result index 1411db16af6..a3d87641622 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_innodb_lock_conflict.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_innodb_lock_conflict.result @@ -1,16 +1,15 @@ ***MDEV-5914: Parallel replication deadlock due to InnoDB lock conflicts *** include/master-slave.inc [connection master] -connection server_2; -SET sql_log_bin=0; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CALL mtr.add_suppression("InnoDB: Transaction was aborted due to "); CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); -SET sql_log_bin=1; +connection server_2; SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; include/stop_slave.inc SET GLOBAL slave_parallel_threads=10; CHANGE MASTER TO master_use_gtid=slave_pos; connection server_1; -ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; diff --git a/mysql-test/suite/rpl/t/rpl_parallel_innodb_lock_conflict.test b/mysql-test/suite/rpl/t/rpl_parallel_innodb_lock_conflict.test index 532eb58571c..47fb8ff8c75 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_innodb_lock_conflict.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_innodb_lock_conflict.test @@ -5,21 +5,19 @@ --source include/have_debug_sync.inc --source include/master-slave.inc ---disable_query_log -call mtr.add_suppression("InnoDB: Transaction was aborted due to "); ---enable_query_log +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CALL mtr.add_suppression("InnoDB: Transaction was aborted due to "); +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +--save_master_pos --connection server_2 -SET sql_log_bin=0; -CALL 
mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); -SET sql_log_bin=1; +--sync_with_master SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; --source include/stop_slave.inc SET GLOBAL slave_parallel_threads=10; CHANGE MASTER TO master_use_gtid=slave_pos; --connection server_1 -ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); --connect (con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,) From bac2358c9dafd2df9d0e92293097615032ae958b Mon Sep 17 00:00:00 2001 From: Monty Date: Sun, 23 Feb 2025 16:59:04 +0200 Subject: [PATCH 7/7] Removed outdated code comment --- sql/sql_table.cc | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/sql/sql_table.cc b/sql/sql_table.cc index c20fb8d9bc4..16bb9696ac7 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1728,18 +1728,6 @@ err: if (non_temp_tables_count) query_cache_invalidate3(thd, tables, 0); - /* - We are always logging drop of temporary tables. - The reason is to handle the following case: - - Use statement based replication - - CREATE TEMPORARY TABLE foo (logged) - - set row based replication - - DROP TEMPORARY TABLE foo (needs to be logged) - This should be fixed so that we remember if creation of the - temporary table was logged and only log it if the creation was - logged. - */ - if (non_trans_tmp_table_deleted || trans_tmp_table_deleted || non_tmp_table_deleted) {