From 289cc26c646f5a45e2c9fc6c0bb75f6f61d301a1 Mon Sep 17 00:00:00 2001
From: unknown
Date: Mon, 2 Jul 2007 19:14:48 +0200
Subject: [PATCH 01/19] Bug#21074 Large query_cache freezes mysql server
 sporadically under heavy load

Invalidating a subset of a sufficiently large query cache can take a long
time. During this time the server is effectively frozen and no other
operation can be executed. This patch addresses this problem by moving the
locks that cause the freezing and also by temporarily disabling the query
cache while the invalidation takes place.

sql/ha_ndbcluster.cc:
  - mysql_rm_table_part2 has a new parameter to indicate if LOCK_open mutex
    protection is needed.
sql/lock.cc:
  - Added a function for acquiring exclusive table name locks.
  - Added a function for asserting that a table name lock is acquired.
sql/mysql_priv.h:
  - Added a function for acquiring exclusive table name locks.
  - Added a function for asserting that a table name lock is acquired.
  - Added a parameter to mysql_rm_table_part2 to indicate whether LOCK_open
    mutex protection is needed or not.
sql/sql_cache.cc:
  - Changed the flush_in_progress flag into a state and added a function,
    is_flushing(), to reflect this state. A new state was needed to indicate
    that a partial invalidation was in progress.
  - An unused parameter 'under_guard' was removed.
  - The Query_cache mutex structure_guard_mutex was pushed down into one
    invalidate_table function to avoid multiple entry points, which make
    maintenance more difficult.
  - Instead of keeping the structure_guard_mutex during the entire
    invalidation, we set the query cache status to TABLE_FLUSH_IN_PROGRESS
    to temporarily disable the cache and avoid blocking other threads
    needing the Query_cache resource.
sql/sql_cache.h:
  - Changed the flush_in_progress flag into a state and added a function,
    is_flushing(), to reflect this state. A new state was needed to indicate
    that a partial invalidation was in progress.
  - An unused parameter 'under_guard' was removed.
  - The Query_cache mutex structure_guard_mutex was pushed down into one
    invalidate_table function to avoid multiple entry points, which make
    maintenance more difficult.
  - Instead of keeping the structure_guard_mutex during the entire
    invalidation, we set the query cache status to TABLE_FLUSH_IN_PROGRESS
    to temporarily disable the cache and avoid blocking other threads
    needing the Query_cache resource.
sql/sql_db.cc:
  - mysql_rm_table_part2_with_lock is redundant and replaced with
    mysql_rm_table_part2.
sql/sql_parse.cc:
  - The function query_cache_invalidate3 isn't protected by a lock, so we
    have a race condition.
  - Moved this function into mysql_rename_tables and made sure it is
    protected by an exclusive table name lock.
sql/sql_rename.cc:
  - The function query_cache_invalidate3 isn't protected by a lock, so we
    have a race condition.
  - Moved this function into mysql_rename_tables and made sure it is
    protected by an exclusive table name lock.
  - Instead of using the LOCK_open mutex, which excludes all other threads,
    the lock is changed into exclusive table name locks. This prevents us
    from locking the server if a query cache invalidation takes a long time
    to complete.
sql/sql_table.cc:
  - Instead of using the LOCK_open mutex, which excludes all other threads,
    the lock is changed into exclusive table name locks. This prevents us
    from locking the server if a query cache invalidation takes a long time
    to complete.
  - Added a new parameter to mysql_rm_table_part2 to control whether the
    LOCK_open mutex needs to be acquired or not.
    This is currently needed by the NDB implementation.
sql/sql_trigger.cc:
  - Table_triggers don't need to be protected by the LOCK_open mutex. This
    patch cancels this restriction.
  - Refactored comments to doxygen style.
---
 sql/ha_ndbcluster.cc | 13 +-
 sql/lock.cc | 96 ++++++
 sql/mysql_priv.h | 11 +-
 sql/sql_cache.cc | 715 +++++++++++++++++++++++++++----------------
 sql/sql_cache.h | 72 ++++-
 sql/sql_db.cc | 2 +-
 sql/sql_parse.cc | 2 +-
 sql/sql_rename.cc | 16 +-
 sql/sql_table.cc | 82 ++---
 sql/sql_trigger.cc | 40 ++-
 10 files changed, 677 insertions(+), 372 deletions(-)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index aeaea90feb6..8e49ebcbe51 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -6996,7 +6996,6 @@ int ndbcluster_find_files(handlerton *hton, THD *thd, // Lock mutex before deleting and creating frm files pthread_mutex_lock(&LOCK_open); - if (!global_read_lock) { // Delete old files @@ -7010,10 +7009,12 @@ int ndbcluster_find_files(handlerton *hton, THD *thd, table_list.db= (char*) db; table_list.alias= table_list.table_name= (char*)file_name; (void)mysql_rm_table_part2(thd, &table_list, - /* if_exists */ FALSE, - /* drop_temporary */ FALSE, - /* drop_view */ FALSE, - /* dont_log_query*/ TRUE); + FALSE, /* if_exists */ + FALSE, /* drop_temporary */ + FALSE, /* drop_view */ + TRUE, /* dont_log_query*/ + FALSE); /* need lock open */ + /* Clear error message that is returned when table is deleted */ thd->clear_error(); } @@ -7029,7 +7030,7 @@ int ndbcluster_find_files(handlerton *hton, THD *thd, } pthread_mutex_unlock(&LOCK_open); - + hash_free(&ok_tables); hash_free(&ndb_tables); diff --git a/sql/lock.cc b/sql/lock.cc index 6f1dd0669ee..6c1196e29c0 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -1027,6 +1027,102 @@ end: } +/** + @brief Lock all tables in list with an exclusive table name lock. + + @param thd Thread handle. + @param table_list Names of tables to lock. + + @note This function needs to be protected by LOCK_open. If we're + under LOCK TABLES, this function does not work as advertised. Namely, + it does not exclude other threads from using this table and does not + put an exclusive name lock on this table into the table cache. + + @see lock_table_names + @see unlock_table_names + + @retval TRUE An error occured. + @retval FALSE Name lock successfully acquired. +*/ + +bool lock_table_names_exclusively(THD *thd, TABLE_LIST *table_list) +{ + if (lock_table_names(thd, table_list)) + return TRUE; + + /* + Upgrade the table name locks from semi-exclusive to exclusive locks. + */ + for (TABLE_LIST *table= table_list; table; table= table->next_global) + { + if (table->table) + table->table->open_placeholder= 1; + } + return FALSE; +} + + +/** + @brief Test is 'table' is protected by an exclusive name lock. + + @param[in] thd The current thread handler + @param[in] table Table container containing the single table to be tested + + @note Needs to be protected by LOCK_open mutex. + + @return Error status code + @retval TRUE Table is protected + @retval FALSE Table is not protected +*/ + +bool +is_table_name_exclusively_locked_by_this_thread(THD *thd, + TABLE_LIST *table_list) +{ + char key[MAX_DBKEY_LENGTH]; + uint key_length; + + key_length= create_table_def_key(thd, key, table_list, 0); + + return is_table_name_exclusively_locked_by_this_thread(thd, (uchar *)key, + key_length); +} + + +/** + @brief Test is 'table key' is protected by an exclusive name lock. + + @param[in] thd The current thread handler.
+ @param[in] table Table container containing the single table to be tested. + + @note Needs to be protected by LOCK_open mutex + + @retval TRUE Table is protected + @retval FALSE Table is not protected + */ + +bool +is_table_name_exclusively_locked_by_this_thread(THD *thd, uchar *key, + int key_length) +{ + HASH_SEARCH_STATE state; + TABLE *table; + + for (table= (TABLE*) hash_first(&open_cache, key, + key_length, &state); + table ; + table= (TABLE*) hash_next(&open_cache, key, + key_length, &state)) + { + if (table->in_use == thd && + table->open_placeholder == 1 && + table->s->version == 0) + return TRUE; + } + + return FALSE; +} + /* Unlock all tables in list with a name lock diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index f5d66696cab..3e6bdeb2775 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -820,10 +820,8 @@ void mysql_client_binlog_statement(THD *thd); bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, my_bool drop_temporary); int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, - bool drop_temporary, bool drop_view, bool log_query); -int mysql_rm_table_part2_with_lock(THD *thd, TABLE_LIST *tables, - bool if_exists, bool drop_temporary, - bool log_query); + bool drop_temporary, bool drop_view, bool log_query, + bool need_lock_open); bool quick_rm_table(handlerton *base,const char *db, const char *table_name, uint flags); void close_cached_table(THD *thd, TABLE *table); @@ -1799,6 +1797,11 @@ bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list); bool lock_table_names(THD *thd, TABLE_LIST *table_list); void unlock_table_names(THD *thd, TABLE_LIST *table_list, TABLE_LIST *last_table); +bool lock_table_names_exclusively(THD *thd, TABLE_LIST *table_list); +bool is_table_name_exclusively_locked_by_this_thread(THD *thd, + TABLE_LIST *table_list); +bool is_table_name_exclusively_locked_by_this_thread(THD *thd, uchar *key, + int key_length); /* old unireg functions */ diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 04f142c5dfb..bcf3c2b550c 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -268,6 +268,39 @@ are stored in one block. If join_results allocated new block(s) then we need call pack_cache again. +7. Interface +The query cache interfaces with the rest of the server code through 7 +functions: + 1. Query_cache::send_result_to_client + - Called before parsing and used to match a statement with the stored + queries hash. + If a match is found the cached result set is sent through repeated + calls to net_real_write. (note: calling thread doesn't have a regis- + tered result set writer: thd->net.query_cache_query=0) + 2. Query_cache::store_query + - Called just before handle_select() and is used to register a result + set writer to the statement currently being processed + (thd->net.query_cache_query). + 3. query_cache_insert + - Called from net_real_write to append a result set to a cached query + if (and only if) this query has a registered result set writer + (thd->net.query_cache_query). + 4. Query_cache::invalidate + - Called from various places to invalidate query cache based on data- + base, table and myisam file name. During an on going invalidation + the query cache is temporarily disabled. + 5. Query_cache::flush + - Used when a RESET QUERY CACHE is issued. This clears the entire + cache block by block. + 6. Query_cache::resize + - Used to change the available memory used by the query cache. This + will also invalidate the entrie query cache in one free operation. + 7. 
Query_cache::pack + - Used when a FLUSH QUERY CACHE is issued. This changes the order of + the used memory blocks in physical memory order and move all avail- + able memory to the 'bottom' of the memory. + + TODO list: - Delayed till after-parsing qache answer (for column rights processing) @@ -615,49 +648,55 @@ void query_cache_insert(NET *net, const char *packet, ulong length) DBUG_VOID_RETURN; STRUCT_LOCK(&query_cache.structure_guard_mutex); - - if (unlikely(query_cache.query_cache_size == 0 || - query_cache.flush_in_progress)) + bool interrupt; + query_cache.wait_while_table_flush_is_in_progress(&interrupt); + if (interrupt) { + STRUCT_UNLOCK(&query_cache.structure_guard_mutex); + return; + } + + Query_cache_block *query_block= (Query_cache_block*)net->query_cache_query; + if (!query_block) + { + /* + We lost the writer and the currently processed query has been + invalidated; there is nothing left to do. + */ STRUCT_UNLOCK(&query_cache.structure_guard_mutex); DBUG_VOID_RETURN; } - Query_cache_block *query_block = ((Query_cache_block*) - net->query_cache_query); - if (query_block) + Query_cache_query *header= query_block->query(); + Query_cache_block *result= header->result(); + + DUMP(&query_cache); + BLOCK_LOCK_WR(query_block); + DBUG_PRINT("qcache", ("insert packet %lu bytes long",length)); + + /* + On success, STRUCT_UNLOCK is done by append_result_data. Otherwise, we + still need structure_guard_mutex to free the query, and therefore unlock + it later in this function. + */ + if (!query_cache.append_result_data(&result, length, (uchar*) packet, + query_block)) { - Query_cache_query *header = query_block->query(); - Query_cache_block *result = header->result(); - - DUMP(&query_cache); - BLOCK_LOCK_WR(query_block); - DBUG_PRINT("qcache", ("insert packet %lu bytes long",length)); - - /* - On success STRUCT_UNLOCK(&query_cache.structure_guard_mutex) will be - done by query_cache.append_result_data if success (if not we need - query_cache.structure_guard_mutex locked to free query) - */ - if (!query_cache.append_result_data(&result, length, (uchar*) packet, - query_block)) - { - DBUG_PRINT("warning", ("Can't append data")); - header->result(result); - DBUG_PRINT("qcache", ("free query 0x%lx", (ulong) query_block)); - // The following call will remove the lock on query_block - query_cache.free_query(query_block); - // append_result_data no success => we need unlock - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); - DBUG_VOID_RETURN; - } + DBUG_PRINT("warning", ("Can't append data")); header->result(result); - header->last_pkt_nr= net->pkt_nr; - BLOCK_UNLOCK_WR(query_block); - DBUG_EXECUTE("check_querycache",query_cache.check_integrity(0);); - } - else + DBUG_PRINT("qcache", ("free query 0x%lx", (ulong) query_block)); + // The following call will remove the lock on query_block + query_cache.free_query(query_block); + // append_result_data no success => we need unlock STRUCT_UNLOCK(&query_cache.structure_guard_mutex); + DBUG_VOID_RETURN; + } + + header->result(result); + header->last_pkt_nr= net->pkt_nr; + BLOCK_UNLOCK_WR(query_block); + DBUG_EXECUTE("check_querycache",query_cache.check_integrity(0);); + DBUG_VOID_RETURN; } @@ -671,17 +710,21 @@ void query_cache_abort(NET *net) DBUG_VOID_RETURN; STRUCT_LOCK(&query_cache.structure_guard_mutex); - - if (unlikely(query_cache.query_cache_size == 0 || - query_cache.flush_in_progress)) + bool interrupt; + query_cache.wait_while_table_flush_is_in_progress(&interrupt); + if (interrupt) { STRUCT_UNLOCK(&query_cache.structure_guard_mutex); 
DBUG_VOID_RETURN; } + /* + While we were waiting another thread might have changed the status + of the writer. Make sure the writer still exists before continue. + */ Query_cache_block *query_block= ((Query_cache_block*) net->query_cache_query); - if (query_block) // Test if changed by other thread + if (query_block) { DUMP(&query_cache); BLOCK_LOCK_WR(query_block); @@ -713,13 +756,22 @@ void query_cache_end_of_result(THD *thd) STRUCT_LOCK(&query_cache.structure_guard_mutex); - if (unlikely(query_cache.query_cache_size == 0 || - query_cache.flush_in_progress)) - goto end; + bool interrupt; + query_cache.wait_while_table_flush_is_in_progress(&interrupt); + if (interrupt) + { + STRUCT_UNLOCK(&query_cache.structure_guard_mutex); + DBUG_VOID_RETURN; + } query_block= ((Query_cache_block*) thd->net.query_cache_query); if (query_block) { + /* + The writer is still present; finish last result block by chopping it to + suitable size if needed and setting block type. Since this is the last + block, the writer should be dropped. + */ DUMP(&query_cache); BLOCK_LOCK_WR(query_block); Query_cache_query *header= query_block->query(); @@ -746,8 +798,11 @@ void query_cache_end_of_result(THD *thd) #endif header->found_rows(current_thd->limit_found_rows); header->result()->type= Query_cache_block::RESULT; + + /* Drop the writer. */ header->writer(0); thd->net.query_cache_query= 0; + BLOCK_UNLOCK_WR(query_block); DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1);); @@ -801,9 +856,9 @@ ulong Query_cache::resize(ulong query_cache_size_arg) DBUG_ASSERT(initialized); STRUCT_LOCK(&structure_guard_mutex); - while (flush_in_progress) - pthread_cond_wait(&COND_flush_finished, &structure_guard_mutex); - flush_in_progress= TRUE; + while (is_flushing()) + pthread_cond_wait(&COND_cache_status_changed, &structure_guard_mutex); + m_cache_status= Query_cache::FLUSH_IN_PROGRESS; STRUCT_UNLOCK(&structure_guard_mutex); free_cache(); @@ -814,8 +869,8 @@ ulong Query_cache::resize(ulong query_cache_size_arg) DBUG_EXECUTE("check_querycache",check_integrity(0);); STRUCT_LOCK(&structure_guard_mutex); - flush_in_progress= FALSE; - pthread_cond_signal(&COND_flush_finished); + m_cache_status= Query_cache::NO_FLUSH_IN_PROGRESS; + pthread_cond_signal(&COND_cache_status_changed); STRUCT_UNLOCK(&structure_guard_mutex); DBUG_RETURN(new_query_cache_size); @@ -910,8 +965,13 @@ def_week_frmt: %lu", ha_release_temporary_latches(thd); STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size == 0 || flush_in_progress) + if (query_cache_size == 0 || is_flushing()) { + /* + A table- or a full flush operation can potentially take a long time to + finish. We choose not to wait for them and skip caching statements + instead. + */ STRUCT_UNLOCK(&structure_guard_mutex); DBUG_VOID_RETURN; } @@ -954,7 +1014,7 @@ def_week_frmt: %lu", Query_cache_block *query_block; query_block= write_block_data(tot_length, (uchar*) thd->query, ALIGN_SIZE(sizeof(Query_cache_query)), - Query_cache_block::QUERY, local_tables, 1); + Query_cache_block::QUERY, local_tables); if (query_block != 0) { DBUG_PRINT("qcache", ("query block 0x%lx allocated, %lu", @@ -1088,13 +1148,21 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) } STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size == 0 || flush_in_progress) + + if (query_cache_size == 0) + goto err_unlock; + + if (is_flushing()) { - DBUG_PRINT("qcache", ("query cache disabled")); + /* Return; Query cache is temporarily disabled while we flush. 
*/ + DBUG_PRINT("qcache",("query cache disabled")); goto err_unlock; } - /* Check that we haven't forgot to reset the query cache variables */ + /* + Check that we haven't forgot to reset the query cache variables; + make sure there are no attached query cache writer to this thread. + */ DBUG_ASSERT(thd->net.query_cache_query == 0); Query_cache_block *query_block; @@ -1267,7 +1335,7 @@ def_week_frmt: %lu", ("Handler require invalidation queries of %s.%s %lu-%lu", table_list.db, table_list.alias, (ulong) engine_data, (ulong) table->engine_data())); - invalidate_table((uchar *) table->db(), table->key_length()); + invalidate_table(thd, (uchar *) table->db(), table->key_length()); } else thd->lex->safe_to_cache_query= 0; // Don't try to cache this @@ -1330,32 +1398,26 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used, my_bool using_transactions) { DBUG_ENTER("Query_cache::invalidate (table list)"); - STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0 && !flush_in_progress) - { - DUMP(this); - using_transactions= using_transactions && - (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); - for (; tables_used; tables_used= tables_used->next_local) - { - DBUG_ASSERT(!using_transactions || tables_used->table!=0); - if (tables_used->derived) - continue; - if (using_transactions && - (tables_used->table->file->table_cache_type() == - HA_CACHE_TBL_TRANSACT)) - /* - Tables_used->table can't be 0 in transaction. - Only 'drop' invalidate not opened table, but 'drop' - force transaction finish. - */ - thd->add_changed_table(tables_used->table); - else - invalidate_table(tables_used); - } + using_transactions= using_transactions && + (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); + for (; tables_used; tables_used= tables_used->next_local) + { + DBUG_ASSERT(!using_transactions || tables_used->table!=0); + if (tables_used->derived) + continue; + if (using_transactions && + (tables_used->table->file->table_cache_type() == + HA_CACHE_TBL_TRANSACT)) + /* + tables_used->table can't be 0 in transaction. + Only 'drop' invalidate not opened table, but 'drop' + force transaction finish. 
+ */ + thd->add_changed_table(tables_used->table); + else + invalidate_table(thd, tables_used); } - STRUCT_UNLOCK(&structure_guard_mutex); DBUG_VOID_RETURN; } @@ -1363,21 +1425,13 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used, void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used) { DBUG_ENTER("Query_cache::invalidate (changed table list)"); - if (tables_used) + THD *thd= current_thd; + for (; tables_used; tables_used= tables_used->next) { - STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0 && !flush_in_progress) - { - DUMP(this); - for (; tables_used; tables_used= tables_used->next) - { - invalidate_table((uchar*) tables_used->key, tables_used->key_length); - DBUG_PRINT("qcache", ("db: %s table: %s", tables_used->key, - tables_used->key+ - strlen(tables_used->key)+1)); - } - } - STRUCT_UNLOCK(&structure_guard_mutex); + invalidate_table(thd, (uchar*) tables_used->key, tables_used->key_length); + DBUG_PRINT("qcache", ("db: %s table: %s", tables_used->key, + tables_used->key+ + strlen(tables_used->key)+1)); } DBUG_VOID_RETURN; } @@ -1396,20 +1450,14 @@ void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used) void Query_cache::invalidate_locked_for_write(TABLE_LIST *tables_used) { DBUG_ENTER("Query_cache::invalidate_locked_for_write"); - if (tables_used) + for (; tables_used; tables_used= tables_used->next_local) { - STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0 && !flush_in_progress) + if (tables_used->lock_type & (TL_WRITE_LOW_PRIORITY | TL_WRITE) && + tables_used->table) { - DUMP(this); - for (; tables_used; tables_used= tables_used->next_local) - { - if (tables_used->lock_type & (TL_WRITE_LOW_PRIORITY | TL_WRITE) && - tables_used->table) - invalidate_table(tables_used->table); - } + THD *thd= current_thd; + invalidate_table(thd, tables_used->table); } - STRUCT_UNLOCK(&structure_guard_mutex); } DBUG_VOID_RETURN; } @@ -1423,18 +1471,14 @@ void Query_cache::invalidate(THD *thd, TABLE *table, { DBUG_ENTER("Query_cache::invalidate (table)"); - STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0 && !flush_in_progress) - { - using_transactions= using_transactions && - (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); - if (using_transactions && - (table->file->table_cache_type() == HA_CACHE_TBL_TRANSACT)) - thd->add_changed_table(table); - else - invalidate_table(table); - } - STRUCT_UNLOCK(&structure_guard_mutex); + using_transactions= using_transactions && + (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); + if (using_transactions && + (table->file->table_cache_type() == HA_CACHE_TBL_TRANSACT)) + thd->add_changed_table(table); + else + invalidate_table(thd, table); + DBUG_VOID_RETURN; } @@ -1443,31 +1487,77 @@ void Query_cache::invalidate(THD *thd, const char *key, uint32 key_length, my_bool using_transactions) { DBUG_ENTER("Query_cache::invalidate (key)"); - - STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0 && !flush_in_progress) - { - using_transactions= using_transactions && - (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); - if (using_transactions) // used for innodb => has_transactions() is TRUE - thd->add_changed_table(key, key_length); - else - invalidate_table((uchar*)key, key_length); - } - STRUCT_UNLOCK(&structure_guard_mutex); + + using_transactions= using_transactions && + (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); + if (using_transactions) // used for innodb => has_transactions() is TRUE + thd->add_changed_table(key, key_length); + else + 
invalidate_table(thd, (uchar*)key, key_length); DBUG_VOID_RETURN; } + +/** + @brief Synchronize the thread with any flushing operations. + + This helper function is called whenever a thread needs to operate on the + query cache structure (example: during invalidation). If a table flush is in + progress this function will wait for it to stop. If a full flush is in + progress, the function will set the interrupt parameter to indicate that the + current operation is redundant and should be interrupted. + + @param[out] interrupt This out-parameter will be set to TRUE if the calling + function is redundant and should be interrupted. + + @return If the interrupt-parameter is TRUE then m_cache_status is set to + NO_FLUSH_IN_PROGRESS. If the interrupt-parameter is FALSE then + m_cache_status is set to FLUSH_IN_PROGRESS. + The structure_guard_mutex will in any case be locked. +*/ + +void Query_cache::wait_while_table_flush_is_in_progress(bool *interrupt) +{ + while (is_flushing()) + { + /* + If there already is a full flush in progress query cache isn't enabled + and additional flushes are redundant; just return instead. + */ + if (m_cache_status == Query_cache::FLUSH_IN_PROGRESS) + { + *interrupt= TRUE; + return; + } + /* + If a table flush is in progress; wait on cache status to change. + */ + if (m_cache_status == Query_cache::TABLE_FLUSH_IN_PROGRESS) + pthread_cond_wait(&COND_cache_status_changed, &structure_guard_mutex); + } + *interrupt= FALSE; +} + /* Remove all cached queries that uses the given database */ - void Query_cache::invalidate(char *db) { DBUG_ENTER("Query_cache::invalidate (db)"); + STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0 && !flush_in_progress) + bool interrupt; + wait_while_table_flush_is_in_progress(&interrupt); + if (interrupt) + { + STRUCT_UNLOCK(&structure_guard_mutex); + return; + } + + THD *thd= current_thd; + + if (query_cache_size > 0) { DUMP(this); restart_search: @@ -1479,7 +1569,10 @@ void Query_cache::invalidate(char *db) { next= curr->next; if (strcmp(db, (char*)(curr->table()->db())) == 0) - invalidate_table(curr); + { + Query_cache_block_table *list_root= curr->table(0); + invalidate_query_block_list(thd,list_root); + } /* invalidate_table can freed block on which point 'next' (if table of this block used only in queries which was deleted @@ -1491,6 +1584,11 @@ void Query_cache::invalidate(char *db) if (next->type == Query_cache_block::FREE) goto restart_search; curr= next; + /* + The loop will end if the circular list pointer has reached the + point where it started from, or if the current thread was signaled + to die. 
+ */ } while (curr != tables_blocks); } } @@ -1504,21 +1602,12 @@ void Query_cache::invalidate_by_MyISAM_filename(const char *filename) { DBUG_ENTER("Query_cache::invalidate_by_MyISAM_filename"); - STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0 && !flush_in_progress) - { - /* Calculate the key outside the lock to make the lock shorter */ - char key[MAX_DBKEY_LENGTH]; - uint32 db_length; - uint key_length= filename_2_table_key(key, filename, &db_length); - Query_cache_block *table_block; - if ((table_block = (Query_cache_block*) hash_search(&tables, - (uchar*) key, - key_length))) - invalidate_table(table_block); - } - STRUCT_UNLOCK(&structure_guard_mutex); - + /* Calculate the key outside the lock to make the lock shorter */ + char key[MAX_DBKEY_LENGTH]; + uint32 db_length; + uint key_length= filename_2_table_key(key, filename, &db_length); + THD *thd= current_thd; + invalidate_table(thd,(uchar *)key, key_length); DBUG_VOID_RETURN; } @@ -1540,16 +1629,43 @@ void Query_cache::flush() DBUG_VOID_RETURN; } - /* Join result in cache in 1 block (if result length > join_limit) */ + +/** + @brief Rearrange the memory blocks and join result in cache in 1 block (if + result length > join_limit) + + @param[in] join_limit If the minimum length of a result block to be joined. + @param[in] iteration_limit The maximum number of packing and joining + sequences. + +*/ void Query_cache::pack(ulong join_limit, uint iteration_limit) { DBUG_ENTER("Query_cache::pack"); + + bool interrupt; + STRUCT_LOCK(&structure_guard_mutex); + wait_while_table_flush_is_in_progress(&interrupt); + if (interrupt) + { + STRUCT_UNLOCK(&structure_guard_mutex); + DBUG_VOID_RETURN; + } + + if (query_cache_size == 0) + { + STRUCT_UNLOCK(&structure_guard_mutex); + DBUG_VOID_RETURN; + } + uint i = 0; do { pack_cache(); } while ((++i < iteration_limit) && join_results(join_limit)); + + STRUCT_UNLOCK(&structure_guard_mutex); DBUG_VOID_RETURN; } @@ -1568,7 +1684,7 @@ void Query_cache::destroy() free_cache(); STRUCT_UNLOCK(&structure_guard_mutex); - pthread_cond_destroy(&COND_flush_finished); + pthread_cond_destroy(&COND_cache_status_changed); pthread_mutex_destroy(&structure_guard_mutex); initialized = 0; } @@ -1584,8 +1700,8 @@ void Query_cache::init() { DBUG_ENTER("Query_cache::init"); pthread_mutex_init(&structure_guard_mutex,MY_MUTEX_INIT_FAST); - pthread_cond_init(&COND_flush_finished, NULL); - flush_in_progress= FALSE; + pthread_cond_init(&COND_cache_status_changed, NULL); + m_cache_status= Query_cache::NO_FLUSH_IN_PROGRESS; initialized = 1; DBUG_VOID_RETURN; } @@ -1787,9 +1903,10 @@ void Query_cache::make_disabled() /** @class Query_cache @brief Free all resources allocated by the cache. - @details This function frees all resources allocated by the cache. You - have to call init_cache() before using the cache again. This function requires - the structure_guard_mutex to be locked. + + This function frees all resources allocated by the cache. You + have to call init_cache() before using the cache again. This function + requires the structure_guard_mutex to be locked. */ void Query_cache::free_cache() @@ -1808,24 +1925,17 @@ void Query_cache::free_cache() *****************************************************************************/ -/* - flush_cache() - flush the cache. +/** + @brief Flush the cache. - SYNOPSIS - flush_cache() - - DESCRIPTION - This function will flush cache contents. It assumes we have - 'structure_guard_mutex' locked. 
The function sets the - flush_in_progress flag and releases the lock, so other threads may - proceed skipping the cache as if it is disabled. Concurrent - flushes are performed in turn. - - After flush_cache() call, the cache is flushed, all the freed - memory is accumulated in bin[0], and the 'structure_guard_mutex' - is locked. However, since we could release the mutex during - execution, the rest of the cache state could have been changed, - and should not be relied on. + This function will flush cache contents. It assumes we have + 'structure_guard_mutex' locked. The function sets the m_cache_status flag and + releases the lock, so other threads may proceed skipping the cache as if it + is disabled. Concurrent flushes are performed in turn. + After flush_cache() call, the cache is flushed, all the freed memory is + accumulated in bin[0], and the 'structure_guard_mutex' is locked. However, + since we could release the mutex during execution, the rest of the cache + state could have been changed, and should not be relied on. */ void Query_cache::flush_cache() @@ -1837,15 +1947,15 @@ void Query_cache::flush_cache() Query_cache::free_cache()) depends on the fact that after the flush the cache is empty. */ - while (flush_in_progress) - pthread_cond_wait(&COND_flush_finished, &structure_guard_mutex); + while (is_flushing()) + pthread_cond_wait(&COND_cache_status_changed, &structure_guard_mutex); /* - Setting 'flush_in_progress' will prevent other threads from using + Setting 'FLUSH_IN_PROGRESS' will prevent other threads from using the cache while we are in the middle of the flush, and we release the lock so that other threads won't block. */ - flush_in_progress= TRUE; + m_cache_status= Query_cache::FLUSH_IN_PROGRESS; STRUCT_UNLOCK(&structure_guard_mutex); my_hash_reset(&queries); @@ -1856,8 +1966,8 @@ void Query_cache::flush_cache() } STRUCT_LOCK(&structure_guard_mutex); - flush_in_progress= FALSE; - pthread_cond_signal(&COND_flush_finished); + m_cache_status= Query_cache::NO_FLUSH_IN_PROGRESS; + pthread_cond_signal(&COND_cache_status_changed); } /* @@ -1875,7 +1985,7 @@ my_bool Query_cache::free_old_query() sequence is breached. Also we don't need remove locked queries at this point. 
*/ - Query_cache_block *query_block = 0; + Query_cache_block *query_block= 0; if (queries_blocks != 0) { Query_cache_block *block = queries_blocks; @@ -2013,8 +2123,7 @@ Query_cache_block * Query_cache::write_block_data(ulong data_len, uchar* data, ulong header_len, Query_cache_block::block_type type, - TABLE_COUNTER_TYPE ntab, - my_bool under_guard) + TABLE_COUNTER_TYPE ntab) { ulong all_headers_len = (ALIGN_SIZE(sizeof(Query_cache_block)) + ALIGN_SIZE(ntab*sizeof(Query_cache_block_table)) + @@ -2024,9 +2133,8 @@ Query_cache::write_block_data(ulong data_len, uchar* data, DBUG_ENTER("Query_cache::write_block_data"); DBUG_PRINT("qcache", ("data: %ld, header: %ld, all header: %ld", data_len, header_len, all_headers_len)); - Query_cache_block *block = allocate_block(max(align_len, - min_allocation_unit), - 1, 0, under_guard); + Query_cache_block *block= allocate_block(max(align_len, + min_allocation_unit),1, 0); if (block != 0) { block->type = type; @@ -2243,8 +2351,7 @@ my_bool Query_cache::allocate_data_chain(Query_cache_block **result_block, if (!(new_block= allocate_block(max(min_size, align_len), min_result_data_size == 0, - all_headers_len + min_result_data_size, - 1))) + all_headers_len + min_result_data_size))) { DBUG_PRINT("warning", ("Can't allocate block for results")); DBUG_RETURN(FALSE); @@ -2286,51 +2393,94 @@ my_bool Query_cache::allocate_data_chain(Query_cache_block **result_block, Invalidate the first table in the table_list */ -void Query_cache::invalidate_table(TABLE_LIST *table_list) +void Query_cache::invalidate_table(THD *thd, TABLE_LIST *table_list) { if (table_list->table != 0) - invalidate_table(table_list->table); // Table is open + invalidate_table(thd, table_list->table); // Table is open else { char key[MAX_DBKEY_LENGTH]; uint key_length; - Query_cache_block *table_block; + key_length=(uint) (strmov(strmov(key,table_list->db)+1, table_list->table_name) -key)+ 1; // We don't store temporary tables => no key_length+=4 ... - if ((table_block = (Query_cache_block*) - hash_search(&tables,(uchar*) key,key_length))) - invalidate_table(table_block); + invalidate_table(thd, (uchar *)key, key_length); } } -void Query_cache::invalidate_table(TABLE *table) +void Query_cache::invalidate_table(THD *thd, TABLE *table) { - invalidate_table((uchar*) table->s->table_cache_key.str, + invalidate_table(thd, (uchar*) table->s->table_cache_key.str, table->s->table_cache_key.length); } -void Query_cache::invalidate_table(uchar * key, uint32 key_length) +void Query_cache::invalidate_table(THD *thd, uchar * key, uint32 key_length) { - Query_cache_block *table_block; - if ((table_block = ((Query_cache_block*) - hash_search(&tables, key, key_length)))) - invalidate_table(table_block); + bool interrupt; + STRUCT_LOCK(&structure_guard_mutex); + wait_while_table_flush_is_in_progress(&interrupt); + if (interrupt) + { + STRUCT_UNLOCK(&structure_guard_mutex); + return; + } + + /* + Setting 'TABLE_FLUSH_IN_PROGRESS' will temporarily disable the cache + so that structural changes to cache won't block the entire server. + However, threads requesting to change the query cache will still have + to wait for the flush to finish. 
+ */ + m_cache_status= Query_cache::TABLE_FLUSH_IN_PROGRESS; + STRUCT_UNLOCK(&structure_guard_mutex); + + Query_cache_block *table_block= + (Query_cache_block*)hash_search(&tables, key, key_length); + if (query_cache_size > 0 && table_block) + { + Query_cache_block_table *list_root= table_block->table(0); + invalidate_query_block_list(thd, list_root); + } + + STRUCT_LOCK(&structure_guard_mutex); + m_cache_status= Query_cache::NO_FLUSH_IN_PROGRESS; + + /* + net_real_write might be waiting on a change on the m_cache_status + variable. + */ + pthread_cond_signal(&COND_cache_status_changed); + STRUCT_UNLOCK(&structure_guard_mutex); } -void Query_cache::invalidate_table(Query_cache_block *table_block) + +/** + @brief Invalidate a linked list of query cache blocks. + + Each block tries to aquire a block level lock before + free_query is a called. This function will in turn affect + related table- and result-blocks. + + @param[in,out] thd Thread context. + @param[in,out] list_root A pointer to a circular list of query blocks. + +*/ + +void +Query_cache::invalidate_query_block_list(THD *thd, + Query_cache_block_table *list_root) { - Query_cache_block_table *list_root = table_block->table(0); while (list_root->next != list_root) { - Query_cache_block *query_block = list_root->next->block(); + Query_cache_block *query_block= list_root->next->block(); BLOCK_LOCK_WR(query_block); free_query(query_block); + DBUG_EXECUTE_IF("debug_cache_locks", sleep(10);); } } - /* Register given table list begining with given position in tables table of block @@ -2463,7 +2613,6 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, if (n) { - DBUG_PRINT("qcache", ("failed at table %d", (int) n)); /* Unlink the tables we allocated above */ for (Query_cache_block_table *tmp = block->table(0) ; tmp != block_table; @@ -2473,9 +2622,13 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, return (n); } -/* - Insert used tablename in cache - Returns 0 on error + +/** + @brief Insert used table name into the cache. 
+ + @return Error status + @retval FALSE On error + @retval TRUE On success */ my_bool @@ -2489,9 +2642,10 @@ Query_cache::insert_table(uint key_len, char *key, DBUG_PRINT("qcache", ("insert table node 0x%lx, len %d", (ulong)node, key_len)); - Query_cache_block *table_block = ((Query_cache_block *) - hash_search(&tables, (uchar*) key, - key_len)); + THD *thd= current_thd; + + Query_cache_block *table_block= + (Query_cache_block *)hash_search(&tables, (uchar*) key, key_len); if (table_block && table_block->table()->engine_data() != engine_data) @@ -2506,7 +2660,11 @@ Query_cache::insert_table(uint key_len, char *key, as far as we delete all queries with this table, table block will be deleted, too */ - invalidate_table(table_block); + { + Query_cache_block_table *list_root= table_block->table(0); + invalidate_query_block_list(thd, list_root); + } + table_block= 0; } @@ -2514,21 +2672,29 @@ Query_cache::insert_table(uint key_len, char *key, { DBUG_PRINT("qcache", ("new table block from 0x%lx (%u)", (ulong) key, (int) key_len)); - table_block = write_block_data(key_len, (uchar*) key, - ALIGN_SIZE(sizeof(Query_cache_table)), - Query_cache_block::TABLE, - 1, 1); + table_block= write_block_data(key_len, (uchar*) key, + ALIGN_SIZE(sizeof(Query_cache_table)), + Query_cache_block::TABLE, 1); if (table_block == 0) { DBUG_PRINT("qcache", ("Can't write table name to cache")); DBUG_RETURN(0); } - Query_cache_table *header = table_block->table(); + Query_cache_table *header= table_block->table(); double_linked_list_simple_include(table_block, - &tables_blocks); - Query_cache_block_table *list_root = table_block->table(0); - list_root->n = 0; - list_root->next = list_root->prev = list_root; + &tables_blocks); + /* + First node in the Query_cache_block_table-chain is the table-type + block. This block will only have one Query_cache_block_table (n=0). + */ + Query_cache_block_table *list_root= table_block->table(0); + list_root->n= 0; + + /* + The node list is circular in nature. + */ + list_root->next= list_root->prev= list_root; + if (my_hash_insert(&tables, (const uchar *) table_block)) { DBUG_PRINT("qcache", ("Can't insert table to hash")); @@ -2536,20 +2702,37 @@ Query_cache::insert_table(uint key_len, char *key, free_memory_block(table_block); DBUG_RETURN(0); } - char *db = header->db(); + char *db= header->db(); header->table(db + db_length + 1); header->key_length(key_len); header->type(cache_type); header->callback(callback); header->engine_data(engine_data); + + /* + We insert this table without the assumption that it isn't refrenenced by + any queries. + */ + header->m_cached_query_count= 0; } - Query_cache_block_table *list_root = table_block->table(0); - node->next = list_root->next; - list_root->next = node; - node->next->prev = node; - node->prev = list_root; - node->parent = table_block->table(); + /* + Table is now in the cache; link the table_block-node associated + with the currently processed query into the chain of queries depending + on the cached table. + */ + Query_cache_block_table *list_root= table_block->table(0); + node->next= list_root->next; + list_root->next= node; + node->next->prev= node; + node->prev= list_root; + node->parent= table_block->table(); + /* + Increase the counter to keep track on how long this chain + of queries is. 
+ */ + Query_cache_table *table_block_data= table_block->table(); + table_block_data->m_cached_query_count++; DBUG_RETURN(1); } @@ -2557,15 +2740,27 @@ Query_cache::insert_table(uint key_len, char *key, void Query_cache::unlink_table(Query_cache_block_table *node) { DBUG_ENTER("Query_cache::unlink_table"); - node->prev->next = node->next; - node->next->prev = node->prev; - Query_cache_block_table *neighbour = node->next; + node->prev->next= node->next; + node->next->prev= node->prev; + Query_cache_block_table *neighbour= node->next; + Query_cache_table *table_block_data= node->parent; + table_block_data->m_cached_query_count--; + + DBUG_ASSERT(table_block_data->m_cached_query_count >= 0); + if (neighbour->next == neighbour) { - // list is empty (neighbor is root of list) - Query_cache_block *table_block = neighbour->block(); + DBUG_ASSERT(table_block_data->m_cached_query_count == 0); + /* + If neighbor is root of list, the list is empty. + The root of the list is always a table-type block + which contain exactly one Query_cache_block_table + node object, thus we can use the block() method + to calculate the Query_cache_block address. + */ + Query_cache_block *table_block= neighbour->block(); double_linked_list_exclude(table_block, - &tables_blocks); + &tables_blocks); hash_delete(&tables,(uchar *) table_block); free_memory_block(table_block); } @@ -2577,12 +2772,11 @@ void Query_cache::unlink_table(Query_cache_block_table *node) *****************************************************************************/ Query_cache_block * -Query_cache::allocate_block(ulong len, my_bool not_less, ulong min, - my_bool under_guard) +Query_cache::allocate_block(ulong len, my_bool not_less, ulong min) { DBUG_ENTER("Query_cache::allocate_block"); - DBUG_PRINT("qcache", ("len %lu, not less %d, min %lu, uder_guard %d", - len, not_less,min,under_guard)); + DBUG_PRINT("qcache", ("len %lu, not less %d, min %lu", + len, not_less,min)); if (len >= min(query_cache_size, query_cache_limit)) { @@ -2591,17 +2785,6 @@ Query_cache::allocate_block(ulong len, my_bool not_less, ulong min, DBUG_RETURN(0); // in any case we don't have such piece of memory } - if (!under_guard) - { - STRUCT_LOCK(&structure_guard_mutex); - - if (unlikely(query_cache.query_cache_size == 0 || flush_in_progress)) - { - STRUCT_UNLOCK(&structure_guard_mutex); - DBUG_RETURN(0); - } - } - /* Free old queries until we have enough memory to store this block */ Query_cache_block *block; do @@ -2616,8 +2799,6 @@ Query_cache::allocate_block(ulong len, my_bool not_less, ulong min, split_block(block,ALIGN_SIZE(len)); } - if (!under_guard) - STRUCT_UNLOCK(&structure_guard_mutex); DBUG_RETURN(block); } @@ -2852,9 +3033,7 @@ uint Query_cache::find_bin(ulong size) } uint bin = steps[left].idx - (uint)((size - steps[left].size)/steps[left].increment); -#ifndef DBUG_OFF - bins_dump(); -#endif + DBUG_PRINT("qcache", ("bin %u step %u, size %lu step size %lu", bin, left, size, steps[left].size)); DBUG_RETURN(bin); @@ -3140,18 +3319,17 @@ my_bool Query_cache::ask_handler_allowance(THD *thd, Packing *****************************************************************************/ + +/** + @brief Rearrange all memory blocks so that free memory joins at the + 'bottom' of the allocated memory block containing all cache data. 
+ @see Query_cache::pack(ulong join_limit, uint iteration_limit) +*/ + void Query_cache::pack_cache() { DBUG_ENTER("Query_cache::pack_cache"); - STRUCT_LOCK(&structure_guard_mutex); - - if (unlikely(query_cache_size == 0 || flush_in_progress)) - { - STRUCT_UNLOCK(&structure_guard_mutex); - DBUG_VOID_RETURN; - } - DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1);); uchar *border = 0; @@ -3185,7 +3363,6 @@ void Query_cache::pack_cache() } DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1);); - STRUCT_UNLOCK(&structure_guard_mutex); DBUG_VOID_RETURN; } @@ -3460,8 +3637,7 @@ my_bool Query_cache::join_results(ulong join_limit) my_bool has_moving = 0; DBUG_ENTER("Query_cache::join_results"); - STRUCT_LOCK(&structure_guard_mutex); - if (queries_blocks != 0 && !flush_in_progress) + if (queries_blocks != 0) { DBUG_ASSERT(query_cache_size > 0); Query_cache_block *block = queries_blocks; @@ -3524,7 +3700,6 @@ my_bool Query_cache::join_results(ulong join_limit) block = block->next; } while ( block != queries_blocks ); } - STRUCT_UNLOCK(&structure_guard_mutex); DBUG_RETURN(has_moving); } @@ -3760,6 +3935,14 @@ void Query_cache::tables_dump() } +/** + @brief Checks integrity of the various linked lists + + @return Error status code + @retval FALSE Query cache is operational. + @retval TRUE Query cache is broken. +*/ + my_bool Query_cache::check_integrity(bool locked) { my_bool result = 0; @@ -3769,14 +3952,8 @@ my_bool Query_cache::check_integrity(bool locked) if (!locked) STRUCT_LOCK(&structure_guard_mutex); - if (unlikely(query_cache_size == 0 || flush_in_progress)) - { - if (!locked) - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); - - DBUG_PRINT("qcache", ("Query Cache not initialized")); - DBUG_RETURN(0); - } + while (is_flushing()) + pthread_cond_wait(&COND_cache_status_changed,&structure_guard_mutex); if (hash_check(&queries)) { diff --git a/sql/sql_cache.h b/sql/sql_cache.h index 3c5d784ce94..cfc52e5d33a 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -65,17 +65,44 @@ struct Query_cache_query; struct Query_cache_result; class Query_cache; +/** + @brief This class represents a node in the linked chain of queries + belonging to one table. + @note The root of this linked list is not a query-type block, but the table- + type block which all queries has in common. +*/ struct Query_cache_block_table { Query_cache_block_table() {} /* Remove gcc warning */ - TABLE_COUNTER_TYPE n; // numbr in table (from 0) + + /** + This node holds a position in a static table list belonging + to the associated query (base 0). + */ + TABLE_COUNTER_TYPE n; + + /** + Pointers to the next and previous node, linking all queries with + a common table. + */ Query_cache_block_table *next, *prev; + + /** + A pointer to the table-type block which all + linked queries has in common. + */ Query_cache_table *parent; + + /** + A method to calculate the address of the query cache block + owning this node. The purpose of this calculation is to + make it easier to move the query cache block without having + to modify all the pointer addresses. + */ inline Query_cache_block *block(); }; - struct Query_cache_block { Query_cache_block() {} /* Remove gcc warning */ @@ -151,6 +178,11 @@ struct Query_cache_table /* data need by some engines */ ulonglong engine_data_buff; + /** + The number of queries depending of this table. 
+ */ + int32 m_cached_query_count; + inline char *db() { return (char *) data(); } inline char *table() { return tbl; } inline void table(char *table_arg) { tbl= table_arg; } @@ -237,9 +269,14 @@ public: ulong free_memory, queries_in_cache, hits, inserts, refused, free_memory_blocks, total_blocks, lowmem_prunes; + private: - pthread_cond_t COND_flush_finished; - bool flush_in_progress; + pthread_cond_t COND_cache_status_changed; + + enum Cache_status { NO_FLUSH_IN_PROGRESS, FLUSH_IN_PROGRESS, + TABLE_FLUSH_IN_PROGRESS }; + + Cache_status m_cache_status; void free_query_internal(Query_cache_block *point); @@ -253,7 +290,7 @@ protected: 2. query block (for operation inside query (query block/results)) Thread doing cache flush releases the mutex once it sets - flush_in_progress flag, so other threads may bypass the cache as + m_cache_status flag, so other threads may bypass the cache as if it is disabled, not waiting for reset to finish. The exception is other threads that were going to do cache flush---they'll wait till the end of a flush operation. @@ -270,6 +307,7 @@ protected: /* options */ ulong min_allocation_unit, min_result_data_size; uint def_query_hash_size, def_table_hash_size; + uint mem_bin_num, mem_bin_steps; // See at init_cache & find_bin my_bool initialized; @@ -295,10 +333,13 @@ protected: ulong data_len, Query_cache_block *query_block, my_bool first_block); - void invalidate_table(TABLE_LIST *table); - void invalidate_table(TABLE *table); - void invalidate_table(uchar *key, uint32 key_length); - void invalidate_table(Query_cache_block *table_block); + void invalidate_table(THD *thd, TABLE_LIST *table); + void invalidate_table(THD *thd, TABLE *table); + void invalidate_table(THD *thd, uchar *key, uint32 key_length); + void invalidate_table(THD *thd, Query_cache_block *table_block); + void invalidate_query_block_list(THD *thd, + Query_cache_block_table *list_root); + TABLE_COUNTER_TYPE register_tables_from_list(TABLE_LIST *tables_used, TABLE_COUNTER_TYPE counter, @@ -337,6 +378,8 @@ protected: Query_cache_block *pprev); my_bool join_results(ulong join_limit); + void wait_while_table_flush_is_in_progress(bool *interrupt); + /* Following function control structure_guard_mutex by themself or don't need structure_guard_mutex @@ -347,8 +390,7 @@ protected: Query_cache_block *write_block_data(ulong data_len, uchar* data, ulong header_len, Query_cache_block::block_type type, - TABLE_COUNTER_TYPE ntab = 0, - my_bool under_guard=0); + TABLE_COUNTER_TYPE ntab = 0); my_bool append_result_data(Query_cache_block **result, ulong data_len, uchar* data, Query_cache_block *parent); @@ -360,8 +402,7 @@ protected: inline ulong get_min_first_result_data_size(); inline ulong get_min_append_result_data_size(); Query_cache_block *allocate_block(ulong len, my_bool not_less, - ulong min, - my_bool under_guard=0); + ulong min); /* If query is cacheable return number tables in query (query without tables not cached) @@ -424,6 +465,11 @@ protected: friend void query_cache_end_of_result(THD *thd); friend void query_cache_abort(NET *net); + bool is_flushing(void) + { + return (m_cache_status != Query_cache::NO_FLUSH_IN_PROGRESS); + } + /* The following functions are only used when debugging We don't protect these with ifndef DBUG_OFF to not have to recompile diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 82938184245..49a9620d555 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -1084,7 +1084,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, } } if (thd->killed || - (tot_list 
&& mysql_rm_table_part2_with_lock(thd, tot_list, 1, 0, 1))) + (tot_list && mysql_rm_table_part2(thd, tot_list, 1, 0, 1, 1, 1))) goto err; /* Remove RAID directories */ diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 8e55610df36..485648d1fb1 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2445,7 +2445,7 @@ end_with_restore_list: check_grant(thd, INSERT_ACL | CREATE_ACL, &new_list, 0, 1, 0))) goto error; } - query_cache_invalidate3(thd, first_table, 0); + if (end_active_trans(thd) || mysql_rename_tables(thd, first_table, 0)) goto error; break; diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index f34ec83b29c..b87c74dc179 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -144,10 +144,14 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent) } } - VOID(pthread_mutex_lock(&LOCK_open)); - if (lock_table_names(thd, table_list)) + pthread_mutex_lock(&LOCK_open); + if (lock_table_names_exclusively(thd, table_list)) + { + pthread_mutex_unlock(&LOCK_open); goto err; - + } + pthread_mutex_unlock(&LOCK_open); + error=0; if ((ren_table=rename_tables(thd,table_list,0))) { @@ -183,10 +187,14 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent) send_ok(thd); } + if (!error) + query_cache_invalidate3(thd, table_list, 0); + + pthread_mutex_lock(&LOCK_open); unlock_table_names(thd, table_list, (TABLE_LIST*) 0); + pthread_mutex_unlock(&LOCK_open); err: - pthread_mutex_unlock(&LOCK_open); /* enable logging back if needed */ if (disable_logs) { diff --git a/sql/sql_table.cc b/sql/sql_table.cc index bb0f7649272..ba89578ec31 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1411,14 +1411,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, LOCK_open during wait_if_global_read_lock(), other threads could not close their tables. This would make a pretty deadlock. */ - thd->mysys_var->current_mutex= &LOCK_open; - thd->mysys_var->current_cond= &COND_refresh; - VOID(pthread_mutex_lock(&LOCK_open)); - - error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 0, 0); - - pthread_mutex_unlock(&LOCK_open); - + error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 0, 0, 1); pthread_mutex_lock(&thd->mysys_var->mutex); thd->mysys_var->current_mutex= 0; thd->mysys_var->current_cond= 0; @@ -1433,49 +1426,6 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, DBUG_RETURN(FALSE); } - -/* - delete (drop) tables. - - SYNOPSIS - mysql_rm_table_part2_with_lock() - thd Thread handle - tables List of tables to delete - if_exists If 1, don't give error if one table doesn't exists - dont_log_query Don't write query to log files. This will also not - generate warnings if the handler files doesn't exists - - NOTES - Works like documented in mysql_rm_table(), but don't check - global_read_lock and don't send_ok packet to server. 
- - RETURN - 0 ok - 1 error -*/ - -int mysql_rm_table_part2_with_lock(THD *thd, - TABLE_LIST *tables, bool if_exists, - bool drop_temporary, bool dont_log_query) -{ - int error; - thd->mysys_var->current_mutex= &LOCK_open; - thd->mysys_var->current_cond= &COND_refresh; - VOID(pthread_mutex_lock(&LOCK_open)); - - error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 1, - dont_log_query); - - pthread_mutex_unlock(&LOCK_open); - - pthread_mutex_lock(&thd->mysys_var->mutex); - thd->mysys_var->current_mutex= 0; - thd->mysys_var->current_cond= 0; - pthread_mutex_unlock(&thd->mysys_var->mutex); - return error; -} - - /* Execute the drop of a normal or temporary table @@ -1508,7 +1458,7 @@ int mysql_rm_table_part2_with_lock(THD *thd, int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, bool drop_temporary, bool drop_view, - bool dont_log_query) + bool dont_log_query, bool need_lock_open) { TABLE_LIST *table; char path[FN_REFLEN], *alias; @@ -1520,9 +1470,11 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, String built_query; DBUG_ENTER("mysql_rm_table_part2"); + if (need_lock_open) + pthread_mutex_lock(&LOCK_open); + LINT_INIT(alias); LINT_INIT(path_length); - safe_mutex_assert_owner(&LOCK_open); if (thd->current_stmt_binlog_row_based && !dont_log_query) { @@ -1555,8 +1507,15 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, } } - if (!drop_temporary && lock_table_names(thd, tables)) + if (!drop_temporary && lock_table_names_exclusively(thd, tables)) + { + if (need_lock_open) + pthread_mutex_unlock(&LOCK_open); DBUG_RETURN(1); + } + + if (need_lock_open) + pthread_mutex_unlock(&LOCK_open); /* Don't give warnings for not found errors, as we already generate notes */ thd->no_warnings_for_error= 1; @@ -1567,7 +1526,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, handlerton *table_type; enum legacy_db_type frm_db_type; - mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL, TRUE); + mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL, !need_lock_open); if (!close_temporary_table(thd, table)) { tmp_table_deleted=1; @@ -1604,6 +1563,8 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, { TABLE *locked_table; abort_locked_tables(thd, db, table->table_name); + if (need_lock_open) + pthread_mutex_lock(&LOCK_open); remove_table_from_cache(thd, db, table->table_name, RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG); @@ -1614,6 +1575,9 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, if ((locked_table= drop_locked_tables(thd, db, table->table_name))) table->table= locked_table; + if (need_lock_open) + pthread_mutex_unlock(&LOCK_open); + if (thd->killed) { thd->no_warnings_for_error= 0; @@ -1739,9 +1703,11 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, */ } } - - if (!drop_temporary) - unlock_table_names(thd, tables, (TABLE_LIST*) 0); + if (need_lock_open) + pthread_mutex_lock(&LOCK_open); + unlock_table_names(thd, tables, (TABLE_LIST*) 0); + if (need_lock_open) + pthread_mutex_unlock(&LOCK_open); thd->no_warnings_for_error= 0; DBUG_RETURN(error); } diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index e15003ab243..16159150e66 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -1268,8 +1268,6 @@ bool Table_triggers_list::drop_all_triggers(THD *thd, char *db, char *name) bzero(&table, sizeof(table)); init_alloc_root(&table.mem_root, 8192, 0); - safe_mutex_assert_owner(&LOCK_open); - if 
(Table_triggers_list::check_n_load(thd, db, name, &table, 1)) { result= 1; @@ -1431,26 +1429,24 @@ Table_triggers_list::change_table_name_in_trignames(const char *db_name, } -/* - Update .TRG and .TRN files after renaming triggers' subject table. +/** + @brief Update .TRG and .TRN files after renaming triggers' subject table. - SYNOPSIS - change_table_name() - thd Thread context - db Old database of subject table - old_table Old name of subject table - new_db New database for subject table - new_table New name of subject table + @param[in,out] thd Thread context + @param[in] db Old database of subject table + @param[in] old_table Old name of subject table + @param[in] new_db New database for subject table + @param[in] new_table New name of subject table - NOTE + @note This method tries to leave trigger related files in consistent state, i.e. it either will complete successfully, or will fail leaving files in their initial state. Also this method assumes that subject table is not renamed to itself. + This method needs to be called under an exclusive table name lock. - RETURN VALUE - FALSE Success - TRUE Error + @retval FALSE Success + @retval TRUE Error */ bool Table_triggers_list::change_table_name(THD *thd, const char *db, @@ -1466,7 +1462,19 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db, bzero(&table, sizeof(table)); init_alloc_root(&table.mem_root, 8192, 0); - safe_mutex_assert_owner(&LOCK_open); + uchar key[MAX_DBKEY_LENGTH]; + uint key_length= (uint) (strmov(strmov((char*)&key[0], db)+1, + old_table)-(char*)&key[0])+1; + + /* + This method interfaces the mysql server code protected by + either LOCK_open mutex or with an exclusive table name lock. + In the future, only an exclusive table name lock will be enough. + */ +#ifndef DBUG_OFF + if (!is_table_name_exclusively_locked_by_this_thread(thd, key, key_length)) + safe_mutex_assert_owner(&LOCK_open); +#endif DBUG_ASSERT(my_strcasecmp(table_alias_charset, db, new_db) || my_strcasecmp(table_alias_charset, old_table, new_table));
From b1ec3b534d530807995955c1c249496b0912a906 Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 5 Jul 2007 02:20:32 +0400
Subject: [PATCH 02/19] A fix and a test case for Bug#28551 The warning 'No
 database selected' is reported when calling stored procedures

Remove the offending warning introduced by the fix for Bug 25082.

This minimal patch relies on the intrinsic knowledge of the fact that
mysql_change_db is never called with 'force_switch' set to TRUE when such a
warning may be needed:

* every stored routine belongs to a database (unlike, e.g., a user defined
  function, which does not), so if we're activating the database of a stored
  routine, it can never be NULL. Therefore, this branch is never called for
  activation.

* if we're restoring the 'old' current database after routine execution is
  complete, we should not issue a warning, since it's OK to call a routine
  without having previously selected the current database.

TODO: 'force_switch' is an ambiguous flag, since we do not actually have to
'force' the switch in case of stored routines at all. When we activate the
routine's database, we should perform all the checks as in case of 'use db',
and so we already do (in this case 'force_switch' is unused). When we load a
routine into cache, we should not use mysql_change_db at all, since there
it's enough to call thd->reset_db(). We do it this way for triggers, but code
for routines is different (wrongly).
TODO: bugs are lurking in replication, since it bypasses mysql_change_db and calls thd->[re_]set_db to set the current database. The latter does not change thd->db_charset, thd->sctx->db_access and thd->variables.collation_database (and this may have nasty side effects). These todo items are to be addressed in a separate patch, if at all. mysql-test/r/sp.result: Update results (Bug#28551) mysql-test/t/sp.test: Add a test case (Bug#28551) sql/sp.cc: Remove an obsolete comment. Replace a check with an assert. sql/sql_db.cc: Remove the offending warning introduced by the fix for Bug 25082 This minimal patch relies on the intrinsic knowledge of the fact that mysql_change_db is never called with 'force_switch' set to TRUE when such a warning may be needed. --- mysql-test/r/sp.result | 7 +++++++ mysql-test/t/sp.test | 12 ++++++++++++ sql/sp.cc | 12 ++++-------- sql/sql_db.cc | 8 ++++---- 4 files changed, 27 insertions(+), 12 deletions(-) diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index b411c65faee..5d7371b0991 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -6176,4 +6176,11 @@ v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VI DROP VIEW v1; DROP FUNCTION metered; DROP TABLE t1; +drop database if exists mysqltest_db1; +create database mysqltest_db1; +create procedure mysqltest_db1.sp_bug28551() begin end; +call mysqltest_db1.sp_bug28551(); +show warnings; +Level Code Message +drop database mysqltest_db1; End of 5.0 tests diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index 0de416cdffa..cfc97fab777 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -7130,5 +7130,17 @@ DROP VIEW v1; DROP FUNCTION metered; DROP TABLE t1; +# +# Bug#28551 "The warning 'No database selected' is reported when calling +# stored procedures" +# +--disable_warnings +drop database if exists mysqltest_db1; +--enable_warnings +create database mysqltest_db1; +create procedure mysqltest_db1.sp_bug28551() begin end; +call mysqltest_db1.sp_bug28551(); +show warnings; +drop database mysqltest_db1; --echo End of 5.0 tests diff --git a/sql/sp.cc b/sql/sp.cc index d28e9138fa8..3c8ebed4ae6 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -1873,15 +1873,11 @@ sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db, DBUG_PRINT("enter", ("newdb: %s", new_db.str)); /* - Set new_db to an empty string if it's NULL, because mysql_change_db - requires a non-NULL argument. - new_db.str can be NULL only if we're restoring the old database after - execution of a stored procedure and there were no current database - selected. The stored procedure itself must always have its database - initialized. + A stored routine always belongs to some database. The + old database (old_db) might be NULL, but to restore the + old database we will use mysql_change_db. */ - if (new_db.str == NULL) - new_db.str= empty_c_string; + DBUG_ASSERT(new_db.str && new_db.length); if (thd->db) { diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 963457cc896..91b0c02d23b 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -1240,10 +1240,10 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch) { if (force_switch) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, - ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR)); - - /* Change db to NULL. */ + /* + This can only happen when we restore the old db in THD after + execution of a routine is complete. Change db to NULL. 
+ */ mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server); From e8966deecc0eda77c5b94c6f0004cc2159dbd158 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 5 Jul 2007 11:34:04 +0400 Subject: [PATCH 03/19] A fix and a test case for Bug#29050 Creation of a legal stored procedure fails if a database is not selected prior. The problem manifested itself when a user tried to create a routine that had non-fully-qualified identifiers in its bodies and there was no current database selected. This is a regression introduced by the fix for Bug 19022: The patch for Bug 19022 changes the code to always produce a warning if we can't resolve the current database in the parser. In this case this was not necessary, since even though the produced parsed tree was incorrect, we never re-use sphead that was obtained at first parsing of CREATE PROCEDURE. The sphead that is anyhow used is always obtained through db_load_routine, and there we change the current database to sphead->m_db before calling yyparse. The idea of the fix is to resolve the current database directly using lex->sphead->m_db member when parsing a stored routine body, when such is present. This patch removes the need to reset the current database when loading a trigger or routine definition into SP cache. The redundant code will be removed in 5.1. mysql-test/r/sp.result: Update test results (Bug#29050) mysql-test/r/trigger.result: Update results. mysql-test/t/sp.test: Add a test case for Bug#29050 mysql-test/t/trigger.test: Fix wrong behavior covered with tests. sql/sql_lex.cc: Implement st_lex::copy_db_to(). sql/sql_lex.h: Declare st_lex::copy_db_to(). sql/sql_parse.cc: Use st_lex::copy_db_to() in add_table_to_list, rather than THD::copy_db_to(). The former will use the database of the sphead, if we're parsing a stored routine, not the default database in THD. The default database is needed to initialize tables->db when the database part was not explicitly specified in the identifier. sql/sql_yacc.yy: Use st_lex::copy_db_to() in the parser, rather than THD::copy_db_to(). The former will use the database of the sphead, if we're parsing a stored routine, not the default database in THD. 
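To picture the resolution rule this patch implements, here is a small standalone sketch. It is illustrative only: the types Connection, Routine and ParserContext and the function resolve_db are hypothetical stand-ins for THD, sp_head and st_lex, not the real server types. The point it demonstrates is the one described above: while a routine body is being parsed, unqualified names resolve against the routine's own database, and only top-level statements fall back to the connection's current database.

#include <iostream>
#include <optional>
#include <string>

/* Illustrative stand-ins for THD, sp_head and st_lex; not the real server types. */
struct Connection { std::optional<std::string> current_db; };
struct Routine    { std::string db; };        /* a stored routine always has a database */
struct ParserContext
{
  Connection *conn;
  Routine    *sphead;                         /* non-NULL while parsing a routine body */
};

/* Mirrors the idea behind st_lex::copy_db_to(): prefer the routine's database. */
static std::optional<std::string> resolve_db(const ParserContext &ctx)
{
  if (ctx.sphead)                             /* e.g. CREATE PROCEDURE foo.p1() ... SELECT * FROM t1 */
    return ctx.sphead->db;                    /* t1 resolves against foo, not the session database */
  return ctx.conn->current_db;                /* empty optional would mean "no database selected" */
}

int main()
{
  Connection conn;                            /* no current database selected */
  Routine p1{"foo"};
  ParserContext in_routine{&conn, &p1};
  ParserContext top_level{&conn, nullptr};

  std::cout << "routine body:        "
            << resolve_db(in_routine).value_or("<no database selected>") << "\n"
            << "top-level statement: "
            << resolve_db(top_level).value_or("<no database selected>") << "\n";
  return 0;
}

Run as-is this prints "foo" for the routine-body case and the error placeholder for the top-level case, which is the behavioural difference the test case in the diff below exercises.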
--- mysql-test/r/sp.result | 18 ++++++++++++++++++ mysql-test/r/trigger.result | 4 ++-- mysql-test/t/sp.test | 25 +++++++++++++++++++++++++ mysql-test/t/trigger.test | 4 ++-- sql/sql_lex.cc | 37 +++++++++++++++++++++++++++++++++++++ sql/sql_lex.h | 2 ++ sql/sql_parse.cc | 2 +- sql/sql_yacc.yy | 15 ++++++--------- 8 files changed, 93 insertions(+), 14 deletions(-) diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index 5d7371b0991..7a4deb3ea5f 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -6183,4 +6183,22 @@ call mysqltest_db1.sp_bug28551(); show warnings; Level Code Message drop database mysqltest_db1; +drop database if exists mysqltest_db1; +drop table if exists test.t1; +create database mysqltest_db1; +use mysqltest_db1; +drop database mysqltest_db1; +create table test.t1 (id int); +insert into test.t1 (id) values (1); +create procedure test.sp_bug29050() begin select * from t1; end// +show warnings; +Level Code Message +call test.sp_bug29050(); +id +1 +show warnings; +Level Code Message +use test; +drop procedure sp_bug29050; +drop table t1; End of 5.0 tests diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result index 5405a632aa4..290929d476d 100644 --- a/mysql-test/r/trigger.result +++ b/mysql-test/r/trigger.result @@ -351,7 +351,7 @@ create trigger trg1 before insert on mysqltest.t1 for each row set @a:= 1; ERROR HY000: Trigger in wrong schema use mysqltest; create trigger test.trg1 before insert on t1 for each row set @a:= 1; -ERROR HY000: Trigger in wrong schema +ERROR 42S02: Table 'test.t1' doesn't exist drop database mysqltest; use test; create table t1 (i int, j int default 10, k int not null, key (k)); @@ -842,7 +842,7 @@ drop table t1; create trigger t1_bi before insert on test.t1 for each row set @a:=0; ERROR 3D000: No database selected create trigger test.t1_bi before insert on t1 for each row set @a:=0; -ERROR 3D000: No database selected +ERROR 42S02: Table 'test.t1' doesn't exist drop trigger t1_bi; ERROR 3D000: No database selected create table t1 (id int); diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index cfc97fab777..54dc84ad19d 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -7142,5 +7142,30 @@ create procedure mysqltest_db1.sp_bug28551() begin end; call mysqltest_db1.sp_bug28551(); show warnings; drop database mysqltest_db1; +# +# Bug#29050 Creation of a legal stored procedure fails if a database is not +# selected prior +# +--disable_warnings +drop database if exists mysqltest_db1; +drop table if exists test.t1; +--enable_warnings +create database mysqltest_db1; +use mysqltest_db1; +# For the sake of its side effect +drop database mysqltest_db1; +# Now we have no current database selected. 
+create table test.t1 (id int); +insert into test.t1 (id) values (1); +delimiter //; +create procedure test.sp_bug29050() begin select * from t1; end// +delimiter ;// +show warnings; +call test.sp_bug29050(); +show warnings; +# Restore the old current database +use test; +drop procedure sp_bug29050; +drop table t1; --echo End of 5.0 tests diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test index 7158d02956e..0fa92f33de2 100644 --- a/mysql-test/t/trigger.test +++ b/mysql-test/t/trigger.test @@ -406,7 +406,7 @@ create table mysqltest.t1 (i int); --error ER_TRG_IN_WRONG_SCHEMA create trigger trg1 before insert on mysqltest.t1 for each row set @a:= 1; use mysqltest; ---error ER_TRG_IN_WRONG_SCHEMA +--error ER_NO_SUCH_TABLE create trigger test.trg1 before insert on t1 for each row set @a:= 1; drop database mysqltest; use test; @@ -1040,7 +1040,7 @@ drop table t1; connection addconwithoutdb; --error ER_NO_DB_ERROR create trigger t1_bi before insert on test.t1 for each row set @a:=0; ---error ER_NO_DB_ERROR +--error ER_NO_SUCH_TABLE create trigger test.t1_bi before insert on t1 for each row set @a:=0; --error ER_NO_DB_ERROR drop trigger t1_bi; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index cbfba3d4d80..aefa3d43b14 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1974,6 +1974,43 @@ uint8 st_lex::get_effective_with_check(st_table_list *view) } +/** + This method should be called only during parsing. + It is aware of compound statements (stored routine bodies) + and will initialize the destination with the default + database of the stored routine, rather than the default + database of the connection it is parsed in. + E.g. if one has no current database selected, or current database + set to 'bar' and then issues: + + CREATE PROCEDURE foo.p1() BEGIN SELECT * FROM t1 END// + + t1 is meant to refer to foo.t1, not to bar.t1. + + This method is needed to support this rule. + + @return TRUE in case of error (parsing should be aborted, FALSE in + case of success +*/ + +bool +st_lex::copy_db_to(char **p_db, uint *p_db_length) const +{ + if (sphead) + { + DBUG_ASSERT(sphead->m_db.str && sphead->m_db.length); + /* + It is safe to assign the string by-pointer, both sphead and + its statements reside in the same memory root. 
+ */ + *p_db= sphead->m_db.str; + if (p_db_length) + *p_db_length= sphead->m_db.length; + return FALSE; + } + return thd->copy_db_to(p_db, p_db_length); +} + /* initialize limit counters diff --git a/sql/sql_lex.h b/sql/sql_lex.h index f8405ef14ca..3f55b3baae1 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1237,6 +1237,8 @@ typedef struct st_lex : public Query_tables_list context_stack.pop(); } + bool copy_db_to(char **p_db, uint *p_db_length) const; + Name_resolution_context *current_context() { return context_stack.head(); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index b65ad705a36..124fcff9517 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6397,7 +6397,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, ptr->db= table->db.str; ptr->db_length= table->db.length; } - else if (thd->copy_db_to(&ptr->db, &ptr->db_length)) + else if (lex->copy_db_to(&ptr->db, &ptr->db_length)) DBUG_RETURN(0); ptr->alias= alias_str; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 6c146f77ed6..949f3ed4161 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1565,14 +1565,14 @@ sp_name: } | ident { - THD *thd= YYTHD; + LEX *lex= Lex; LEX_STRING db; if (check_routine_name($1)) { my_error(ER_SP_WRONG_NAME, MYF(0), $1.str); MYSQL_YYABORT; } - if (thd->copy_db_to(&db.str, &db.length)) + if (lex->copy_db_to(&db.str, &db.length)) MYSQL_YYABORT; $$= new sp_name(db, $1, false); if ($$) @@ -3624,10 +3624,9 @@ alter: opt_create_database_options { LEX *lex=Lex; - THD *thd= Lex->thd; lex->sql_command=SQLCOM_ALTER_DB; lex->name= $3; - if (lex->name == NULL && thd->copy_db_to(&lex->name, NULL)) + if (lex->name == NULL && lex->copy_db_to(&lex->name, NULL)) MYSQL_YYABORT; } | ALTER PROCEDURE sp_name @@ -3795,10 +3794,9 @@ alter_list_item: | RENAME opt_to table_ident { LEX *lex=Lex; - THD *thd= lex->thd; lex->select_lex.db=$3->db.str; if (lex->select_lex.db == NULL && - thd->copy_db_to(&lex->select_lex.db, NULL)) + lex->copy_db_to(&lex->select_lex.db, NULL)) { MYSQL_YYABORT; } @@ -5148,7 +5146,7 @@ simple_expr: { THD *thd= lex->thd; LEX_STRING db; - if (thd->copy_db_to(&db.str, &db.length)) + if (lex->copy_db_to(&db.str, &db.length)) MYSQL_YYABORT; sp_name *name= new sp_name(db, $1, false); if (name) @@ -9025,8 +9023,7 @@ grant_ident: '*' { LEX *lex= Lex; - THD *thd= lex->thd; - if (thd->copy_db_to(&lex->current_select->db, NULL)) + if (lex->copy_db_to(&lex->current_select->db, NULL)) MYSQL_YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; From 360a5ebc378f514f824d585545459ebfab6dc06f Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 6 Jul 2007 16:18:49 +0400 Subject: [PATCH 04/19] Remove typedef st_table_list TABLE_LIST and always use name 'TABLE_LIST'. The need arose when working on Bug 26141, where it became necessary to replace TABLE_LIST with its forward declaration in a few headers, and this involved a lot of s/TABLE_LIST/st_table_list/. Although other workarounds exist, this patch is in line with our general strategy of moving away from typedef-ed names. Sometime in future we might also rename TABLE_LIST to follow the coding style, but this is a huge change. sql/item.cc: st_table_list -> TABLE_LIST sql/item.h: st_table_list -> TABLE_LIST sql/mysql_priv.h: st_table_list -> TABLE_LIST sql/sql_base.cc: st_table_list -> TABLE_LIST sql/sql_lex.cc: st_table_list -> TABLE_LIST sql/sql_lex.h: st_table_list -> TABLE_LIST sql/sql_select.cc: st_table_list -> TABLE_LIST sql/sql_show.cc: st_table_list -> TABLE_LIST sql/sql_udf.h: Was not needed. 
sql/table.cc: st_table_list -> TABLE_LIST sql/table.h: st_table_list -> TABLE_LIST --- sql/item.cc | 2 +- sql/item.h | 2 +- sql/mysql_priv.h | 6 ++-- sql/sql_base.cc | 2 +- sql/sql_lex.cc | 2 +- sql/sql_lex.h | 2 +- sql/sql_select.cc | 4 +-- sql/sql_show.cc | 14 ++++---- sql/sql_udf.h | 1 - sql/table.cc | 92 +++++++++++++++++++++++------------------------ sql/table.h | 60 +++++++++++++++---------------- 11 files changed, 93 insertions(+), 94 deletions(-) diff --git a/sql/item.cc b/sql/item.cc index 52389eece10..2fc58eebe75 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -5807,7 +5807,7 @@ bool Item_insert_value::fix_fields(THD *thd, Item **items) if (!arg->fixed) { bool res; - st_table_list *orig_next_table= context->last_name_resolution_table; + TABLE_LIST *orig_next_table= context->last_name_resolution_table; context->last_name_resolution_table= context->first_name_resolution_table; res= arg->fix_fields(thd, &arg); context->last_name_resolution_table= orig_next_table; diff --git a/sql/item.h b/sql/item.h index fdb85b131c1..58e3ec439b4 100644 --- a/sql/item.h +++ b/sql/item.h @@ -19,7 +19,7 @@ #endif class Protocol; -struct st_table_list; +struct TABLE_LIST; void item_init(void); /* Init item functions */ class Item_field; diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 0aaa1744fe1..bbcf2ed8adf 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -455,9 +455,9 @@ void debug_sync_point(const char* lock_name, uint lock_timeout); #define SHOW_LOG_STATUS_FREE "FREE" #define SHOW_LOG_STATUS_INUSE "IN USE" -struct st_table_list; +struct TABLE_LIST; class String; -void view_store_options(THD *thd, st_table_list *table, String *buff); +void view_store_options(THD *thd, TABLE_LIST *table, String *buff); /* Options to add_table_to_list() */ #define TL_OPTION_UPDATING 1 @@ -1104,7 +1104,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr); void close_temporary_tables(THD *thd); void close_tables_for_reopen(THD *thd, TABLE_LIST **tables); TABLE_LIST *find_table_in_list(TABLE_LIST *table, - st_table_list *TABLE_LIST::*link, + TABLE_LIST *TABLE_LIST::*link, const char *db_name, const char *table_name); TABLE_LIST *unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, diff --git a/sql/sql_base.cc b/sql/sql_base.cc index ed006714143..bf1c6d7cb0d 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -848,7 +848,7 @@ void close_temporary_tables(THD *thd) */ TABLE_LIST *find_table_in_list(TABLE_LIST *table, - st_table_list *TABLE_LIST::*link, + TABLE_LIST *TABLE_LIST::*link, const char *db_name, const char *table_name) { diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index cbfba3d4d80..4084c67098e 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1965,7 +1965,7 @@ bool st_lex::need_correct_ident() VIEW_CHECK_CASCADED CHECK OPTION CASCADED */ -uint8 st_lex::get_effective_with_check(st_table_list *view) +uint8 st_lex::get_effective_with_check(TABLE_LIST *view) { if (view->select_lex->master_unit() == &unit && which_check_option_applicable()) diff --git a/sql/sql_lex.h b/sql/sql_lex.h index f8405ef14ca..fbbb620fbd6 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1198,7 +1198,7 @@ typedef struct st_lex : public Query_tables_list bool can_not_use_merged(); bool only_view_structure(); bool need_correct_ident(); - uint8 get_effective_with_check(st_table_list *view); + uint8 get_effective_with_check(TABLE_LIST *view); /* Is this update command where 'WHITH CHECK OPTION' clause is important diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 9d27ab4bb4e..6e0640d2cd4 100644 --- 
a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -15395,11 +15395,11 @@ static void print_join(THD *thd, String *str, List *tables) Print table as it should be in join list SYNOPSIS - st_table_list::print(); + TABLE_LIST::print(); str string where table should bbe printed */ -void st_table_list::print(THD *thd, String *str) +void TABLE_LIST::print(THD *thd, String *str) { if (nested_join) { diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 902b298e423..dd2c95842dc 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -2429,7 +2429,7 @@ int fill_schema_shemata(THD *thd, TABLE_LIST *tables, COND *cond) } -static int get_schema_tables_record(THD *thd, struct st_table_list *tables, +static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, TABLE *table, bool res, const char *base_name, const char *file_name) @@ -2619,7 +2619,7 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables, } -static int get_schema_column_record(THD *thd, struct st_table_list *tables, +static int get_schema_column_record(THD *thd, TABLE_LIST *tables, TABLE *table, bool res, const char *base_name, const char *file_name) @@ -3061,7 +3061,7 @@ err: } -static int get_schema_stat_record(THD *thd, struct st_table_list *tables, +static int get_schema_stat_record(THD *thd, TABLE_LIST *tables, TABLE *table, bool res, const char *base_name, const char *file_name) @@ -3151,7 +3151,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables, } -static int get_schema_views_record(THD *thd, struct st_table_list *tables, +static int get_schema_views_record(THD *thd, TABLE_LIST *tables, TABLE *table, bool res, const char *base_name, const char *file_name) @@ -3263,7 +3263,7 @@ bool store_constraints(THD *thd, TABLE *table, const char *db, } -static int get_schema_constraints_record(THD *thd, struct st_table_list *tables, +static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables, TABLE *table, bool res, const char *base_name, const char *file_name) @@ -3359,7 +3359,7 @@ static bool store_trigger(THD *thd, TABLE *table, const char *db, } -static int get_schema_triggers_record(THD *thd, struct st_table_list *tables, +static int get_schema_triggers_record(THD *thd, TABLE_LIST *tables, TABLE *table, bool res, const char *base_name, const char *file_name) @@ -3426,7 +3426,7 @@ void store_key_column_usage(TABLE *table, const char*db, const char *tname, static int get_schema_key_column_usage_record(THD *thd, - struct st_table_list *tables, + TABLE_LIST *tables, TABLE *table, bool res, const char *base_name, const char *file_name) diff --git a/sql/sql_udf.h b/sql/sql_udf.h index 3cd9343610c..4b8b492698e 100644 --- a/sql/sql_udf.h +++ b/sql/sql_udf.h @@ -47,7 +47,6 @@ typedef struct st_udf_func } udf_func; class Item_result_field; -struct st_table_list; class udf_handler :public Sql_alloc { diff --git a/sql/table.cc b/sql/table.cc index 4e0f2b5d287..899d0ab2ed0 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1780,11 +1780,11 @@ void st_table::reset_item_list(List *item_list) const calculate md5 of query SYNOPSIS - st_table_list::calc_md5() + TABLE_LIST::calc_md5() buffer buffer for md5 writing */ -void st_table_list::calc_md5(char *buffer) +void TABLE_LIST::calc_md5(char *buffer) { my_MD5_CTX context; uchar digest[16]; @@ -1809,10 +1809,10 @@ void st_table_list::calc_md5(char *buffer) it (it is a kind of optimisation) SYNOPSIS - st_table_list::set_underlying_merge() + TABLE_LIST::set_underlying_merge() */ -void st_table_list::set_underlying_merge() +void 
TABLE_LIST::set_underlying_merge() { TABLE_LIST *tbl; @@ -1847,7 +1847,7 @@ void st_table_list::set_underlying_merge() setup fields of placeholder of merged VIEW SYNOPSIS - st_table_list::setup_underlying() + TABLE_LIST::setup_underlying() thd - thread handler DESCRIPTION @@ -1860,9 +1860,9 @@ void st_table_list::set_underlying_merge() TRUE - error */ -bool st_table_list::setup_underlying(THD *thd) +bool TABLE_LIST::setup_underlying(THD *thd) { - DBUG_ENTER("st_table_list::setup_underlying"); + DBUG_ENTER("TABLE_LIST::setup_underlying"); if (!field_translation && merge_underlying_list) { @@ -1925,7 +1925,7 @@ bool st_table_list::setup_underlying(THD *thd) Prepare where expression of view SYNOPSIS - st_table_list::prep_where() + TABLE_LIST::prep_where() thd - thread handler conds - condition of this JOIN no_where_clause - do not build WHERE or ON outer qwery do not need it @@ -1939,10 +1939,10 @@ bool st_table_list::setup_underlying(THD *thd) TRUE - error */ -bool st_table_list::prep_where(THD *thd, Item **conds, +bool TABLE_LIST::prep_where(THD *thd, Item **conds, bool no_where_clause) { - DBUG_ENTER("st_table_list::prep_where"); + DBUG_ENTER("TABLE_LIST::prep_where"); for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) { @@ -2042,7 +2042,7 @@ merge_on_conds(THD *thd, TABLE_LIST *table, bool is_cascaded) Prepare check option expression of table SYNOPSIS - st_table_list::prep_check_option() + TABLE_LIST::prep_check_option() thd - thread handler check_opt_type - WITH CHECK OPTION type (VIEW_CHECK_NONE, VIEW_CHECK_LOCAL, VIEW_CHECK_CASCADED) @@ -2057,16 +2057,16 @@ merge_on_conds(THD *thd, TABLE_LIST *table, bool is_cascaded) This method builds check option condition to use it later on every call (usual execution or every SP/PS call). This method have to be called after WHERE preparation - (st_table_list::prep_where) + (TABLE_LIST::prep_where) RETURN FALSE - OK TRUE - error */ -bool st_table_list::prep_check_option(THD *thd, uint8 check_opt_type) +bool TABLE_LIST::prep_check_option(THD *thd, uint8 check_opt_type) { - DBUG_ENTER("st_table_list::prep_check_option"); + DBUG_ENTER("TABLE_LIST::prep_check_option"); bool is_cascaded= check_opt_type == VIEW_CHECK_CASCADED; for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) @@ -2125,12 +2125,12 @@ bool st_table_list::prep_check_option(THD *thd, uint8 check_opt_type) Hide errors which show view underlying table information SYNOPSIS - st_table_list::hide_view_error() + TABLE_LIST::hide_view_error() thd thread handler */ -void st_table_list::hide_view_error(THD *thd) +void TABLE_LIST::hide_view_error(THD *thd) { /* Hide "Unknown column" or "Unknown function" error */ if (thd->net.last_errno == ER_BAD_FIELD_ERROR || @@ -2161,7 +2161,7 @@ void st_table_list::hide_view_error(THD *thd) table_to_find (TABLE) SYNOPSIS - st_table_list::find_underlying_table() + TABLE_LIST::find_underlying_table() table_to_find table to find RETURN @@ -2169,7 +2169,7 @@ void st_table_list::hide_view_error(THD *thd) found table reference */ -st_table_list *st_table_list::find_underlying_table(TABLE *table_to_find) +TABLE_LIST *TABLE_LIST::find_underlying_table(TABLE *table_to_find) { /* is this real table and table which we are looking for? 
*/ if (table == table_to_find && merge_underlying_list == 0) @@ -2188,10 +2188,10 @@ st_table_list *st_table_list::find_underlying_table(TABLE *table_to_find) cleunup items belonged to view fields translation table SYNOPSIS - st_table_list::cleanup_items() + TABLE_LIST::cleanup_items() */ -void st_table_list::cleanup_items() +void TABLE_LIST::cleanup_items() { if (!field_translation) return; @@ -2207,7 +2207,7 @@ void st_table_list::cleanup_items() check CHECK OPTION condition SYNOPSIS - st_table_list::view_check_option() + TABLE_LIST::view_check_option() ignore_failure ignore check option fail RETURN @@ -2216,7 +2216,7 @@ void st_table_list::cleanup_items() VIEW_CHECK_SKIP FAILED, but continue */ -int st_table_list::view_check_option(THD *thd, bool ignore_failure) +int TABLE_LIST::view_check_option(THD *thd, bool ignore_failure) { if (check_option && check_option->val_int() == 0) { @@ -2241,7 +2241,7 @@ int st_table_list::view_check_option(THD *thd, bool ignore_failure) table belong to given mask SYNOPSIS - st_table_list::check_single_table() + TABLE_LIST::check_single_table() table_arg reference on variable where to store found table (should be 0 on call, to find table, or point to table for unique test) @@ -2253,9 +2253,9 @@ int st_table_list::view_check_option(THD *thd, bool ignore_failure) TRUE found several tables */ -bool st_table_list::check_single_table(st_table_list **table_arg, +bool TABLE_LIST::check_single_table(TABLE_LIST **table_arg, table_map map, - st_table_list *view_arg) + TABLE_LIST *view_arg) { for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) { @@ -2288,7 +2288,7 @@ bool st_table_list::check_single_table(st_table_list **table_arg, TRUE - out of memory */ -bool st_table_list::set_insert_values(MEM_ROOT *mem_root) +bool TABLE_LIST::set_insert_values(MEM_ROOT *mem_root) { if (table) { @@ -2312,7 +2312,7 @@ bool st_table_list::set_insert_values(MEM_ROOT *mem_root) Test if this is a leaf with respect to name resolution. SYNOPSIS - st_table_list::is_leaf_for_name_resolution() + TABLE_LIST::is_leaf_for_name_resolution() DESCRIPTION A table reference is a leaf with respect to name resolution if @@ -2324,7 +2324,7 @@ bool st_table_list::set_insert_values(MEM_ROOT *mem_root) RETURN TRUE if a leaf, FALSE otherwise. */ -bool st_table_list::is_leaf_for_name_resolution() +bool TABLE_LIST::is_leaf_for_name_resolution() { return (view || is_natural_join || is_join_columns_complete || !nested_join); @@ -2336,7 +2336,7 @@ bool st_table_list::is_leaf_for_name_resolution() respect to name resolution. SYNOPSIS - st_table_list::first_leaf_for_name_resolution() + TABLE_LIST::first_leaf_for_name_resolution() DESCRIPTION Given that 'this' is a nested table reference, recursively walk @@ -2354,7 +2354,7 @@ bool st_table_list::is_leaf_for_name_resolution() else return 'this' */ -TABLE_LIST *st_table_list::first_leaf_for_name_resolution() +TABLE_LIST *TABLE_LIST::first_leaf_for_name_resolution() { TABLE_LIST *cur_table_ref; NESTED_JOIN *cur_nested_join; @@ -2394,7 +2394,7 @@ TABLE_LIST *st_table_list::first_leaf_for_name_resolution() respect to name resolution. 
SYNOPSIS - st_table_list::last_leaf_for_name_resolution() + TABLE_LIST::last_leaf_for_name_resolution() DESCRIPTION Given that 'this' is a nested table reference, recursively walk @@ -2412,7 +2412,7 @@ TABLE_LIST *st_table_list::first_leaf_for_name_resolution() - else - 'this' */ -TABLE_LIST *st_table_list::last_leaf_for_name_resolution() +TABLE_LIST *TABLE_LIST::last_leaf_for_name_resolution() { TABLE_LIST *cur_table_ref= this; NESTED_JOIN *cur_nested_join; @@ -2454,7 +2454,7 @@ TABLE_LIST *st_table_list::last_leaf_for_name_resolution() want_access Acess which we require */ -void st_table_list::register_want_access(ulong want_access) +void TABLE_LIST::register_want_access(ulong want_access) { /* Remove SHOW_VIEW_ACL, because it will be checked during making view */ want_access&= ~SHOW_VIEW_ACL; @@ -2473,7 +2473,7 @@ void st_table_list::register_want_access(ulong want_access) Load security context information for this view SYNOPSIS - st_table_list::prepare_view_securety_context() + TABLE_LIST::prepare_view_securety_context() thd [in] thread handler RETURN @@ -2482,9 +2482,9 @@ void st_table_list::register_want_access(ulong want_access) */ #ifndef NO_EMBEDDED_ACCESS_CHECKS -bool st_table_list::prepare_view_securety_context(THD *thd) +bool TABLE_LIST::prepare_view_securety_context(THD *thd) { - DBUG_ENTER("st_table_list::prepare_view_securety_context"); + DBUG_ENTER("TABLE_LIST::prepare_view_securety_context"); DBUG_PRINT("enter", ("table: %s", alias)); DBUG_ASSERT(!prelocking_placeholder && view); @@ -2533,17 +2533,17 @@ bool st_table_list::prepare_view_securety_context(THD *thd) Find security context of current view SYNOPSIS - st_table_list::find_view_security_context() + TABLE_LIST::find_view_security_context() thd [in] thread handler */ #ifndef NO_EMBEDDED_ACCESS_CHECKS -Security_context *st_table_list::find_view_security_context(THD *thd) +Security_context *TABLE_LIST::find_view_security_context(THD *thd) { Security_context *sctx; TABLE_LIST *upper_view= this; - DBUG_ENTER("st_table_list::find_view_security_context"); + DBUG_ENTER("TABLE_LIST::find_view_security_context"); DBUG_ASSERT(view); while (upper_view && !upper_view->view_suid) @@ -2572,7 +2572,7 @@ Security_context *st_table_list::find_view_security_context(THD *thd) Prepare security context and load underlying tables priveleges for view SYNOPSIS - st_table_list::prepare_security() + TABLE_LIST::prepare_security() thd [in] thread handler RETURN @@ -2580,11 +2580,11 @@ Security_context *st_table_list::find_view_security_context(THD *thd) TRUE Error */ -bool st_table_list::prepare_security(THD *thd) +bool TABLE_LIST::prepare_security(THD *thd) { List_iterator_fast tb(*view_tables); TABLE_LIST *tbl; - DBUG_ENTER("st_table_list::prepare_security"); + DBUG_ENTER("TABLE_LIST::prepare_security"); #ifndef NO_EMBEDDED_ACCESS_CHECKS Security_context *save_security_ctx= thd->security_ctx; @@ -3079,10 +3079,10 @@ Field_iterator_table_ref::get_natural_column_ref() Cleanup this table for re-execution. 
SYNOPSIS - st_table_list::reinit_before_use() + TABLE_LIST::reinit_before_use() */ -void st_table_list::reinit_before_use(THD *thd) +void TABLE_LIST::reinit_before_use(THD *thd) { /* Reset old pointers to TABLEs: they are not valid since the tables @@ -3109,7 +3109,7 @@ void st_table_list::reinit_before_use(THD *thd) Return subselect that contains the FROM list this table is taken from SYNOPSIS - st_table_list::containing_subselect() + TABLE_LIST::containing_subselect() RETURN Subselect item for the subquery that contains the FROM list @@ -3118,7 +3118,7 @@ void st_table_list::reinit_before_use(THD *thd) */ -Item_subselect *st_table_list::containing_subselect() +Item_subselect *TABLE_LIST::containing_subselect() { return (select_lex ? select_lex->master_unit()->item : 0); } diff --git a/sql/table.h b/sql/table.h index d4dc3e8c0b8..b29ef8c6566 100644 --- a/sql/table.h +++ b/sql/table.h @@ -237,7 +237,7 @@ struct st_table { /* Table's triggers, 0 if there are no of them */ Table_triggers_list *triggers; - struct st_table_list *pos_in_table_list;/* Element referring to this table */ + TABLE_LIST *pos_in_table_list;/* Element referring to this table */ ORDER *group; const char *alias; /* alias or table name */ uchar *null_flags; @@ -392,7 +392,7 @@ typedef struct st_field_info } ST_FIELD_INFO; -struct st_table_list; +struct TABLE_LIST; typedef class Item COND; typedef struct st_schema_table @@ -400,12 +400,12 @@ typedef struct st_schema_table const char* table_name; ST_FIELD_INFO *fields_info; /* Create information_schema table */ - TABLE *(*create_table) (THD *thd, struct st_table_list *table_list); + TABLE *(*create_table) (THD *thd, TABLE_LIST *table_list); /* Fill table with data */ - int (*fill_table) (THD *thd, struct st_table_list *tables, COND *cond); + int (*fill_table) (THD *thd, TABLE_LIST *tables, COND *cond); /* Handle fileds for old SHOW */ int (*old_format) (THD *thd, struct st_schema_table *schema_table); - int (*process_table) (THD *thd, struct st_table_list *tables, + int (*process_table) (THD *thd, TABLE_LIST *tables, TABLE *table, bool res, const char *base_name, const char *file_name); int idx_field1, idx_field2; @@ -438,7 +438,7 @@ struct st_lex; class select_union; class TMP_TABLE_PARAM; -Item *create_view_field(THD *thd, st_table_list *view, Item **field_ref, +Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref, const char *name); struct Field_translator @@ -459,7 +459,7 @@ class Natural_join_column: public Sql_alloc public: Field_translator *view_field; /* Column reference of merge view. */ Field *table_field; /* Column reference of table or temp view. */ - st_table_list *table_ref; /* Original base table/view reference. */ + TABLE_LIST *table_ref; /* Original base table/view reference. */ /* True if a common join column of two NATURAL/USING join operands. 
Notice that when we have a hierarchy of nested NATURAL/USING joins, a column can @@ -469,8 +469,8 @@ public: */ bool is_common; public: - Natural_join_column(Field_translator *field_param, st_table_list *tab); - Natural_join_column(Field *field_param, st_table_list *tab); + Natural_join_column(Field_translator *field_param, TABLE_LIST *tab); + Natural_join_column(Field *field_param, TABLE_LIST *tab); const char *name(); Item *create_item(THD *thd); Field *field(); @@ -512,17 +512,17 @@ public: (TABLE_LIST::join_using_fields != NULL) */ -typedef struct st_table_list +struct TABLE_LIST { - st_table_list() {} /* Remove gcc warning */ + TABLE_LIST() {} /* Remove gcc warning */ /* List of tables local to a subquery (used by SQL_LIST). Considers views as leaves (unlike 'next_leaf' below). Created at parse time in st_select_lex::add_table_to_list() -> table_list.link_in_list(). */ - struct st_table_list *next_local; + TABLE_LIST *next_local; /* link in a global list of all queries tables */ - struct st_table_list *next_global, **prev_global; + TABLE_LIST *next_global, **prev_global; char *db, *alias, *table_name, *schema_table_name; char *option; /* Used by cache index */ Item *on_expr; /* Used with outer join */ @@ -542,7 +542,7 @@ typedef struct st_table_list 'this' represents a NATURAL or USING join operation. Thus after parsing 'this' is a NATURAL/USING join iff (natural_join != NULL). */ - struct st_table_list *natural_join; + TABLE_LIST *natural_join; /* True if 'this' represents a nested join that is a NATURAL JOIN. For one of the operands of 'this', the member 'natural_join' points @@ -566,7 +566,7 @@ typedef struct st_table_list base tables. All of these TABLE_LIST instances contain a materialized list of columns. The list is local to a subquery. */ - struct st_table_list *next_name_resolution_table; + TABLE_LIST *next_name_resolution_table; /* Index names in a "... JOIN ... USE/IGNORE INDEX ..." clause. */ List *use_index, *ignore_index; TABLE *table; /* opened table */ @@ -582,7 +582,7 @@ typedef struct st_table_list here it will be reference of first occurrence of t1 to second (as you can see this lists can't be merged) */ - st_table_list *correspondent_table; + TABLE_LIST *correspondent_table; st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ ST_SCHEMA_TABLE *schema_table; /* Information_schema table */ st_select_lex *schema_select_lex; @@ -603,20 +603,20 @@ typedef struct st_table_list does not include the tables of subqueries used in the view. Is set only for merged views. */ - st_table_list *merge_underlying_list; + TABLE_LIST *merge_underlying_list; /* - 0 for base tables - in case of the view it is the list of all (not only underlying tables but also used in subquery ones) tables of the view. */ - List *view_tables; + List *view_tables; /* most upper view this table belongs to */ - st_table_list *belong_to_view; + TABLE_LIST *belong_to_view; /* The view directly referencing this table (non-zero only for merged underlying tables of a view). */ - st_table_list *referencing_view; + TABLE_LIST *referencing_view; /* Security context (non-zero only for tables which belong to view with SQL SECURITY DEFINER) @@ -633,7 +633,7 @@ typedef struct st_table_list leaves. Created in setup_tables() -> make_leaves_list(). 
*/ bool allowed_show; - st_table_list *next_leaf; + TABLE_LIST *next_leaf; Item *where; /* VIEW WHERE clause condition */ Item *check_option; /* WITH CHECK OPTION condition */ LEX_STRING query; /* text of (CRETE/SELECT) statement */ @@ -673,8 +673,8 @@ typedef struct st_table_list table_map dep_tables; /* tables the table depends on */ table_map on_expr_dep_tables; /* tables on expression depends on */ struct st_nested_join *nested_join; /* if the element is a nested join */ - st_table_list *embedding; /* nested join containing the table */ - List *join_list;/* join list the table belongs to */ + TABLE_LIST *embedding; /* nested join containing the table */ + List *join_list;/* join list the table belongs to */ bool cacheable_table; /* stop PS caching */ /* used in multi-upd/views privilege check */ bool table_in_first_from_clause; @@ -714,15 +714,15 @@ typedef struct st_table_list !table; } void print(THD *thd, String *str); - bool check_single_table(st_table_list **table, table_map map, - st_table_list *view); + bool check_single_table(TABLE_LIST **table, table_map map, + TABLE_LIST *view); bool set_insert_values(MEM_ROOT *mem_root); void hide_view_error(THD *thd); - st_table_list *find_underlying_table(TABLE *table); - st_table_list *first_leaf_for_name_resolution(); - st_table_list *last_leaf_for_name_resolution(); + TABLE_LIST *find_underlying_table(TABLE *table); + TABLE_LIST *first_leaf_for_name_resolution(); + TABLE_LIST *last_leaf_for_name_resolution(); bool is_leaf_for_name_resolution(); - inline st_table_list *top_table() + inline TABLE_LIST *top_table() { return belong_to_view ? belong_to_view : this; } inline bool prepare_check_option(THD *thd) { @@ -759,7 +759,7 @@ private: Cleanup for re-execution in a prepared statement or a stored procedure. */ -} TABLE_LIST; +}; class Item; From 0bc3e69f92497c04cdd48e7936888183fd8a21d5 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 12 Jul 2007 01:10:29 +0400 Subject: [PATCH 05/19] A fix and a test case for Bug#25859 ALTER DATABASE works w/o parameters. Fix the parser to make the database options not optional. mysql-test/r/information_schema.result: Update results (Bug#25859) mysql-test/t/information_schema.test: Add a test case for Bug#25859 "ALTER DATABASE works w/o parameters" sql/sql_yacc.yy: Fix Bug#25859 ALTER DATABASE works w/o parameters - require parameters in the parser. 
--- mysql-test/r/information_schema.result | 7 ++++++- mysql-test/t/information_schema.test | 11 +++++++++-- sql/sql_yacc.yy | 2 +- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 4947fd7aecc..9d0e41b341a 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -1013,7 +1013,7 @@ c int(11) YES NULL drop view v1; drop table t1; alter database information_schema; -ERROR 42000: Access denied for user 'root'@'localhost' to database 'information_schema' +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1 drop database information_schema; ERROR 42000: Access denied for user 'root'@'localhost' to database 'information_schema' drop table information_schema.tables; @@ -1326,3 +1326,8 @@ v2 YES delete from v1; drop view v1,v2; drop table t1,t2; +alter database; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1 +alter database test; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1 +End of 5.0 tests. diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test index 1d368ac6075..6cf4ad8f576 100644 --- a/mysql-test/t/information_schema.test +++ b/mysql-test/t/information_schema.test @@ -697,7 +697,7 @@ drop table t1; # # Bug #9846 Inappropriate error displayed while dropping table from 'INFORMATION_SCHEMA' # ---error 1044 +--error ER_PARSE_ERROR alter database information_schema; --error 1044 drop database information_schema; @@ -1038,4 +1038,11 @@ delete from v1; drop view v1,v2; drop table t1,t2; -# End of 5.0 tests. +# +# Bug#25859 ALTER DATABASE works w/o parameters +# +--error ER_PARSE_ERROR +alter database; +--error ER_PARSE_ERROR +alter database test; +--echo End of 5.0 tests. diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 949f3ed4161..6c38c5984a3 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -3621,7 +3621,7 @@ alter: Lex->create_info.default_table_charset= NULL; Lex->create_info.used_fields= 0; } - opt_create_database_options + create_database_options { LEX *lex=Lex; lex->sql_command=SQLCOM_ALTER_DB; From 6ba23b0ac9a4548de36386b4f892af9c7d471e97 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 12 Jul 2007 12:49:39 +0400 Subject: [PATCH 06/19] Fix for 5.1 for BUG#10491: Server returns data as charset binary SHOW CREATE TABLE or SELECT FROM I_S. This is the last patch for this bug, which depends on the big CS patch and was pending. The problem was that SHOW CREATE statements returned original queries in the binary character set. That could cause the query to be unreadable. The fix is to use original character_set_client when sending the original query to the client. In order to preserve the query in mysqldump, 'binary' character set results should be set when issuing SHOW CREATE statement. If either source or destination character set is 'binary' , no conversion is performed. The idea is that since the source character set is no longer 'binary', we fix the destination character set to still produce valid dumps. client/mysqldump.c: Switch character_set_results of mysqldump-connection before calling SHOW CREATE statements for the objects. mysql-test/r/show_check.result: Result file. 
mysql-test/t/show_check.test: Add test case for the part of BUG#10491. sql/events.cc: Send original query in the original character set. sql/sp_head.cc: Send original query in the original character set. sql/sql_show.cc: Send original query in the original character set. --- client/mysqldump.c | 43 +++++++++++++++++++++++ mysql-test/r/show_check.result | 39 +++++++++++++++++++++ mysql-test/t/show_check.test | 64 ++++++++++++++++++++++++++++++++++ sql/events.cc | 3 +- sql/sp_head.cc | 3 +- sql/sql_show.cc | 12 +++++-- 6 files changed, 160 insertions(+), 4 deletions(-) diff --git a/client/mysqldump.c b/client/mysqldump.c index 812a158048e..11be212ec3b 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -1201,6 +1201,20 @@ static void restore_time_zone(FILE *sql_file, (const char *) delimiter); } + +static int switch_character_set_results(MYSQL *mysql, const char *cs_name) +{ + char query_buffer[QUERY_LENGTH]; + size_t query_length; + + query_length= my_snprintf(query_buffer, + sizeof (query_buffer), + "SET SESSION character_set_results = '%s'", + (const char *) cs_name); + + return mysql_real_query(mysql, query_buffer, query_length); +} + /* Open a new .sql file to dump the table or view into @@ -1706,6 +1720,9 @@ static uint dump_events_for_db(char *db) if (fetch_db_collation(db_name_buff, db_cl_name, sizeof (db_cl_name))) DBUG_RETURN(1); + if (switch_character_set_results(mysql, "binary")) + DBUG_RETURN(1); + while ((event_list_row= mysql_fetch_row(event_list_res)) != NULL) { event_name= quote_name(event_list_row[1], name_buff, 0); @@ -1774,6 +1791,9 @@ static uint dump_events_for_db(char *db) } /* end of list of events */ fprintf(sql_file, "DELIMITER ;\n"); fprintf(sql_file, "/*!50106 SET TIME_ZONE= @save_time_zone */ ;\n"); + + if (switch_character_set_results(mysql, default_charset)) + DBUG_RETURN(1); } mysql_free_result(event_list_res); @@ -1853,6 +1873,9 @@ static uint dump_routines_for_db(char *db) if (fetch_db_collation(db_name_buff, db_cl_name, sizeof (db_cl_name))) DBUG_RETURN(1); + if (switch_character_set_results(mysql, "binary")) + DBUG_RETURN(1); + /* 0, retrieve and dump functions, 1, procedures */ for (i= 0; i <= 1; i++) { @@ -1990,6 +2013,9 @@ static uint dump_routines_for_db(char *db) mysql_free_result(routine_list_res); } /* end of for i (0 .. 1) */ + if (switch_character_set_results(mysql, default_charset)) + DBUG_RETURN(1); + if (lock_tables) VOID(mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES")); DBUG_RETURN(0); @@ -2542,6 +2568,9 @@ static void dump_triggers_for_table(char *table, char *db_name) if (fetch_db_collation(db_name, db_cl_name, sizeof (db_cl_name))) DBUG_VOID_RETURN; + if (switch_character_set_results(mysql, "binary")) + DBUG_VOID_RETURN; + /* Dump triggers. 
*/ while ((row= mysql_fetch_row(result))) @@ -2637,6 +2666,9 @@ static void dump_triggers_for_table(char *table, char *db_name) mysql_free_result(result); + if (switch_character_set_results(mysql, default_charset)) + DBUG_VOID_RETURN; + /* make sure to set back opt_compatible mode to original value @@ -4390,14 +4422,22 @@ static my_bool get_view_structure(char *table, char* db) result_table= quote_name(table, table_buff, 1); opt_quoted_table= quote_name(table, table_buff2, 0); + if (switch_character_set_results(mysql, "binary")) + DBUG_RETURN(1); + my_snprintf(query, sizeof(query), "SHOW CREATE TABLE %s", result_table); + if (mysql_query_with_error_report(mysql, &table_res, query)) + { + switch_character_set_results(mysql, default_charset); DBUG_RETURN(0); + } /* Check if this is a view */ field= mysql_fetch_field_direct(table_res, 0); if (strcmp(field->name, "View") != 0) { + switch_character_set_results(mysql, default_charset); verbose_msg("-- It's base table, skipped\n"); DBUG_RETURN(0); } @@ -4540,6 +4580,9 @@ static my_bool get_view_structure(char *table, char* db) dynstr_free(&ds_view); } + if (switch_character_set_results(mysql, default_charset)) + DBUG_RETURN(1); + /* If a separate .sql file was opened, close it now */ if (sql_file != md_result_file) { diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result index 9ef10865cd7..c89f386623b 100644 --- a/mysql-test/r/show_check.result +++ b/mysql-test/r/show_check.result @@ -1282,4 +1282,43 @@ t1_bi CREATE DEFINER=`root`@`localhost` TRIGGER t1_bi BEFORE INSERT ON t1 FOR E DROP TABLE t1; DROP PROCEDURE p1; DEALLOCATE PREPARE stmt1; +set names koi8r; +DROP VIEW IF EXISTS v1; +DROP PROCEDURE IF EXISTS p1; +DROP FUNCTION IF EXISTS f1; +DROP TABLE IF EXISTS t1; +DROP EVENT IF EXISTS ev1; +CREATE VIEW v1 AS SELECT 'ÔÅÓÔ' AS test; +CREATE PROCEDURE p1() SELECT 'ÔÅÓÔ' AS test; +CREATE FUNCTION f1() RETURNS CHAR(10) RETURN 'ÔÅÓÔ'; +CREATE TABLE t1(c1 CHAR(10)); +CREATE TRIGGER t1_bi BEFORE INSERT ON t1 +FOR EACH ROW +SET NEW.c1 = 'ÔÅÓÔ'; +CREATE EVENT ev1 ON SCHEDULE AT '2030-01-01 00:00:00' DO SELECT 'ÔÅÓÔ' AS test; +set names utf8; +SHOW CREATE VIEW v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select _koi8r'теÑÑ‚' AS `test` koi8r koi8r_general_ci +SHOW CREATE PROCEDURE p1; +Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation +p1 CREATE DEFINER=`root`@`localhost` PROCEDURE `p1`() +SELECT 'теÑÑ‚' AS test koi8r koi8r_general_ci latin1_swedish_ci +SHOW CREATE FUNCTION f1; +Function sql_mode Create Function character_set_client collation_connection Database Collation +f1 CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS char(10) CHARSET latin1 +RETURN 'теÑÑ‚' koi8r koi8r_general_ci latin1_swedish_ci +SHOW CREATE TRIGGER t1_bi; +Trigger sql_mode SQL Original Statement character_set_client collation_connection Database Collation +t1_bi CREATE DEFINER=`root`@`localhost` TRIGGER t1_bi BEFORE INSERT ON t1 +FOR EACH ROW +SET NEW.c1 = 'теÑÑ‚' koi8r koi8r_general_ci latin1_swedish_ci +SHOW CREATE EVENT ev1; +Event sql_mode time_zone Create Event character_set_client collation_connection Database Collation +ev1 SYSTEM CREATE EVENT `ev1` ON SCHEDULE AT '2030-01-01 00:00:00' ON COMPLETION NOT PRESERVE ENABLE DO SELECT 'теÑÑ‚' AS test koi8r koi8r_general_ci latin1_swedish_ci +DROP VIEW v1; +DROP PROCEDURE p1; +DROP FUNCTION f1; +DROP TABLE t1; +DROP EVENT ev1; End of 5.1 tests 
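As an aside to the mysqldump change above (switch_character_set_results), any client can use the same trick to fetch an object definition without server-side conversion. The following is a hedged sketch against the MySQL C API, not code from this patch; it assumes an already-connected MYSQL *mysql, trims error handling, and the column index 1 fits SHOW CREATE TABLE/VIEW (other SHOW CREATE variants return the statement in a different column).

#include <mysql.h>
#include <stdio.h>

/* Fetch SHOW CREATE output as the original, unconverted bytes by switching
   character_set_results to 'binary' around the statement, then restoring it. */
static int show_create_raw(MYSQL *mysql, const char *show_stmt)
{
  int rc= 1;

  if (mysql_query(mysql, "SET SESSION character_set_results = 'binary'"))
    return 1;

  if (!mysql_query(mysql, show_stmt))
  {
    MYSQL_RES *res= mysql_store_result(mysql);
    if (res)
    {
      MYSQL_ROW row= mysql_fetch_row(res);
      if (row && row[1])
      {
        printf("%s\n", row[1]);               /* definition bytes, no conversion applied */
        rc= 0;
      }
      mysql_free_result(res);
    }
  }

  /* Restore a usable result character set; 'utf8' here is just an example. */
  mysql_query(mysql, "SET SESSION character_set_results = 'utf8'");
  return rc;
}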
diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test index 341c9039390..2f474d0335b 100644 --- a/mysql-test/t/show_check.test +++ b/mysql-test/t/show_check.test @@ -897,4 +897,68 @@ DROP TABLE t1; DROP PROCEDURE p1; DEALLOCATE PREPARE stmt1; +# +# BUG#10491: Server returns data as charset binary SHOW CREATE TABLE or SELECT +# FROM INFORMATION_SCHEMA. +# +# Before the change performed to fix the bug, the metadata of the output of +# SHOW CREATE statements would always describe the result as 'binary'. That +# would ensure that the result is never converted to character_set_client +# (which was essential to mysqldump). Now we return to the client the actual +# character set of the object -- which is character_set_client of the +# connection that issues the CREATE statement, and this triggers an automatic +# conversion to character_set_results of the connection that issues SHOW CREATE +# statement. +# +# This test demonstrates that this conversion indeed is taking place. +# + +# Prepare: create objects in a one character set. + +set names koi8r; + +--disable_warnings +DROP VIEW IF EXISTS v1; +DROP PROCEDURE IF EXISTS p1; +DROP FUNCTION IF EXISTS f1; +DROP TABLE IF EXISTS t1; +DROP EVENT IF EXISTS ev1; +--enable_warnings + +CREATE VIEW v1 AS SELECT 'ÔÅÓÔ' AS test; + +CREATE PROCEDURE p1() SELECT 'ÔÅÓÔ' AS test; + +CREATE FUNCTION f1() RETURNS CHAR(10) RETURN 'ÔÅÓÔ'; + +CREATE TABLE t1(c1 CHAR(10)); +CREATE TRIGGER t1_bi BEFORE INSERT ON t1 + FOR EACH ROW + SET NEW.c1 = 'ÔÅÓÔ'; + +CREATE EVENT ev1 ON SCHEDULE AT '2030-01-01 00:00:00' DO SELECT 'ÔÅÓÔ' AS test; + +# Test: switch the character set and show that SHOW CREATE output is +# automatically converted to the new character_set_client. + +set names utf8; + +SHOW CREATE VIEW v1; + +SHOW CREATE PROCEDURE p1; + +SHOW CREATE FUNCTION f1; + +SHOW CREATE TRIGGER t1_bi; + +SHOW CREATE EVENT ev1; + +# Cleanup. 
+ +DROP VIEW v1; +DROP PROCEDURE p1; +DROP FUNCTION f1; +DROP TABLE t1; +DROP EVENT ev1; + --echo End of 5.1 tests diff --git a/sql/events.cc b/sql/events.cc index e48daeca63d..8d32580816f 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -717,7 +717,8 @@ send_show_create_event(THD *thd, Event_timed *et, Protocol *protocol) protocol->store(et->name.str, et->name.length, system_charset_info); protocol->store(sql_mode.str, sql_mode.length, system_charset_info); protocol->store(tz_name->ptr(), tz_name->length(), system_charset_info); - protocol->store(show_str.c_ptr(), show_str.length(), &my_charset_bin); + protocol->store(show_str.c_ptr(), show_str.length(), + et->creation_ctx->get_client_cs()); protocol->store(et->creation_ctx->get_client_cs()->csname, strlen(et->creation_ctx->get_client_cs()->csname), system_charset_info); diff --git a/sql/sp_head.cc b/sql/sp_head.cc index f0cc5204749..d3a9787ee2b 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -2260,7 +2260,8 @@ sp_head::show_create_routine(THD *thd, int type) protocol->store(sql_mode.str, sql_mode.length, system_charset_info); if (full_access) - protocol->store(m_defstr.str, m_defstr.length, &my_charset_bin); + protocol->store(m_defstr.str, m_defstr.length, + m_creation_ctx->get_client_cs()); else protocol->store_null(); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index f66897df671..04374c1d1c1 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -639,7 +639,8 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) if (table_list->view) { - protocol->store(buffer.ptr(), buffer.length(), &my_charset_bin); + protocol->store(buffer.ptr(), buffer.length(), + table_list->view_creation_ctx->get_client_cs()); protocol->store(table_list->view_creation_ctx->get_client_cs()->csname, system_charset_info); @@ -5983,6 +5984,8 @@ static bool show_create_trigger_impl(THD *thd, LEX_STRING trg_connection_cl_name; LEX_STRING trg_db_cl_name; + CHARSET_INFO *trg_client_cs; + /* TODO: Check privileges here. This functionality will be added by implementation of the following WL items: @@ -6008,6 +6011,11 @@ static bool show_create_trigger_impl(THD *thd, trg_sql_mode, &trg_sql_mode_str); + /* Resolve trigger client character set. */ + + if (resolve_charset(trg_client_cs_name.str, NULL, &trg_client_cs)) + return TRUE; + /* Send header. */ fields.push_back(new Item_empty_string("Trigger", NAME_LEN)); @@ -6054,7 +6062,7 @@ static bool show_create_trigger_impl(THD *thd, p->store(trg_sql_original_stmt.str, trg_sql_original_stmt.length, - &my_charset_bin); + trg_client_cs); p->store(trg_client_cs_name.str, trg_client_cs_name.length, From 30810f80b1070cbfd4579835353bb6e84fd1b233 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 12 Jul 2007 13:29:51 +0200 Subject: [PATCH 07/19] Bug#28249 Query Cache returns wrong result with concurrent insert / certain lock A race condition in the integration between MyISAM and the query cache code caused the query cache to fail to invalidate itself on concurrently inserted data. This patch fixes the problem by using the existing handler interface: upon each statement cache attempt, the size of the table as seen by the cache-writing thread is compared with the global table state. If the two sizes differ, the global table size is unknown and the current statement can't be cached.
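The decision the new handler call makes can be boiled down to a few lines. Below is a simplified, standalone sketch; the struct and function names are illustrative inventions (in the actual patch, ha_myisam::register_query_cache_table compares the shared MyISAM state with the statement thread's saved data_file_length). It only shows why a size mismatch means the statement must not be cached.

#include <cstdint>
#include <iostream>

/* Illustrative stand-ins for the shared table state and the per-thread snapshot. */
struct SharedTableState { uint64_t data_file_length; };  /* grows on concurrent INSERT */
struct ThreadSnapshot   { uint64_t data_file_length; };  /* what the SELECTing thread saw */

/* Mirrors the check described above: cache the statement only when the
   thread's snapshot still matches the live table size. */
static bool may_cache_statement(const SharedTableState &live,
                                const ThreadSnapshot &snapshot)
{
  return live.data_file_length == snapshot.data_file_length;
}

int main()
{
  SharedTableState live{4096};
  ThreadSnapshot in_sync{4096};     /* no concurrent INSERT happened */
  ThreadSnapshot stale{2048};       /* the table grew after this thread's snapshot */

  std::cout << std::boolalpha
            << may_cache_statement(live, in_sync) << "\n"   /* true:  safe to cache */
            << may_cache_statement(live, stale)   << "\n";  /* false: do not cache  */
  return 0;
}

When the sizes match, caching the result is safe; when they differ, the statement is simply not cached, which is cheaper and simpler than trying to lock out concurrent inserts.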
mysql-test/r/query_cache.result: Added test case mysql-test/t/query_cache.test: Added test case sql/ha_myisam.cc: - Implemented handler interface for ha_myisam class to determine if the table belonging to the currently processed statement can be cached or not. sql/ha_myisam.h: - Implemented handler interface for ha_myisam class to determine if the table belonging to the currently processed statement can be cached or not. sql/handler.h: - Documented register_query_cache_table method in the handler interface. --- mysql-test/r/query_cache.result | 39 +++++++++++++++++ mysql-test/t/query_cache.test | 73 ++++++++++++++++++++++++++++++++ sql/ha_myisam.cc | 74 +++++++++++++++++++++++++++++++++ sql/ha_myisam.h | 7 ++++ sql/handler.h | 45 +++++++++++++++++--- 5 files changed, 232 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/query_cache.result b/mysql-test/r/query_cache.result index 79471ee5c02..58b8aad6fc9 100644 --- a/mysql-test/r/query_cache.result +++ b/mysql-test/r/query_cache.result @@ -1409,3 +1409,42 @@ set GLOBAL query_cache_type=default; set GLOBAL query_cache_limit=default; set GLOBAL query_cache_min_res_unit=default; set GLOBAL query_cache_size= default; +Bug#28249 Query Cache returns wrong result with concurrent insert/ certain lock +set GLOBAL query_cache_type=1; +set GLOBAL query_cache_limit=10000; +set GLOBAL query_cache_min_res_unit=0; +set GLOBAL query_cache_size= 100000; +flush tables; +drop table if exists t1, t2; +create table t1 (a int); +create table t2 (a int); +insert into t1 values (1),(2),(3); +Locking table T2 with a write lock. +lock table t2 write; +Select blocked by write lock. +select *, (select count(*) from t2) from t1;; +Sleeing is ok, because selecting should be done very fast. +Inserting into table T1. +insert into t1 values (4); +Unlocking the tables. +unlock tables; +Collecting result from previously blocked select. +Next select should contain 4 rows, as the insert is long finished. +select *, (select count(*) from t2) from t1; +a (select count(*) from t2) +1 0 +2 0 +3 0 +4 0 +reset query cache; +select *, (select count(*) from t2) from t1; +a (select count(*) from t2) +1 0 +2 0 +3 0 +4 0 +drop table t1,t2; +set GLOBAL query_cache_type=default; +set GLOBAL query_cache_limit=default; +set GLOBAL query_cache_min_res_unit=default; +set GLOBAL query_cache_size=default; diff --git a/mysql-test/t/query_cache.test b/mysql-test/t/query_cache.test index 1ef104f820b..17073e039c2 100644 --- a/mysql-test/t/query_cache.test +++ b/mysql-test/t/query_cache.test @@ -970,4 +970,77 @@ set GLOBAL query_cache_limit=default; set GLOBAL query_cache_min_res_unit=default; set GLOBAL query_cache_size= default; +# +# Bug #28249 Query Cache returns wrong result with concurrent insert / certain lock +# +--echo Bug#28249 Query Cache returns wrong result with concurrent insert/ certain lock +connect (user1,localhost,root,,test,,); +connect (user2,localhost,root,,test,,); +connect (user3,localhost,root,,test,,); + +connection user1; + +set GLOBAL query_cache_type=1; +set GLOBAL query_cache_limit=10000; +set GLOBAL query_cache_min_res_unit=0; +set GLOBAL query_cache_size= 100000; + +flush tables; +--disable_warnings +drop table if exists t1, t2; +--enable_warnings +create table t1 (a int); +create table t2 (a int); +insert into t1 values (1),(2),(3); +connection user2; +--echo Locking table T2 with a write lock. +lock table t2 write; + +connection user1; +--echo Select blocked by write lock.
+--send select *, (select count(*) from t2) from t1; +--echo Sleeing is ok, because selecting should be done very fast. +sleep 5; + +connection user3; +--echo Inserting into table T1. +insert into t1 values (4); + +connection user2; +--echo Unlocking the tables. +unlock tables; + +connection user1; +--echo Collecting result from previously blocked select. +# +# Since the lock ordering rule in thr_multi_lock depends on +# pointer values, from execution to execution we might have +# different lock order, and therefore, sometimes lock t1 and block +# on t2, and sometimes block on t2 right away. In the second case, +# the following insert succeeds, and only then this select can +# proceed, and we actually test nothing, as the very first select +# returns 4 rows right away. +# It's fine to have a test case that covers the problematic area +# at least once in a while. +# We, however, need to disable the result log here to make the +# test repeatable. +--disable_result_log +--reap +--enable_result_log +--echo Next select should contain 4 rows, as the insert is long finished. +select *, (select count(*) from t2) from t1; +reset query cache; +select *, (select count(*) from t2) from t1; + +drop table t1,t2; + +connection default; +disconnect user1; +disconnect user2; +disconnect user3; +set GLOBAL query_cache_type=default; +set GLOBAL query_cache_limit=default; +set GLOBAL query_cache_min_res_unit=default; +set GLOBAL query_cache_size=default; # End of 5.0 tests + diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 6d8a770175d..ebaa1d6bd3b 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -1922,3 +1922,77 @@ uint ha_myisam::checksum() const return (uint)file->state->checksum; } +#ifdef HAVE_QUERY_CACHE +/** + @brief Register a named table with a call back function to the query cache. + + @param thd The thread handle + @param table_key A pointer to the table name in the table cache + @param key_length The length of the table name + @param[out] engine_callback The pointer to the storage engine call back + function, currently 0 + @param[out] engine_data Engine data will be set to 0. + + @note Despite the name of this function, it is used to check each statement + before it is cached and not to register a table or callback function. + + @see handler::register_query_cache_table + + @return The error code. The engine_data and engine_callback will be set to 0. + @retval TRUE Success + @retval FALSE An error occured +*/ + +my_bool ha_myisam::register_query_cache_table(THD *thd, char *table_name, + uint table_name_len, + qc_engine_callback + *engine_callback, + ulonglong *engine_data) +{ + /* + No call back function is needed to determine if a cached statement + is valid or not. + */ + *engine_callback= 0; + + /* + No engine data is needed. + */ + *engine_data= 0; + + /* + If a concurrent INSERT has happened just before the currently processed + SELECT statement, the total size of the table is unknown. + + To determine if the table size is known, the current thread's snap shot of + the table size with the actual table size are compared. + + If the table size is unknown the SELECT statement can't be cached. + */ + ulonglong actual_data_file_length; + ulonglong current_data_file_length; + + /* + POSIX visibility rules specify that "2. Whatever memory values a + thread can see when it unlocks a mutex <...> can also be seen by any + thread that later locks the same mutex". 
In this particular case, + concurrent insert thread had modified the data_file_length in + MYISAM_SHARE before it has unlocked (or even locked) + structure_guard_mutex. So, here we're guaranteed to see at least that + value after we've locked the same mutex. We can see a later value + (modified by some other thread) though, but it's ok, as we only want + to know if the variable was changed, the actual new value doesn't matter + */ + actual_data_file_length= file->s->state.state.data_file_length; + current_data_file_length= file->save_state.data_file_length; + + if (current_data_file_length != actual_data_file_length) + { + /* Don't cache current statement. */ + return FALSE; + } + + /* It is ok to try to cache current statement. */ + return TRUE; +} +#endif diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h index b186d9c7bb8..536ea211820 100644 --- a/sql/ha_myisam.h +++ b/sql/ha_myisam.h @@ -127,4 +127,11 @@ class ha_myisam: public handler int dump(THD* thd, int fd); int net_read_dump(NET* net); #endif +#ifdef HAVE_QUERY_CACHE + my_bool register_query_cache_table(THD *thd, char *table_key, + uint key_length, + qc_engine_callback + *engine_callback, + ulonglong *engine_data); +#endif }; diff --git a/sql/handler.h b/sql/handler.h index 9863d541b5f..d25796d8546 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -841,16 +841,49 @@ public: /* Type of table for caching query */ virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; } - /* ask handler about permission to cache table when query is to be cached */ + + + /** + @brief Register a named table with a call back function to the query cache. + + @param thd The thread handle + @param table_key A pointer to the table name in the table cache + @param key_length The length of the table name + @param[out] engine_callback The pointer to the storage engine call back + function + @param[out] engine_data Storage engine specific data which could be + anything + + This method offers the storage engine, the possibility to store a reference + to a table name which is going to be used with query cache. + The method is called each time a statement is written to the cache and can + be used to verify if a specific statement is cachable. It also offers + the possibility to register a generic (but static) call back function which + is called each time a statement is matched against the query cache. + + @note If engine_data supplied with this function is different from + engine_data supplied with the callback function, and the callback returns + FALSE, a table invalidation on the current table will occur. + + @return Upon success the engine_callback will point to the storage engine + call back function, if any, and engine_data will point to any storage + engine data used in the specific implementation. + @retval TRUE Success + @retval FALSE The specified table or current statement should not be + cached + */ + virtual my_bool register_query_cache_table(THD *thd, char *table_key, - uint key_length, - qc_engine_callback - *engine_callback, - ulonglong *engine_data) + uint key_length, + qc_engine_callback + *engine_callback, + ulonglong *engine_data) { *engine_callback= 0; - return 1; + return TRUE; } + + /* RETURN true Primary key (if there is one) is clustered key covering all fields From 9dc3088f9e4cad7b6e33d4d1b35f11d4b5b5e372 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 12 Jul 2007 22:26:41 +0400 Subject: [PATCH 08/19] A fix and a test case for Bug#26141 mixing table types in trigger causes full table lock on innodb table. 
Also fixes Bug#28502 Triggers that update another innodb table will block on X lock unnecessarily (duplicate). Code review fixes. Both bugs' synopses are misleading: the InnoDB table is not X locked. The statements, however, cannot proceed concurrently, but this happens due to lock conflicts for tables used in triggers, not for the InnoDB table. If a user had an InnoDB table, and two triggers, AFTER UPDATE and AFTER INSERT, competing for different resources (e.g. two distinct MyISAM tables), then these two triggers would not be able to execute concurrently. Moreover, INSERTS/UPDATES of the InnoDB table would not be able to run concurrently. The problem had other side-effects (see respective bug reports). This behavior was a consequence of a shortcoming of the pre-locking algorithm, which would not distinguish between different DML operations (e.g. INSERT and DELETE) and pre-lock all the tables that are used by any trigger defined on the subject table. The idea of the fix is to extend the pre-locking algorithm to keep track, for each table, what DML operation it is used for and not load triggers that are known to never be fired. mysql-test/r/trigger-trans.result: Update results (Bug#26141) mysql-test/r/trigger.result: Update results (Bug#28502) mysql-test/t/trigger-trans.test: Add a test case for Bug#26141 mixing table types in trigger causes full table lock on innodb table. mysql-test/t/trigger.test: Add a test case for Bug#28502 Triggers that update another innodb table will block on X lock unnecessarily. Add more test coverage for triggers. sql/item.h: enum trg_event_type is needed in table.h sql/sp.cc: Take into account table_list->trg_event_map when determining what tables to pre-lock. After this change, if we attempt to fire a trigger for which we had not pre-locked any tables, error 'Table was not locked with LOCK TABLES' will be printed. This, however, should never happen, provided the pre-locking algorithm has no programming bugs. Previously a trigger key in the sroutines hash was based on the name of the table the trigger belongs to. This was possible because we would always add to the pre-locking list all the triggers defined for a table when handling this table. Now the key is based on the name of the trigger, owing to the fact that a trigger name must be unique in the database it belongs to. sql/sp_head.cc: Generate sroutines hash key in init_sp_name(). This is a convenient place since there we have all the necessary information and can avoid an extra alloc. Maintain and merge trg_event_map when adding and merging elements of the pre-locking list. sql/sp_head.h: Add m_sroutines_key member, used when inserting the sphead for a trigger into the cache of routines used by a statement. Previously the key was based on the table name the trigger belonged to, since for a given table we would add to the sroutines list all the triggers defined on it. sql/sql_lex.cc: Introduce a new lex step: set_trg_event_type_for_tables(). It is called when we have finished parsing but before opening and locking tables. Now this step is used to evaluate for each TABLE_LIST instance which INSERT/UPDATE/DELETE operation, if any, it is used in. In future this method could be extended to aggregate other information that is hard to aggregate during parsing. sql/sql_lex.h: Add declaration for set_trg_event_type_for_tables(). sql/sql_parse.cc: Call set_trg_event_type_for_tables() after MYSQLparse(). Remove tabs. sql/sql_prepare.cc: Call set_trg_event_type_for_tables() after MYSQLparse().
sql/sql_trigger.cc: Call set_trg_event_type_for_tables() after MYSQLparse(). sql/sql_trigger.h: Remove an obsolete member. sql/sql_view.cc: Call set_trg_event_type_for_tables() after MYSQLparse(). sql/sql_yacc.yy: Move assignment of sp_head::m_type before calling sp_head::init_spname(), one is now used inside another. sql/table.cc: Implement TABLE_LIST::set_trg_event_map() - a method that calculates wh triggers may be fired on this table when executing a statement. sql/table.h: Add missing declarations. Move declaration of trg_event_type from item.h (it will be needed for trg_event_map bitmap when we start using Bitmap template instead of uint8). --- mysql-test/r/trigger-trans.result | 59 ++++ mysql-test/r/trigger.result | 457 ++++++++++++++++++++++++++++++ mysql-test/t/trigger-trans.test | 82 +++++- mysql-test/t/trigger.test | 365 ++++++++++++++++++++++++ sql/item.h | 8 - sql/sp.cc | 53 ++-- sql/sp_head.cc | 53 +++- sql/sp_head.h | 6 + sql/sql_lex.cc | 21 ++ sql/sql_lex.h | 2 + sql/sql_parse.cc | 5 +- sql/sql_prepare.cc | 1 + sql/sql_trigger.cc | 28 +- sql/sql_trigger.h | 8 - sql/sql_view.cc | 13 + sql/sql_yacc.yy | 4 +- sql/table.cc | 129 +++++++++ sql/table.h | 19 ++ 18 files changed, 1255 insertions(+), 58 deletions(-) diff --git a/mysql-test/r/trigger-trans.result b/mysql-test/r/trigger-trans.result index b56abf1f59a..cd5f629564f 100644 --- a/mysql-test/r/trigger-trans.result +++ b/mysql-test/r/trigger-trans.result @@ -82,3 +82,62 @@ ALICE 33 1 0 THE CROWN 43 1 0 THE PIE 53 1 1 drop table t1; + +Bug#26141 mixing table types in trigger causes full +table lock on innodb table + +Ensure we do not open and lock tables for the triggers we do not +fire. + +drop table if exists t1, t2, t3; +drop trigger if exists trg_bug26141_au; +drop trigger if exists trg_bug26141_ai; +create table t1 (c int primary key) engine=innodb; +create table t2 (c int) engine=myisam; +create table t3 (c int) engine=myisam; +insert into t1 (c) values (1); +create trigger trg_bug26141_ai after insert on t1 +for each row +begin +insert into t2 (c) values (1); +# We need the 'sync' lock to synchronously wait in connection 2 till +# the moment when the trigger acquired all the locks. 
+select release_lock("lock_bug26141_sync") into @a; +# 1000 is time in seconds of lock wait timeout -- this is a way +# to cause a manageable sleep up to 1000 seconds +select get_lock("lock_bug26141_wait", 1000) into @a; +end| +create trigger trg_bug26141_au after update on t1 +for each row +begin +insert into t3 (c) values (1); +end| +select get_lock("lock_bug26141_wait", 0); +get_lock("lock_bug26141_wait", 0) +1 +select get_lock("lock_bug26141_sync", /* must not be priorly locked */ 0); +get_lock("lock_bug26141_sync", /* must not be priorly locked */ 0) +1 +insert into t1 (c) values (2); +select get_lock("lock_bug26141_sync", 1000); +get_lock("lock_bug26141_sync", 1000) +1 +update t1 set c=3 where c=1; +select release_lock("lock_bug26141_sync"); +release_lock("lock_bug26141_sync") +1 +select release_lock("lock_bug26141_wait"); +release_lock("lock_bug26141_wait") +1 +select * from t1; +c +2 +3 +select * from t2; +c +1 +select * from t3; +c +1 +drop table t1, t2, t3; +End of 5.0 tests diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result index 290929d476d..4b18e525e62 100644 --- a/mysql-test/r/trigger.result +++ b/mysql-test/r/trigger.result @@ -1476,4 +1476,461 @@ DROP TRIGGER t1_test; DROP TABLE t1,t2; SET SESSION LOW_PRIORITY_UPDATES=DEFAULT; SET GLOBAL LOW_PRIORITY_UPDATES=DEFAULT; + +Bug#28502 Triggers that update another innodb table will block +on X lock unnecessarily + +Ensure we do not open and lock tables for triggers we do not fire. + +drop table if exists t1, t2; +drop trigger if exists trg_bug28502_au; +create table t1 (id int, count int); +create table t2 (id int); +create trigger trg_bug28502_au before update on t2 +for each row +begin +if (new.id is not null) then +update t1 set count= count + 1 where id = old.id; +end if; +end| +insert into t1 (id, count) values (1, 0); +lock table t1 write; +insert into t2 set id=1; +unlock tables; +update t2 set id=1 where id=1; +select * from t1; +id count +1 1 +select * from t2; +id +1 +drop table t1, t2; + +Additionally, provide test coverage for triggers and +all MySQL data changing commands. 
+ +drop table if exists t1, t2, t1_op_log; +drop view if exists v1; +drop trigger if exists trg_bug28502_bi; +drop trigger if exists trg_bug28502_ai; +drop trigger if exists trg_bug28502_bu; +drop trigger if exists trg_bug28502_au; +drop trigger if exists trg_bug28502_bd; +drop trigger if exists trg_bug28502_ad; +create table t1 (id int primary key auto_increment, operation varchar(255)); +create table t2 (id int primary key); +create table t1_op_log(operation varchar(255)); +create view v1 as select * from t1; +create trigger trg_bug28502_bi before insert on t1 +for each row +insert into t1_op_log (operation) +values (concat("Before INSERT, new=", new.operation)); +create trigger trg_bug28502_ai after insert on t1 +for each row +insert into t1_op_log (operation) +values (concat("After INSERT, new=", new.operation)); +create trigger trg_bug28502_bu before update on t1 +for each row +insert into t1_op_log (operation) +values (concat("Before UPDATE, new=", new.operation, +", old=", old.operation)); +create trigger trg_bug28502_au after update on t1 +for each row +insert into t1_op_log (operation) +values (concat("After UPDATE, new=", new.operation, +", old=", old.operation)); +create trigger trg_bug28502_bd before delete on t1 +for each row +insert into t1_op_log (operation) +values (concat("Before DELETE, old=", old.operation)); +create trigger trg_bug28502_ad after delete on t1 +for each row +insert into t1_op_log (operation) +values (concat("After DELETE, old=", old.operation)); +insert into t1 (operation) values ("INSERT"); +set @id=last_insert_id(); +select * from t1; +id operation +1 INSERT +select * from t1_op_log; +operation +Before INSERT, new=INSERT +After INSERT, new=INSERT +truncate t1_op_log; +update t1 set operation="UPDATE" where id=@id; +select * from t1; +id operation +1 UPDATE +select * from t1_op_log; +operation +Before UPDATE, new=UPDATE, old=INSERT +After UPDATE, new=UPDATE, old=INSERT +truncate t1_op_log; +delete from t1 where id=@id; +select * from t1; +id operation +select * from t1_op_log; +operation +Before DELETE, old=UPDATE +After DELETE, old=UPDATE +truncate t1; +truncate t1_op_log; +insert into t1 (id, operation) values +(NULL, "INSERT ON DUPLICATE KEY UPDATE, inserting a new key") +on duplicate key update id=NULL, operation="Should never happen"; +set @id=last_insert_id(); +select * from t1; +id operation +1 INSERT ON DUPLICATE KEY UPDATE, inserting a new key +select * from t1_op_log; +operation +Before INSERT, new=INSERT ON DUPLICATE KEY UPDATE, inserting a new key +After INSERT, new=INSERT ON DUPLICATE KEY UPDATE, inserting a new key +truncate t1_op_log; +insert into t1 (id, operation) values +(@id, "INSERT ON DUPLICATE KEY UPDATE, the key value is the same") +on duplicate key update id=NULL, +operation="INSERT ON DUPLICATE KEY UPDATE, updating the duplicate"; +select * from t1; +id operation +0 INSERT ON DUPLICATE KEY UPDATE, updating the duplicate +select * from t1_op_log; +operation +Before INSERT, new=INSERT ON DUPLICATE KEY UPDATE, the key value is the same +Before UPDATE, new=INSERT ON DUPLICATE KEY UPDATE, updating the duplicate, old=INSERT ON DUPLICATE KEY UPDATE, inserting a new key +After UPDATE, new=INSERT ON DUPLICATE KEY UPDATE, updating the duplicate, old=INSERT ON DUPLICATE KEY UPDATE, inserting a new key +truncate t1; +truncate t1_op_log; +replace into t1 values (NULL, "REPLACE, inserting a new key"); +set @id=last_insert_id(); +select * from t1; +id operation +1 REPLACE, inserting a new key +select * from t1_op_log; +operation +Before INSERT, 
new=REPLACE, inserting a new key +After INSERT, new=REPLACE, inserting a new key +truncate t1_op_log; +replace into t1 values (@id, "REPLACE, deleting the duplicate"); +select * from t1; +id operation +1 REPLACE, deleting the duplicate +select * from t1_op_log; +operation +Before INSERT, new=REPLACE, deleting the duplicate +Before DELETE, old=REPLACE, inserting a new key +After DELETE, old=REPLACE, inserting a new key +After INSERT, new=REPLACE, deleting the duplicate +truncate t1; +truncate t1_op_log; +create table if not exists t1 +select NULL, "CREATE TABLE ... SELECT, inserting a new key"; +Warnings: +Note 1050 Table 't1' already exists +set @id=last_insert_id(); +select * from t1; +id operation +1 CREATE TABLE ... SELECT, inserting a new key +select * from t1_op_log; +operation +Before INSERT, new=CREATE TABLE ... SELECT, inserting a new key +After INSERT, new=CREATE TABLE ... SELECT, inserting a new key +truncate t1_op_log; +create table if not exists t1 replace +select @id, "CREATE TABLE ... REPLACE SELECT, deleting a duplicate key"; +Warnings: +Note 1050 Table 't1' already exists +select * from t1; +id operation +1 CREATE TABLE ... REPLACE SELECT, deleting a duplicate key +select * from t1_op_log; +operation +Before INSERT, new=CREATE TABLE ... REPLACE SELECT, deleting a duplicate key +Before DELETE, old=CREATE TABLE ... SELECT, inserting a new key +After DELETE, old=CREATE TABLE ... SELECT, inserting a new key +After INSERT, new=CREATE TABLE ... REPLACE SELECT, deleting a duplicate key +truncate t1; +truncate t1_op_log; +insert into t1 (id, operation) +select NULL, "INSERT ... SELECT, inserting a new key"; +set @id=last_insert_id(); +select * from t1; +id operation +1 INSERT ... SELECT, inserting a new key +select * from t1_op_log; +operation +Before INSERT, new=INSERT ... SELECT, inserting a new key +After INSERT, new=INSERT ... SELECT, inserting a new key +truncate t1_op_log; +insert into t1 (id, operation) +select @id, +"INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate" +on duplicate key update id=NULL, +operation="INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate"; +select * from t1; +id operation +0 INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate +select * from t1_op_log; +operation +Before INSERT, new=INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate +Before UPDATE, new=INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate, old=INSERT ... SELECT, inserting a new key +After UPDATE, new=INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate, old=INSERT ... SELECT, inserting a new key +truncate t1; +truncate t1_op_log; +replace into t1 (id, operation) +select NULL, "REPLACE ... SELECT, inserting a new key"; +set @id=last_insert_id(); +select * from t1; +id operation +1 REPLACE ... SELECT, inserting a new key +select * from t1_op_log; +operation +Before INSERT, new=REPLACE ... SELECT, inserting a new key +After INSERT, new=REPLACE ... SELECT, inserting a new key +truncate t1_op_log; +replace into t1 (id, operation) +select @id, "REPLACE ... SELECT, deleting a duplicate"; +select * from t1; +id operation +1 REPLACE ... SELECT, deleting a duplicate +select * from t1_op_log; +operation +Before INSERT, new=REPLACE ... SELECT, deleting a duplicate +Before DELETE, old=REPLACE ... SELECT, inserting a new key +After DELETE, old=REPLACE ... SELECT, inserting a new key +After INSERT, new=REPLACE ... 
SELECT, deleting a duplicate +truncate t1; +truncate t1_op_log; +insert into t1 (id, operation) values (1, "INSERT for multi-DELETE"); +insert into t2 (id) values (1); +delete t1.*, t2.* from t1, t2 where t1.id=1; +select * from t1; +id operation +select * from t2; +id +select * from t1_op_log; +operation +Before INSERT, new=INSERT for multi-DELETE +After INSERT, new=INSERT for multi-DELETE +Before DELETE, old=INSERT for multi-DELETE +After DELETE, old=INSERT for multi-DELETE +truncate t1; +truncate t2; +truncate t1_op_log; +insert into t1 (id, operation) values (1, "INSERT for multi-UPDATE"); +insert into t2 (id) values (1); +update t1, t2 set t1.id=2, operation="multi-UPDATE" where t1.id=1; +update t1, t2 +set t2.id=3, operation="multi-UPDATE, SET for t2, but the trigger is fired" where t1.id=2; +select * from t1; +id operation +2 multi-UPDATE, SET for t2, but the trigger is fired +select * from t2; +id +3 +select * from t1_op_log; +operation +Before INSERT, new=INSERT for multi-UPDATE +After INSERT, new=INSERT for multi-UPDATE +Before UPDATE, new=multi-UPDATE, old=INSERT for multi-UPDATE +After UPDATE, new=multi-UPDATE, old=INSERT for multi-UPDATE +Before UPDATE, new=multi-UPDATE, SET for t2, but the trigger is fired, old=multi-UPDATE +After UPDATE, new=multi-UPDATE, SET for t2, but the trigger is fired, old=multi-UPDATE +truncate table t1; +truncate table t2; +truncate table t1_op_log; + +Now do the same but use a view instead of the base table. + +insert into v1 (operation) values ("INSERT"); +set @id=last_insert_id(); +select * from t1; +id operation +1 INSERT +select * from t1_op_log; +operation +Before INSERT, new=INSERT +After INSERT, new=INSERT +truncate t1_op_log; +update v1 set operation="UPDATE" where id=@id; +select * from t1; +id operation +1 UPDATE +select * from t1_op_log; +operation +Before UPDATE, new=UPDATE, old=INSERT +After UPDATE, new=UPDATE, old=INSERT +truncate t1_op_log; +delete from v1 where id=@id; +select * from t1; +id operation +select * from t1_op_log; +operation +Before DELETE, old=UPDATE +After DELETE, old=UPDATE +truncate t1; +truncate t1_op_log; +insert into v1 (id, operation) values +(NULL, "INSERT ON DUPLICATE KEY UPDATE, inserting a new key") +on duplicate key update id=NULL, operation="Should never happen"; +set @id=last_insert_id(); +select * from t1; +id operation +1 INSERT ON DUPLICATE KEY UPDATE, inserting a new key +select * from t1_op_log; +operation +Before INSERT, new=INSERT ON DUPLICATE KEY UPDATE, inserting a new key +After INSERT, new=INSERT ON DUPLICATE KEY UPDATE, inserting a new key +truncate t1_op_log; +insert into v1 (id, operation) values +(@id, "INSERT ON DUPLICATE KEY UPDATE, the key value is the same") +on duplicate key update id=NULL, +operation="INSERT ON DUPLICATE KEY UPDATE, updating the duplicate"; +select * from t1; +id operation +0 INSERT ON DUPLICATE KEY UPDATE, updating the duplicate +select * from t1_op_log; +operation +Before INSERT, new=INSERT ON DUPLICATE KEY UPDATE, the key value is the same +Before UPDATE, new=INSERT ON DUPLICATE KEY UPDATE, updating the duplicate, old=INSERT ON DUPLICATE KEY UPDATE, inserting a new key +After UPDATE, new=INSERT ON DUPLICATE KEY UPDATE, updating the duplicate, old=INSERT ON DUPLICATE KEY UPDATE, inserting a new key +truncate t1; +truncate t1_op_log; +replace into v1 values (NULL, "REPLACE, inserting a new key"); +set @id=last_insert_id(); +select * from t1; +id operation +1 REPLACE, inserting a new key +select * from t1_op_log; +operation +Before INSERT, new=REPLACE, inserting a new 
key +After INSERT, new=REPLACE, inserting a new key +truncate t1_op_log; +replace into v1 values (@id, "REPLACE, deleting the duplicate"); +select * from t1; +id operation +1 REPLACE, deleting the duplicate +select * from t1_op_log; +operation +Before INSERT, new=REPLACE, deleting the duplicate +Before DELETE, old=REPLACE, inserting a new key +After DELETE, old=REPLACE, inserting a new key +After INSERT, new=REPLACE, deleting the duplicate +truncate t1; +truncate t1_op_log; +create table if not exists v1 +select NULL, "CREATE TABLE ... SELECT, inserting a new key"; +Warnings: +Note 1050 Table 'v1' already exists +set @id=last_insert_id(); +select * from t1; +id operation +1 CREATE TABLE ... SELECT, inserting a new key +select * from t1_op_log; +operation +Before INSERT, new=CREATE TABLE ... SELECT, inserting a new key +After INSERT, new=CREATE TABLE ... SELECT, inserting a new key +truncate t1_op_log; +create table if not exists v1 replace +select @id, "CREATE TABLE ... REPLACE SELECT, deleting a duplicate key"; +Warnings: +Note 1050 Table 'v1' already exists +select * from t1; +id operation +1 CREATE TABLE ... REPLACE SELECT, deleting a duplicate key +select * from t1_op_log; +operation +Before INSERT, new=CREATE TABLE ... REPLACE SELECT, deleting a duplicate key +Before DELETE, old=CREATE TABLE ... SELECT, inserting a new key +After DELETE, old=CREATE TABLE ... SELECT, inserting a new key +After INSERT, new=CREATE TABLE ... REPLACE SELECT, deleting a duplicate key +truncate t1; +truncate t1_op_log; +insert into v1 (id, operation) +select NULL, "INSERT ... SELECT, inserting a new key"; +set @id=last_insert_id(); +select * from t1; +id operation +1 INSERT ... SELECT, inserting a new key +select * from t1_op_log; +operation +Before INSERT, new=INSERT ... SELECT, inserting a new key +After INSERT, new=INSERT ... SELECT, inserting a new key +truncate t1_op_log; +insert into v1 (id, operation) +select @id, +"INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate" +on duplicate key update id=NULL, +operation="INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate"; +select * from t1; +id operation +0 INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate +select * from t1_op_log; +operation +Before INSERT, new=INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate +Before UPDATE, new=INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate, old=INSERT ... SELECT, inserting a new key +After UPDATE, new=INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate, old=INSERT ... SELECT, inserting a new key +truncate t1; +truncate t1_op_log; +replace into v1 (id, operation) +select NULL, "REPLACE ... SELECT, inserting a new key"; +set @id=last_insert_id(); +select * from t1; +id operation +1 REPLACE ... SELECT, inserting a new key +select * from t1_op_log; +operation +Before INSERT, new=REPLACE ... SELECT, inserting a new key +After INSERT, new=REPLACE ... SELECT, inserting a new key +truncate t1_op_log; +replace into v1 (id, operation) +select @id, "REPLACE ... SELECT, deleting a duplicate"; +select * from t1; +id operation +1 REPLACE ... SELECT, deleting a duplicate +select * from t1_op_log; +operation +Before INSERT, new=REPLACE ... SELECT, deleting a duplicate +Before DELETE, old=REPLACE ... SELECT, inserting a new key +After DELETE, old=REPLACE ... SELECT, inserting a new key +After INSERT, new=REPLACE ... 
SELECT, deleting a duplicate +truncate t1; +truncate t1_op_log; +insert into v1 (id, operation) values (1, "INSERT for multi-DELETE"); +insert into t2 (id) values (1); +delete v1.*, t2.* from v1, t2 where v1.id=1; +select * from t1; +id operation +select * from t2; +id +select * from t1_op_log; +operation +Before INSERT, new=INSERT for multi-DELETE +After INSERT, new=INSERT for multi-DELETE +Before DELETE, old=INSERT for multi-DELETE +After DELETE, old=INSERT for multi-DELETE +truncate t1; +truncate t2; +truncate t1_op_log; +insert into v1 (id, operation) values (1, "INSERT for multi-UPDATE"); +insert into t2 (id) values (1); +update v1, t2 set v1.id=2, operation="multi-UPDATE" where v1.id=1; +update v1, t2 +set t2.id=3, operation="multi-UPDATE, SET for t2, but the trigger is fired" where v1.id=2; +select * from t1; +id operation +2 multi-UPDATE, SET for t2, but the trigger is fired +select * from t2; +id +3 +select * from t1_op_log; +operation +Before INSERT, new=INSERT for multi-UPDATE +After INSERT, new=INSERT for multi-UPDATE +Before UPDATE, new=multi-UPDATE, old=INSERT for multi-UPDATE +After UPDATE, new=multi-UPDATE, old=INSERT for multi-UPDATE +Before UPDATE, new=multi-UPDATE, SET for t2, but the trigger is fired, old=multi-UPDATE +After UPDATE, new=multi-UPDATE, SET for t2, but the trigger is fired, old=multi-UPDATE +drop view v1; +drop table t1, t2, t1_op_log; End of 5.0 tests diff --git a/mysql-test/t/trigger-trans.test b/mysql-test/t/trigger-trans.test index 5c135d98878..8103a1ba0b1 100644 --- a/mysql-test/t/trigger-trans.test +++ b/mysql-test/t/trigger-trans.test @@ -49,4 +49,84 @@ insert into t1 values ('The Pie', 50, 1, 1); select * from t1; drop table t1; -# End of 5.0 tests +--echo +--echo Bug#26141 mixing table types in trigger causes full +--echo table lock on innodb table +--echo +--echo Ensure we do not open and lock tables for the triggers we do not +--echo fire. +--echo +--disable_warnings +drop table if exists t1, t2, t3; +drop trigger if exists trg_bug26141_au; +drop trigger if exists trg_bug26141_ai; +--enable_warnings +# Note, for InnoDB to allow concurrent UPDATE and INSERT the +# table must have a unique key. +create table t1 (c int primary key) engine=innodb; +create table t2 (c int) engine=myisam; +create table t3 (c int) engine=myisam; +insert into t1 (c) values (1); +delimiter |; + +create trigger trg_bug26141_ai after insert on t1 +for each row +begin + insert into t2 (c) values (1); +# We need the 'sync' lock to synchronously wait in connection 2 till +# the moment when the trigger acquired all the locks. + select release_lock("lock_bug26141_sync") into @a; +# 1000 is time in seconds of lock wait timeout -- this is a way +# to cause a manageable sleep up to 1000 seconds + select get_lock("lock_bug26141_wait", 1000) into @a; +end| + +create trigger trg_bug26141_au after update on t1 +for each row +begin + insert into t3 (c) values (1); +end| +delimiter ;| + +# Establish an alternative connection. +--connect (connection_aux,localhost,root,,test,,) +--connect (connection_update,localhost,root,,test,,) + +connection connection_aux; +# Lock the wait lock, it must not be locked, so specify zero timeout. +select get_lock("lock_bug26141_wait", 0); + +# +connection default; +# +# Run the trigger synchronously +# +select get_lock("lock_bug26141_sync", /* must not be priorly locked */ 0); +# Will acquire the table level locks, perform the insert into t2, +# release the sync lock and block on the wait lock. 
+send insert into t1 (c) values (2); + +connection connection_update; +# Wait for the trigger to acquire its locks and unlock the sync lock. +select get_lock("lock_bug26141_sync", 1000); +# +# This must continue: after the fix for the bug, we do not +# open tables for t2, and with c=4 innobase allows the update +# to run concurrently with insert. +update t1 set c=3 where c=1; +select release_lock("lock_bug26141_sync"); +connection connection_aux; +select release_lock("lock_bug26141_wait"); +connection default; +reap; +select * from t1; +select * from t2; +select * from t3; + +# Drops the trigger as well. +drop table t1, t2, t3; +disconnect connection_update; +disconnect connection_aux; + + +--echo End of 5.0 tests diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test index 0fa92f33de2..a6390036322 100644 --- a/mysql-test/t/trigger.test +++ b/mysql-test/t/trigger.test @@ -1828,5 +1828,370 @@ DROP TRIGGER t1_test; DROP TABLE t1,t2; SET SESSION LOW_PRIORITY_UPDATES=DEFAULT; SET GLOBAL LOW_PRIORITY_UPDATES=DEFAULT; +--echo +--echo Bug#28502 Triggers that update another innodb table will block +--echo on X lock unnecessarily +--echo +--echo Ensure we do not open and lock tables for triggers we do not fire. +--echo +--disable_warnings +drop table if exists t1, t2; +drop trigger if exists trg_bug28502_au; +--enable_warnings +create table t1 (id int, count int); +create table t2 (id int); +delimiter |; + +create trigger trg_bug28502_au before update on t2 +for each row +begin + if (new.id is not null) then + update t1 set count= count + 1 where id = old.id; + end if; +end| + +delimiter ;| +insert into t1 (id, count) values (1, 0); + +lock table t1 write; + +--connect (connection_insert, localhost, root, , test, , ) +connection connection_insert; +# Is expected to pass. +insert into t2 set id=1; +connection default; +unlock tables; +update t2 set id=1 where id=1; +select * from t1; +select * from t2; +# Will drop the trigger +drop table t1, t2; +disconnect connection_insert; +--echo +--echo Additionally, provide test coverage for triggers and +--echo all MySQL data changing commands. 
+--echo +--disable_warnings +drop table if exists t1, t2, t1_op_log; +drop view if exists v1; +drop trigger if exists trg_bug28502_bi; +drop trigger if exists trg_bug28502_ai; +drop trigger if exists trg_bug28502_bu; +drop trigger if exists trg_bug28502_au; +drop trigger if exists trg_bug28502_bd; +drop trigger if exists trg_bug28502_ad; +--enable_warnings +create table t1 (id int primary key auto_increment, operation varchar(255)); +create table t2 (id int primary key); +create table t1_op_log(operation varchar(255)); +create view v1 as select * from t1; +create trigger trg_bug28502_bi before insert on t1 +for each row + insert into t1_op_log (operation) + values (concat("Before INSERT, new=", new.operation)); + +create trigger trg_bug28502_ai after insert on t1 +for each row + insert into t1_op_log (operation) + values (concat("After INSERT, new=", new.operation)); + +create trigger trg_bug28502_bu before update on t1 +for each row + insert into t1_op_log (operation) + values (concat("Before UPDATE, new=", new.operation, + ", old=", old.operation)); + +create trigger trg_bug28502_au after update on t1 +for each row + insert into t1_op_log (operation) + values (concat("After UPDATE, new=", new.operation, + ", old=", old.operation)); + +create trigger trg_bug28502_bd before delete on t1 +for each row + insert into t1_op_log (operation) + values (concat("Before DELETE, old=", old.operation)); + +create trigger trg_bug28502_ad after delete on t1 +for each row + insert into t1_op_log (operation) + values (concat("After DELETE, old=", old.operation)); + +insert into t1 (operation) values ("INSERT"); + +set @id=last_insert_id(); + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +update t1 set operation="UPDATE" where id=@id; + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +delete from t1 where id=@id; + +select * from t1; +select * from t1_op_log; +truncate t1; +truncate t1_op_log; + +insert into t1 (id, operation) values +(NULL, "INSERT ON DUPLICATE KEY UPDATE, inserting a new key") +on duplicate key update id=NULL, operation="Should never happen"; + +set @id=last_insert_id(); + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +insert into t1 (id, operation) values +(@id, "INSERT ON DUPLICATE KEY UPDATE, the key value is the same") +on duplicate key update id=NULL, +operation="INSERT ON DUPLICATE KEY UPDATE, updating the duplicate"; + +select * from t1; +select * from t1_op_log; +truncate t1; +truncate t1_op_log; + +replace into t1 values (NULL, "REPLACE, inserting a new key"); + +set @id=last_insert_id(); + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +replace into t1 values (@id, "REPLACE, deleting the duplicate"); + +select * from t1; +select * from t1_op_log; +truncate t1; +truncate t1_op_log; + +create table if not exists t1 +select NULL, "CREATE TABLE ... SELECT, inserting a new key"; + +set @id=last_insert_id(); + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +create table if not exists t1 replace +select @id, "CREATE TABLE ... REPLACE SELECT, deleting a duplicate key"; + +select * from t1; +select * from t1_op_log; +truncate t1; +truncate t1_op_log; + +insert into t1 (id, operation) +select NULL, "INSERT ... SELECT, inserting a new key"; + +set @id=last_insert_id(); + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +insert into t1 (id, operation) +select @id, +"INSERT ... SELECT ... 
ON DUPLICATE KEY UPDATE, updating a duplicate" +on duplicate key update id=NULL, +operation="INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate"; + +select * from t1; +select * from t1_op_log; +truncate t1; +truncate t1_op_log; + +replace into t1 (id, operation) +select NULL, "REPLACE ... SELECT, inserting a new key"; + +set @id=last_insert_id(); + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +replace into t1 (id, operation) +select @id, "REPLACE ... SELECT, deleting a duplicate"; + +select * from t1; +select * from t1_op_log; +truncate t1; +truncate t1_op_log; + +insert into t1 (id, operation) values (1, "INSERT for multi-DELETE"); +insert into t2 (id) values (1); + +delete t1.*, t2.* from t1, t2 where t1.id=1; + +select * from t1; +select * from t2; +select * from t1_op_log; +truncate t1; +truncate t2; +truncate t1_op_log; + +insert into t1 (id, operation) values (1, "INSERT for multi-UPDATE"); +insert into t2 (id) values (1); +update t1, t2 set t1.id=2, operation="multi-UPDATE" where t1.id=1; +update t1, t2 +set t2.id=3, operation="multi-UPDATE, SET for t2, but the trigger is fired" where t1.id=2; + +select * from t1; +select * from t2; +select * from t1_op_log; +truncate table t1; +truncate table t2; +truncate table t1_op_log; + +--echo +--echo Now do the same but use a view instead of the base table. +--echo + +insert into v1 (operation) values ("INSERT"); + +set @id=last_insert_id(); + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +update v1 set operation="UPDATE" where id=@id; + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +delete from v1 where id=@id; + +select * from t1; +select * from t1_op_log; +truncate t1; +truncate t1_op_log; + +insert into v1 (id, operation) values +(NULL, "INSERT ON DUPLICATE KEY UPDATE, inserting a new key") +on duplicate key update id=NULL, operation="Should never happen"; + +set @id=last_insert_id(); + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +insert into v1 (id, operation) values +(@id, "INSERT ON DUPLICATE KEY UPDATE, the key value is the same") +on duplicate key update id=NULL, +operation="INSERT ON DUPLICATE KEY UPDATE, updating the duplicate"; + +select * from t1; +select * from t1_op_log; +truncate t1; +truncate t1_op_log; + +replace into v1 values (NULL, "REPLACE, inserting a new key"); + +set @id=last_insert_id(); + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +replace into v1 values (@id, "REPLACE, deleting the duplicate"); + +select * from t1; +select * from t1_op_log; +truncate t1; +truncate t1_op_log; + +create table if not exists v1 +select NULL, "CREATE TABLE ... SELECT, inserting a new key"; + +set @id=last_insert_id(); + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +create table if not exists v1 replace +select @id, "CREATE TABLE ... REPLACE SELECT, deleting a duplicate key"; + +select * from t1; +select * from t1_op_log; +truncate t1; +truncate t1_op_log; + +insert into v1 (id, operation) +select NULL, "INSERT ... SELECT, inserting a new key"; + +set @id=last_insert_id(); + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +insert into v1 (id, operation) +select @id, +"INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate" +on duplicate key update id=NULL, +operation="INSERT ... SELECT ... 
ON DUPLICATE KEY UPDATE, updating a duplicate"; + +select * from t1; +select * from t1_op_log; +truncate t1; +truncate t1_op_log; + +replace into v1 (id, operation) +select NULL, "REPLACE ... SELECT, inserting a new key"; + +set @id=last_insert_id(); + +select * from t1; +select * from t1_op_log; +truncate t1_op_log; + +replace into v1 (id, operation) +select @id, "REPLACE ... SELECT, deleting a duplicate"; + +select * from t1; +select * from t1_op_log; +truncate t1; +truncate t1_op_log; + +insert into v1 (id, operation) values (1, "INSERT for multi-DELETE"); +insert into t2 (id) values (1); + +delete v1.*, t2.* from v1, t2 where v1.id=1; + +select * from t1; +select * from t2; +select * from t1_op_log; +truncate t1; +truncate t2; +truncate t1_op_log; + +insert into v1 (id, operation) values (1, "INSERT for multi-UPDATE"); +insert into t2 (id) values (1); +update v1, t2 set v1.id=2, operation="multi-UPDATE" where v1.id=1; +update v1, t2 +set t2.id=3, operation="multi-UPDATE, SET for t2, but the trigger is fired" where v1.id=2; + +select * from t1; +select * from t2; +select * from t1_op_log; + +drop view v1; +drop table t1, t2, t1_op_log; + +# +# TODO: test LOAD DATA INFILE --echo End of 5.0 tests diff --git a/sql/item.h b/sql/item.h index 58e3ec439b4..07bf7fec0ea 100644 --- a/sql/item.h +++ b/sql/item.h @@ -2309,14 +2309,6 @@ enum trg_action_time_type TRG_ACTION_BEFORE= 0, TRG_ACTION_AFTER= 1, TRG_ACTION_MAX }; -/* - Event on which trigger is invoked. -*/ -enum trg_event_type -{ - TRG_EVENT_INSERT= 0 , TRG_EVENT_UPDATE= 1, TRG_EVENT_DELETE= 2, TRG_EVENT_MAX -}; - class Table_triggers_list; /* diff --git a/sql/sp.cc b/sql/sp.cc index 3c8ebed4ae6..c0e7d5e2271 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -440,6 +440,19 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, lex_start(thd); thd->spcont= NULL; ret= MYSQLparse(thd); + + if (ret == 0) + { + /* + Not strictly necessary to invoke this method here, since we know + that we've parsed CREATE PROCEDURE/FUNCTION and not an + UPDATE/DELETE/INSERT/REPLACE/LOAD/CREATE TABLE, but we try to + maintain the invariant that this method is called for each + distinct statement, in case its logic is extended with other + types of analyses in future. 
+ */ + newlex.set_trg_event_type_for_tables(); + } } if (ret || thd->is_fatal_error || newlex.sphead == NULL) @@ -1742,31 +1755,39 @@ sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex, TABLE_LIST *table) { int ret= 0; - Table_triggers_list *triggers= table->table->triggers; - if (add_used_routine(lex, thd->stmt_arena, &triggers->sroutines_key, - table->belong_to_view)) + + Sroutine_hash_entry **last_cached_routine_ptr= + (Sroutine_hash_entry **)lex->sroutines_list.next; + + if (static_cast(table->lock_type) >= + static_cast(TL_WRITE_ALLOW_WRITE)) { - Sroutine_hash_entry **last_cached_routine_ptr= - (Sroutine_hash_entry **)lex->sroutines_list.next; for (int i= 0; i < (int)TRG_EVENT_MAX; i++) { - for (int j= 0; j < (int)TRG_ACTION_MAX; j++) + if (table->trg_event_map & + static_cast(1 << static_cast(i))) { - if (triggers->bodies[i][j]) + for (int j= 0; j < (int)TRG_ACTION_MAX; j++) { - (void)triggers->bodies[i][j]-> - add_used_tables_to_table_list(thd, &lex->query_tables_last, - table->belong_to_view); - sp_update_stmt_used_routines(thd, lex, - &triggers->bodies[i][j]->m_sroutines, - table->belong_to_view); + /* We can have only one trigger per action type currently */ + sp_head *trigger= table->table->triggers->bodies[i][j]; + if (trigger && + add_used_routine(lex, thd->stmt_arena, &trigger->m_sroutines_key, + table->belong_to_view)) + { + trigger->add_used_tables_to_table_list(thd, &lex->query_tables_last, + table->belong_to_view); + sp_update_stmt_used_routines(thd, lex, + &trigger->m_sroutines, + table->belong_to_view); + } } } } - ret= sp_cache_routines_and_add_tables_aux(thd, lex, - *last_cached_routine_ptr, - FALSE, NULL); } + ret= sp_cache_routines_and_add_tables_aux(thd, lex, + *last_cached_routine_ptr, + FALSE, NULL); return ret; } diff --git a/sql/sp_head.cc b/sql/sp_head.cc index d939fd20b9b..0ac1db336d0 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -478,12 +478,35 @@ sp_head::init(LEX *lex) */ lex->trg_table_fields.empty(); my_init_dynamic_array(&m_instr, sizeof(sp_instr *), 16, 8); - m_param_begin= m_param_end= m_body_begin= 0; - m_qname.str= m_db.str= m_name.str= m_params.str= - m_body.str= m_defstr.str= 0; - m_qname.length= m_db.length= m_name.length= m_params.length= - m_body.length= m_defstr.length= 0; + + m_param_begin= NULL; + m_param_end= NULL; + + m_body_begin= NULL ; + + m_qname.str= NULL; + m_qname.length= 0; + + m_db.str= NULL; + m_db.length= 0; + + m_name.str= NULL; + m_name.length= 0; + + m_params.str= NULL; + m_params.length= 0; + + m_body.str= NULL; + m_body.length= 0; + + m_defstr.str= NULL; + m_defstr.length= 0; + + m_sroutines_key.str= NULL; + m_sroutines_key.length= 0; + m_return_field_def.charset= NULL; + DBUG_VOID_RETURN; } @@ -509,9 +532,14 @@ sp_head::init_sp_name(THD *thd, sp_name *spname) if (spname->m_qname.length == 0) spname->init_qname(thd); - m_qname.length= spname->m_qname.length; - m_qname.str= strmake_root(thd->mem_root, spname->m_qname.str, - m_qname.length); + m_sroutines_key.length= spname->m_sroutines_key.length; + m_sroutines_key.str= memdup_root(thd->mem_root, + spname->m_sroutines_key.str, + spname->m_sroutines_key.length + 1); + m_sroutines_key.str[0]= static_cast(m_type); + + m_qname.length= m_sroutines_key.length - 1; + m_qname.str= m_sroutines_key.str + 1; DBUG_VOID_RETURN; } @@ -1796,8 +1824,11 @@ sp_head::restore_lex(THD *thd) { DBUG_ENTER("sp_head::restore_lex"); LEX *sublex= thd->lex; - LEX *oldlex= (LEX *)m_lex.pop(); + LEX *oldlex; + sublex->set_trg_event_type_for_tables(); + + oldlex= (LEX *)m_lex.pop(); if 
(! oldlex) return; // Nothing to restore @@ -3429,6 +3460,7 @@ typedef struct st_sp_table thr_lock_type lock_type; /* lock type used for prelocking */ uint lock_count; uint query_lock_count; + uint8 trg_event_map; } SP_TABLE; byte * @@ -3515,6 +3547,7 @@ sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check) tab->query_lock_count++; if (tab->query_lock_count > tab->lock_count) tab->lock_count++; + tab->trg_event_map|= table->trg_event_map; } else { @@ -3536,6 +3569,7 @@ sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check) tab->db_length= table->db_length; tab->lock_type= table->lock_type; tab->lock_count= tab->query_lock_count= 1; + tab->trg_event_map= table->trg_event_map; my_hash_insert(&m_sptabs, (byte *)tab); } } @@ -3613,6 +3647,7 @@ sp_head::add_used_tables_to_table_list(THD *thd, table->cacheable_table= 1; table->prelocking_placeholder= 1; table->belong_to_view= belong_to_view; + table->trg_event_map= stab->trg_event_map; /* Everyting else should be zeroed */ diff --git a/sql/sp_head.h b/sql/sp_head.h index ed99885ae9a..ebe40ce9c87 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -130,6 +130,12 @@ public: st_sp_chistics *m_chistics; ulong m_sql_mode; // For SHOW CREATE and execution LEX_STRING m_qname; // db.name + /** + Key representing routine in the set of stored routines used by statement. + [routine_type]db.name\0 + @sa sp_name::m_sroutines_key + */ + LEX_STRING m_sroutines_key; LEX_STRING m_db; LEX_STRING m_name; LEX_STRING m_params; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index dbce1e38139..c37d77345b6 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -2034,6 +2034,27 @@ void st_select_lex_unit::set_limit(SELECT_LEX *sl) } +/** + Update the parsed tree with information about triggers that + may be fired when executing this statement. +*/ + +void st_lex::set_trg_event_type_for_tables() +{ + /* + Do not iterate over sub-selects, only the tables in the outermost + SELECT_LEX can be modified, if any. 
+ */ + TABLE_LIST *tables= select_lex.get_table_list(); + + while (tables) + { + tables->set_trg_event_type(this); + tables= tables->next_local; + } +} + + /* Unlink the first table from the global table list and the first table from outer select (lex->select_lex) local list diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 25a6c31e21c..bfa6c05974f 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1188,6 +1188,8 @@ typedef struct st_lex : public Query_tables_list un->uncacheable|= cause; } } + void set_trg_event_type_for_tables(); + TABLE_LIST *unlink_first_table(bool *link_to_local); void link_first_table_back(TABLE_LIST *first, bool link_to_local); void first_lists_tables_same(); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 124fcff9517..91c51641fc0 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6080,8 +6080,9 @@ void mysql_parse(THD *thd, const char *inBuf, uint length, (thd->query_length= (ulong)(lip.found_semicolon - thd->query))) thd->query_length--; /* Actually execute the query */ - mysql_execute_command(thd); - query_cache_end_of_result(thd); + lex->set_trg_event_type_for_tables(); + mysql_execute_command(thd); + query_cache_end_of_result(thd); } } } diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 567f92b55ba..c993ce32e50 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -2826,6 +2826,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) lex_start(thd); lex->safe_to_cache_query= FALSE; int err= MYSQLparse((void *)thd); + lex->set_trg_event_type_for_tables(); error= err || thd->is_fatal_error || thd->net.report_error || init_param_array(this); diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 5762614e47f..6e4b5defb97 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -942,17 +942,6 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, table->triggers= triggers; - /* - Construct key that will represent triggers for this table in the set - of routines used by statement. - */ - triggers->sroutines_key.length= 1+strlen(db)+1+strlen(table_name)+1; - if (!(triggers->sroutines_key.str= - alloc_root(&table->mem_root, triggers->sroutines_key.length))) - DBUG_RETURN(1); - triggers->sroutines_key.str[0]= TYPE_ENUM_TRIGGER; - strxmov(triggers->sroutines_key.str+1, db, ".", table_name, NullS); - /* TODO: This could be avoided if there is no triggers for UPDATE and DELETE. @@ -991,6 +980,15 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, DBUG_ASSERT(lex.sphead == 0); goto err_with_lex_cleanup; } + /* + Not strictly necessary to invoke this method here, since we know + that we've parsed CREATE TRIGGER and not an + UPDATE/DELETE/INSERT/REPLACE/LOAD/CREATE TABLE, but we try to + maintain the invariant that this method is called for each + distinct statement, in case its logic is extended with other + types of analyses in future. + */ + lex.set_trg_event_type_for_tables(); lex.sphead->set_info(0, 0, &lex.sp_chistics, (ulong) *trg_sql_mode); @@ -1550,6 +1548,12 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event, new_field= record1_field; old_field= trigger_table->field; } + /* + This trigger must have been processed by the pre-locking + algorithm. 
+ */ + DBUG_ASSERT(trigger_table->pos_in_table_list->trg_event_map & + static_cast(1 << static_cast(event))); thd->reset_sub_statement_state(&statement_state, SUB_STMT_TRIGGER); err_status= sp_trigger->execute_trigger @@ -1568,7 +1572,7 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event, SYNOPSIS mark_fields_used() thd Current thread context - event Type of event triggers for which we are going to inspect + event Type of event triggers for which we are going to ins DESCRIPTION This method marks fields of subject table which are read/set in its diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h index b029a70ca20..1dc573995f1 100644 --- a/sql/sql_trigger.h +++ b/sql/sql_trigger.h @@ -56,14 +56,6 @@ class Table_triggers_list: public Sql_alloc updating trigger definitions during RENAME TABLE. */ List on_table_names_list; - /* - Key representing triggers for this table in set of all stored - routines used by statement. - TODO: We won't need this member once triggers namespace will be - database-wide instead of table-wide because then we will be able - to use key based on sp_name as for other stored routines. - */ - LEX_STRING sroutines_key; /* Grant information for each trigger (pair: subject table, trigger definer). diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 6c94d388d0e..7857ba267c5 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -1153,7 +1153,20 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, */ for (tbl= view_main_select_tables; tbl; tbl= tbl->next_local) tbl->lock_type= table->lock_type; + /* + If the view is mergeable, we might want to + INSERT/UPDATE/DELETE into tables of this view. Preserve the + original sql command and 'duplicates' of the outer lex. + This is used later in set_trg_event_type_for_command. + */ + lex->sql_command= old_lex->sql_command; + lex->duplicates= old_lex->duplicates; } + /* + This method has a dependency on the proper lock type being set, + so in case of views should be called here. + */ + lex->set_trg_event_type_for_tables(); /* If we are opening this view as part of implicit LOCK TABLES, then diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 949f3ed4161..8cc6642ae7e 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -9644,13 +9644,13 @@ trigger_tail: MYSQL_YYABORT; sp->reset_thd_mem_root(thd); sp->init(lex); + sp->m_type= TYPE_ENUM_TRIGGER; sp->init_sp_name(thd, $3); lex->stmt_definition_begin= $2; lex->ident.str= $7; lex->ident.length= $10 - $7; - sp->m_type= TYPE_ENUM_TRIGGER; lex->sphead= sp; lex->spname= $3; /* @@ -9728,9 +9728,9 @@ sp_tail: sp= new sp_head(); sp->reset_thd_mem_root(YYTHD); sp->init(lex); + sp->m_type= TYPE_ENUM_PROCEDURE; sp->init_sp_name(YYTHD, $3); - sp->m_type= TYPE_ENUM_PROCEDURE; lex->sphead= sp; /* * We have to turn of CLIENT_MULTI_QUERIES while parsing a diff --git a/sql/table.cc b/sql/table.cc index 899d0ab2ed0..9c3e7618aa0 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1776,6 +1776,135 @@ void st_table::reset_item_list(List *item_list) const } } + +/** + Set the initial purpose of this TABLE_LIST object in the list of + used tables. We need to track this information on table-by- + table basis, since when this table becomes an element of the + pre-locked list, it's impossible to identify which SQL + sub-statement it has been originally used in. 
+ + E.g.: + + User request: SELECT * FROM t1 WHERE f1(); + FUNCTION f1(): DELETE FROM t2; RETURN 1; + BEFORE DELETE trigger on t2: INSERT INTO t3 VALUES (old.a); + + For this user request, the pre-locked list will contain t1, t2, t3 + table elements, each needed for different DML. + + This method is called immediately after parsing for tables + of the table list of the top-level select lex. + + The trigger event map is updated to reflect INSERT, UPDATE, DELETE, + REPLACE, LOAD DATA, CREATE TABLE .. SELECT, CREATE TABLE .. + REPLACE SELECT statements, and additionally ON DUPLICATE KEY UPDATE + clause. +*/ + +void +TABLE_LIST::set_trg_event_type(const st_lex *lex) +{ + enum trg_event_type trg_event; + + /* + Some auxiliary operations + (e.g. GRANT processing) create TABLE_LIST instances outside + the parser. Additionally, some commands (e.g. OPTIMIZE) change + the lock type for a table only after parsing is done. Luckily, + these do not fire triggers and do not need to pre-load them. + For these TABLE_LISTs set_trg_event_type is never called, and + trg_event_map is always empty. That means that the pre-locking + algorithm will ignore triggers defined on these tables, if + any, and the execution will either fail with an assert in + sql_trigger.cc or with an error that a used table was not + pre-locked, in case of a production build. + + TODO: this usage pattern creates unnecessary module dependencies + and should be rewritten to go through the parser. + Table list instances created outside the parser in most cases + refer to mysql.* system tables. It is not allowed to have + a trigger on a system table, but keeping track of + initialization provides extra safety in case this limitation + is circumvented. + */ + + /* + This is a fast check to filter out statements that do + not change data, or tables on the right side, in case of + INSERT .. SELECT, CREATE TABLE .. SELECT and so on. + Here we also filter out OPTIMIZE statement and non-updateable + views, for which lock_type is TL_UNLOCK or TL_READ after + parsing. + */ + if (static_cast(lock_type) < static_cast(TL_WRITE_ALLOW_WRITE)) + return; + + switch (lex->sql_command) { + /* + Basic INSERT. If there is an additional ON DUPLIATE KEY UPDATE + clause, it will be handled later in this method. + */ + case SQLCOM_INSERT: /* fall through */ + case SQLCOM_INSERT_SELECT: + /* + LOAD DATA ... INFILE is expected to fire BEFORE/AFTER INSERT + triggers. + If the statement also has REPLACE clause, it will be + handled later in this method. + */ + case SQLCOM_LOAD: /* fall through */ + /* + REPLACE is semantically equivalent to INSERT. In case + of a primary or unique key conflict, it deletes the old + record and inserts a new one. So we also may need to + fire ON DELETE triggers. This functionality is handled + later in this method. + */ + case SQLCOM_REPLACE: /* fall through */ + case SQLCOM_REPLACE_SELECT: + /* + CREATE TABLE ... SELECT defaults to INSERT if the table or + view already exists. REPLACE option of CREATE TABLE ... + REPLACE SELECT is handled later in this method. + */ + case SQLCOM_CREATE_TABLE: + trg_event= TRG_EVENT_INSERT; + break; + /* Basic update and multi-update */ + case SQLCOM_UPDATE: /* fall through */ + case SQLCOM_UPDATE_MULTI: + trg_event= TRG_EVENT_UPDATE; + break; + /* Basic delete and multi-delete */ + case SQLCOM_DELETE: /* fall through */ + case SQLCOM_DELETE_MULTI: + trg_event= TRG_EVENT_DELETE; + break; + default: + /* + OK to return, since value of 'duplicates' is irrelevant + for non-updating commands. 
+ */ + return; + } + trg_event_map|= static_cast(1 << static_cast(trg_event)); + + switch (lex->duplicates) { + case DUP_UPDATE: + trg_event= TRG_EVENT_UPDATE; + break; + case DUP_REPLACE: + trg_event= TRG_EVENT_DELETE; + break; + case DUP_ERROR: + default: + return; + } + trg_event_map|= static_cast(1 << static_cast(trg_event)); +} + + /* calculate md5 of query diff --git a/sql/table.h b/sql/table.h index b29ef8c6566..f8f7d7f06b7 100644 --- a/sql/table.h +++ b/sql/table.h @@ -59,6 +59,17 @@ enum tmp_table_type {NO_TMP_TABLE=0, NON_TRANSACTIONAL_TMP_TABLE=1, TRANSACTIONAL_TMP_TABLE=2, SYSTEM_TMP_TABLE=3}; + +/** Event on which trigger is invoked. */ +enum trg_event_type +{ + TRG_EVENT_INSERT= 0, + TRG_EVENT_UPDATE= 1, + TRG_EVENT_DELETE= 2, + TRG_EVENT_MAX +}; + + enum frm_type_enum { FRMTYPE_ERROR= 0, @@ -702,6 +713,13 @@ struct TABLE_LIST */ bool create; + /** + Indicates what triggers we need to pre-load for this TABLE_LIST + when opening an associated TABLE. This is filled after + the parsed tree is created. + */ + uint8 trg_event_map; + enum enum_schema_table_state schema_table_state; void calc_md5(char *buffer); void set_underlying_merge(); @@ -752,6 +770,7 @@ struct TABLE_LIST void reinit_before_use(THD *thd); Item_subselect *containing_subselect(); + void set_trg_event_type(const st_lex *lex); private: bool prep_check_option(THD *thd, uint8 check_opt_type); bool prep_where(THD *thd, Item **conds, bool no_where_clause); From 0e2bb8dd43303331404615acdbc5c7ec91906113 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 12 Jul 2007 23:14:00 +0400 Subject: [PATCH 09/19] Apply community contributed fix for Bug#13326 SQLPS statement logging is incomplete in 5.0 (and review fixes). When in 5.0.13 I introduced class Prepared_statement and methods ::prepare and ::execute, general logging was left out of this class. This was good for stored procedures, since in stored procedures we do not log sub-statements, but introduced a regression in case of SQL syntax for prepared statements, as previously we would log the actual statements to the log, and after the change we would log only COM_QUERY text. Restore the old behavior, but still suppress logging if inside a stored procedure. Based on a community contributed patch from Vladimir Shebordaev. No test case since we do not have a mechanism to test output of the general log. sql/sql_prepare.cc: Apply community contributed fix for Bug#13326 SQLPS statement logging is incomplete in 5.0 (and review fixes). 
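
The restored rule reduces to a single check on the stored-procedure runtime
context. As a minimal sketch of that rule (log_prepared_stmt is an
illustrative helper name, not a function introduced by this patch):

    /*
      Sketch only, not part of the patch: thd->spcont is non-NULL only while
      a stored program is executing, so Dynamic SQL issued from inside a
      routine stays out of the general log, while top-level PREPARE and
      EXECUTE (binary protocol or SQL syntax) are logged again.
    */
    static void log_prepared_stmt(THD *thd, enum enum_server_command command,
                                  ulong stmt_id, const char *query, uint length)
    {
      if (thd->spcont == NULL)
        mysql_log.write(thd, command, "[%lu] %.*b", stmt_id, length, query);
    }

For COM_STMT_PREPARE the statement id and the prepared text are logged; for
COM_STMT_EXECUTE the expanded query in thd->query is logged instead, as the
hunks below show.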
--- sql/sql_prepare.cc | 59 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 45 insertions(+), 14 deletions(-) diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 567f92b55ba..90a4a05dadd 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1900,13 +1900,6 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length) /* Statement map deletes statement on erase */ thd->stmt_map.erase(stmt); } - else - { - const char *format= "[%lu] %.*b"; - mysql_log.write(thd, COM_STMT_PREPARE, format, stmt->id, - stmt->query_length, stmt->query); - - } /* check_prepared_statemnt sends the metadata packet in case of success */ DBUG_VOID_RETURN; } @@ -2289,13 +2282,6 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length) test(flags & (ulong) CURSOR_TYPE_READ_ONLY)); if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(), WAIT_PRIOR); - if (error == 0) - { - const char *format= "[%lu] %.*b"; - mysql_log.write(thd, COM_STMT_EXECUTE, format, stmt->id, - thd->query_length, thd->query); - } - DBUG_VOID_RETURN; set_params_data_err: @@ -2878,6 +2864,29 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) init_stmt_after_parse(lex); state= Query_arena::PREPARED; flags&= ~ (uint) IS_IN_USE; + + /* + Log COM_EXECUTE to the general log. Note, that in case of SQL + prepared statements this causes two records to be output: + + Query PREPARE stmt from @user_variable + Prepare + + This is considered user-friendly, since in the + second log entry we output the actual statement text. + + Do not print anything if this is an SQL prepared statement and + we're inside a stored procedure (also called Dynamic SQL) -- + sub-statements inside stored procedures are not logged into + the general log. + */ + if (thd->spcont == NULL) + { + const char *format= "[%lu] %.*b"; + mysql_log.write(thd, COM_STMT_PREPARE, format, id, + query_length, query); + + } } DBUG_RETURN(error); } @@ -3015,6 +3024,28 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) if (state == Query_arena::PREPARED) state= Query_arena::EXECUTED; + /* + Log COM_EXECUTE to the general log. Note, that in case of SQL + prepared statements this causes two records to be output: + + Query EXECUTE + Execute + + This is considered user-friendly, since in the + second log entry we output values of parameter markers. + + Do not print anything if this is an SQL prepared statement and + we're inside a stored procedure (also called Dynamic SQL) -- + sub-statements inside stored procedures are not logged into + the general log. + */ + if (error == 0 && thd->spcont == NULL) + { + const char *format= "[%lu] %.*b"; + mysql_log.write(thd, COM_STMT_EXECUTE, format, id, + thd->query_length, thd->query); + } + error: flags&= ~ (uint) IS_IN_USE; return error; From 31ea7d042ba48bd34a457bba138d2189f844731d Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 14 Jul 2007 02:04:48 +0400 Subject: [PATCH 10/19] A follow up after the patch for Bug#21074 - even though we now have exclusive name lock on the table name in mysql_rm_table_part2, we still should keep LOCK_open - some storage engines are not ready for locking scope change and assume that LOCK_open is kept. Still, the binary logging and query cache invalidation calls moved out of LOCK_open scope. Fixes some of the broken 5.1-runtime tests (tests break on asserts). sql/ha_ndbcluster.cc: Do not lock LOCK_open for mysql_rm_table_part2 - it does that for us now. sql/mysql_priv.h: Remove an unused flag. 
sql/sql_class.h: Fix an unrelated compiler warning. sql/sql_db.cc: Adjust to the changed signature. sql/sql_table.cc: mysql_rm_table_part2: we need to keep LOCK_open while calling storage engine functions, even though now we have an exclusive lock on the table name. Some of them assume that it's kept and attempt to unlock it. --- sql/ha_ndbcluster.cc | 6 ++---- sql/mysql_priv.h | 3 +-- sql/sql_class.h | 2 +- sql/sql_db.cc | 2 +- sql/sql_table.cc | 44 ++++++++++++++++++-------------------------- 5 files changed, 23 insertions(+), 34 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 393c2356404..24ce9946086 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -7019,8 +7019,6 @@ int ndbcluster_find_files(handlerton *hton, THD *thd, } } - // Lock mutex before deleting and creating frm files - pthread_mutex_lock(&LOCK_open); if (!global_read_lock) { // Delete old files @@ -7037,14 +7035,14 @@ int ndbcluster_find_files(handlerton *hton, THD *thd, FALSE, /* if_exists */ FALSE, /* drop_temporary */ FALSE, /* drop_view */ - TRUE, /* dont_log_query*/ - FALSE); /* need lock open */ + TRUE /* dont_log_query*/); /* Clear error message that is returned when table is deleted */ thd->clear_error(); } } + pthread_mutex_lock(&LOCK_open); // Create new files List_iterator_fast it2(create_list); while ((file_name=it2++)) diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index e0de89a5b74..8b00cfb5d24 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -901,8 +901,7 @@ void mysql_client_binlog_statement(THD *thd); bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, my_bool drop_temporary); int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, - bool drop_temporary, bool drop_view, bool log_query, - bool need_lock_open); + bool drop_temporary, bool drop_view, bool log_query); bool quick_rm_table(handlerton *base,const char *db, const char *table_name, uint flags); void close_cached_table(THD *thd, TABLE *table); diff --git a/sql/sql_class.h b/sql/sql_class.h index a8d62d93b21..0dad4966623 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1999,8 +1999,8 @@ class select_insert :public select_result_interceptor { class select_create: public select_insert { ORDER *group; TABLE_LIST *create_table; - TABLE_LIST *select_tables; HA_CREATE_INFO *create_info; + TABLE_LIST *select_tables; Alter_info *alter_info; Field **field; public: diff --git a/sql/sql_db.cc b/sql/sql_db.cc index a0c6f8a8e9d..8b0e371be43 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -1113,7 +1113,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, } } if (thd->killed || - (tot_list && mysql_rm_table_part2(thd, tot_list, 1, 0, 1, 1, 1))) + (tot_list && mysql_rm_table_part2(thd, tot_list, 1, 0, 1, 1))) goto err; /* Remove RAID directories */ diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 31120f8020e..c603f1ad77f 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1430,11 +1430,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, LOCK_open during wait_if_global_read_lock(), other threads could not close their tables. This would make a pretty deadlock. 
*/ - error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 0, 0, 1); - pthread_mutex_lock(&thd->mysys_var->mutex); - thd->mysys_var->current_mutex= 0; - thd->mysys_var->current_cond= 0; - pthread_mutex_unlock(&thd->mysys_var->mutex); + error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 0, 0); if (need_start_waiters) start_waiting_global_read_lock(thd); @@ -1477,7 +1473,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, bool drop_temporary, bool drop_view, - bool dont_log_query, bool need_lock_open) + bool dont_log_query) { TABLE_LIST *table; char path[FN_REFLEN], *alias; @@ -1489,9 +1485,6 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, String built_query; DBUG_ENTER("mysql_rm_table_part2"); - if (need_lock_open) - pthread_mutex_lock(&LOCK_open); - LINT_INIT(alias); LINT_INIT(path_length); @@ -1503,6 +1496,9 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, else built_query.append("DROP TABLE "); } + + pthread_mutex_lock(&LOCK_open); + /* If we have the table in the definition cache, we don't have to check the .frm file to find if the table is a normal table (not view) and what @@ -1522,20 +1518,17 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, table->table_name_length, table->table_name, 1)) { my_error(ER_BAD_LOG_STATEMENT, MYF(0), "DROP"); + pthread_mutex_unlock(&LOCK_open); DBUG_RETURN(1); } } if (!drop_temporary && lock_table_names_exclusively(thd, tables)) { - if (need_lock_open) - pthread_mutex_unlock(&LOCK_open); + pthread_mutex_unlock(&LOCK_open); DBUG_RETURN(1); } - if (need_lock_open) - pthread_mutex_unlock(&LOCK_open); - /* Don't give warnings for not found errors, as we already generate notes */ thd->no_warnings_for_error= 1; @@ -1545,7 +1538,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, handlerton *table_type; enum legacy_db_type frm_db_type; - mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL, !need_lock_open); + mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL, 1); if (!close_temporary_table(thd, table)) { tmp_table_deleted=1; @@ -1582,8 +1575,6 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, { TABLE *locked_table; abort_locked_tables(thd, db, table->table_name); - if (need_lock_open) - pthread_mutex_lock(&LOCK_open); remove_table_from_cache(thd, db, table->table_name, RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG); @@ -1594,13 +1585,10 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, if ((locked_table= drop_locked_tables(thd, db, table->table_name))) table->table= locked_table; - if (need_lock_open) - pthread_mutex_unlock(&LOCK_open); - if (thd->killed) { - thd->no_warnings_for_error= 0; - DBUG_RETURN(-1); + error= -1; + goto err_with_placeholders; } alias= (lower_case_table_names == 2) ? table->alias : table->table_name; /* remove .frm file and engine files */ @@ -1663,6 +1651,11 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, wrong_tables.append(String(table->table_name,system_charset_info)); } } + /* + It's safe to unlock LOCK_open: we have an exclusive lock + on the table name. 
+ */ + pthread_mutex_unlock(&LOCK_open); thd->tmp_table_used= tmp_table_deleted; error= 0; if (wrong_tables.length()) @@ -1722,11 +1715,10 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, */ } } - if (need_lock_open) - pthread_mutex_lock(&LOCK_open); + pthread_mutex_lock(&LOCK_open); +err_with_placeholders: unlock_table_names(thd, tables, (TABLE_LIST*) 0); - if (need_lock_open) - pthread_mutex_unlock(&LOCK_open); + pthread_mutex_unlock(&LOCK_open); thd->no_warnings_for_error= 0; DBUG_RETURN(error); } From 9a52e13ccb92d6f30bbd4e92e73771b2a2662021 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 14 Jul 2007 04:38:21 +0400 Subject: [PATCH 11/19] Fix a warning in a non-debug build. --- sql/sql_cache.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 69efbdf006a..0e4404b87fc 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -793,7 +793,8 @@ void query_cache_end_of_result(THD *thd) query_cache.wreck() switched query cache off but left content untouched for investigation (it is debugging method). */ - goto end; + STRUCT_UNLOCK(&query_cache.structure_guard_mutex); + DBUG_VOID_RETURN; } #endif header->found_rows(current_thd->limit_found_rows); @@ -808,7 +809,6 @@ void query_cache_end_of_result(THD *thd) } -end: STRUCT_UNLOCK(&query_cache.structure_guard_mutex); DBUG_VOID_RETURN; } From 393e0f654172bd90c2b079d79bc7ae381df7540a Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 14 Jul 2007 05:22:24 +0400 Subject: [PATCH 12/19] Fix a compiler warning. --- sql/sql_trigger.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index b5165a29ab4..55eae2f5ea5 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -1805,16 +1805,16 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db, bzero(&table, sizeof(table)); init_alloc_root(&table.mem_root, 8192, 0); - uchar key[MAX_DBKEY_LENGTH]; - uint key_length= (uint) (strmov(strmov((char*)&key[0], db)+1, - old_table)-(char*)&key[0])+1; - /* This method interfaces the mysql server code protected by either LOCK_open mutex or with an exclusive table name lock. In the future, only an exclusive table name lock will be enough. */ #ifndef DBUG_OFF + uchar key[MAX_DBKEY_LENGTH]; + uint key_length= (uint) (strmov(strmov((char*)&key[0], db)+1, + old_table)-(char*)&key[0])+1; + if (!is_table_name_exclusively_locked_by_this_thread(thd, key, key_length)) safe_mutex_assert_owner(&LOCK_open); #endif From 7416224c60a876c5cde518e41469911d1b00b3ce Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 15 Jul 2007 13:25:38 +0400 Subject: [PATCH 13/19] A fix for Bug#27406 Events: failure only causes a warning. Update test results. When executing a CREATE EVENT statement with ON COMPLETION NOT PRESERVE clause (explicit or implicit) and completion date in the past, we do not create the event. Or, put it differently, we create it and then drop immediately. A warning is issued in this case, not an error -- we want to load successfully old database dumps, and such dumps may contain events that are no longer valid. Update the warning text to not imply an erroneous condition. mysql-test/r/events_bugs.result: Update the test results (Bug#27406) sql/share/errmsg.txt: Fix Bug#27406 "Events: failure only causes a warning" -- update the error message to not imply that there was a failure. 
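
In code terms the behaviour described above amounts to the sketch below.
The helper names are illustrative only and do not correspond to actual
server functions; only the warning symbol and the push_warning() call are
real:

    /*
      Illustrative sketch, under the assumptions stated above. A CREATE
      EVENT whose execution time lies in the past and that carries
      ON COMPLETION NOT PRESERVE still succeeds, so old dumps keep loading;
      the server merely pushes a Note and leaves nothing in mysql.event.
    */
    static bool create_event_sketch(THD *thd, bool exec_time_in_past,
                                    bool on_completion_preserve)
    {
      if (exec_time_in_past && !on_completion_preserve)
      {
        push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
                     ER_EVENT_CANNOT_CREATE_IN_THE_PAST,
                     ER(ER_EVENT_CANNOT_CREATE_IN_THE_PAST));
        return FALSE;                    /* success, but no event was created */
      }
      return create_and_schedule_event(thd);  /* hypothetical normal path */
    }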
--- mysql-test/r/events_bugs.result | 14 +++++++------- sql/share/errmsg.txt | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/mysql-test/r/events_bugs.result b/mysql-test/r/events_bugs.result index a01f098affb..14728bdcc2e 100644 --- a/mysql-test/r/events_bugs.result +++ b/mysql-test/r/events_bugs.result @@ -31,7 +31,7 @@ create event e_55 on schedule at 10000101000000 do drop table t; ERROR HY000: Incorrect AT value: '10000101000000' create event e_55 on schedule at 20000101000000 do drop table t; Warnings: -Note 1584 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created +Note 1584 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation. show events; Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation create event e_55 on schedule at 20200101000000 starts 10000101000000 do drop table t; @@ -447,32 +447,32 @@ e3 +00:00 CREATE EVENT `e3` ON SCHEDULE EVERY 1 DAY STARTS '2006-01-01 00:00:00 The following should fail, and nothing should be altered. ALTER EVENT e1 ON SCHEDULE EVERY 1 HOUR STARTS '1999-01-01 00:00:00' ENDS '1999-01-02 00:00:00'; -ERROR HY000: Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been altered +ERROR HY000: Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation. ALTER EVENT e1 ON SCHEDULE EVERY 1 HOUR STARTS '1999-01-01 00:00:00' ENDS '1999-01-02 00:00:00' DISABLE; -ERROR HY000: Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been altered +ERROR HY000: Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation. The following should give warnings, and nothing should be created. CREATE EVENT e4 ON SCHEDULE EVERY 1 HOUR STARTS '1999-01-01 00:00:00' ENDS '1999-01-02 00:00:00' DO SELECT 1; Warnings: -Note 1584 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created +Note 1584 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation. CREATE EVENT e4 ON SCHEDULE EVERY 1 HOUR STARTS '1999-01-01 00:00:00' ENDS '1999-01-02 00:00:00' DISABLE DO SELECT 1; Warnings: -Note 1584 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created +Note 1584 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation. CREATE EVENT e4 ON SCHEDULE AT '1999-01-01 00:00:00' DO SELECT 1; Warnings: -Note 1584 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created +Note 1584 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation. CREATE EVENT e4 ON SCHEDULE AT '1999-01-01 00:00:00' DISABLE DO SELECT 1; Warnings: -Note 1584 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created +Note 1584 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation. 
SHOW EVENTS; Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation events_test e1 root@localhost +05:00 RECURRING NULL 1 DAY 2006-01-01 00:00:00 NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index 0e3544415d1..0b26fa52e32 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -6052,9 +6052,9 @@ ER_BINLOG_PURGE_EMFILE eng "Too many files opened, please execute the command again" ger "Zu viele offene Dateien, bitte führen Sie den Befehl noch einmal aus" ER_EVENT_CANNOT_CREATE_IN_THE_PAST - eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created" + eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation." ER_EVENT_CANNOT_ALTER_IN_THE_PAST - eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been altered" + eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation." ER_SLAVE_INCIDENT eng "The incident %s occured on the master. Message: %-.64s" ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT From 8023d91929247f1b2e2f81ca10daca4dde4ab2e2 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 15 Jul 2007 13:34:35 +0400 Subject: [PATCH 14/19] Add a teste case for Bug#27296 "Assertion in ALTER TABLE SET DEFAULT in Linux Debug build (possible deadlock)" The bug is not repeatable any more. mysql-test/r/innodb_mysql.result: Update test results (Bug#27296) mysql-test/t/innodb_mysql.test: Add a teste case for Bug#27296 "Assertion in ALTER TABLE SET DEFAULT in Linux Debug build (possible deadlock)" --- mysql-test/r/innodb_mysql.result | 4 ++++ mysql-test/t/innodb_mysql.test | 14 ++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/mysql-test/r/innodb_mysql.result b/mysql-test/r/innodb_mysql.result index 11c7c2aedc9..847805e7b6f 100644 --- a/mysql-test/r/innodb_mysql.result +++ b/mysql-test/r/innodb_mysql.result @@ -676,4 +676,8 @@ INSERT INTO t1 VALUES (1); switch to connection default SET AUTOCOMMIT=default; DROP TABLE t1,t2; +drop table if exists t1; +create table t1 (a int) engine=innodb; +alter table t1 alter a set default 1; +drop table t1; End of 5.0 tests diff --git a/mysql-test/t/innodb_mysql.test b/mysql-test/t/innodb_mysql.test index 25ba9a258ff..49ffe359fd9 100644 --- a/mysql-test/t/innodb_mysql.test +++ b/mysql-test/t/innodb_mysql.test @@ -670,5 +670,19 @@ DISCONNECT c1; DISCONNECT c2; DROP TABLE t1,t2; +# +# Bug#27296 Assertion in ALTER TABLE SET DEFAULT in Linux Debug build +# (possible deadlock). +# +# The bug is applicable only to a transactoinal table. +# Cover with tests behavior that no longer causes an +# assertion. +# +--disable_warnings +drop table if exists t1; +--enable_warnings +create table t1 (a int) engine=innodb; +alter table t1 alter a set default 1; +drop table t1; --echo End of 5.0 tests From 02a832df2e9743fbe132fe84f906761fc00c1279 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 16 Jul 2007 15:57:20 +0400 Subject: [PATCH 15/19] A follow up after the fix for Bug#21074 - fix NDB tests breaking on asserts. The patch for Bug#21074 replaces acquisition of the global LOCK_open lock with exclusive locks on table names in such operations ad DROP TABLE and RENAME TABLE. Unfortunately, NDB internally assumes that LOCK_open is acquired and tries to release it. 
This dependency should be fixed by a separate (and significant in size) patch. For now we just satisfy it - after all, the original goal of the patch for Bug#21074 was to move query_cache_invalidate outside of the scope of LOCK_open, and we still can do that. This fixes some failing NDB tests in the runtime tree. sql/sql_rename.cc: Move release of LOCK_open after ha_ndbcluster::rename_tables to satisfy an assert in ndb_log_schema_op. --- sql/sql_rename.cc | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index e68a360f2d4..f5e1b8988f3 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -150,7 +150,6 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent) pthread_mutex_unlock(&LOCK_open); goto err; } - pthread_mutex_unlock(&LOCK_open); error=0; if ((ren_table=rename_tables(thd,table_list,0))) @@ -174,6 +173,17 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent) error= 1; } + /* + An exclusive lock on table names is satisfactory to ensure + no other thread accesses this table. + However, NDB assumes that handler::rename_tables is called under + LOCK_open. And it indeed is, from ALTER TABLE. + TODO: remove this limitation. + We still should unlock LOCK_open as early as possible, to provide + higher concurrency - query_cache_invalidate can take minutes to + complete. + */ + pthread_mutex_unlock(&LOCK_open); /* Lets hope this doesn't fail as the result will be messy */ if (!silent && !error) From b6c0fb605d613df14405e1d7c5621d22b35bc4fe Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 16 Jul 2007 22:34:36 +0400 Subject: [PATCH 16/19] Fix ndb_cache* test failures in the -runtime tree. Do not try to acquire structure_guard_mutex for the second time when invalidating a table from send_result_to_client. sql/sql_cache.cc: Do not try to acquire mutex when invalidating a table from send_result_to_client(). A follow up patch for the patch for Bug#21074. Reuse code by moving locking-independent invalidation functionality into invalidate_table_internal. sql/sql_cache.h: Add a new declaration. 
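
The shape of the split, condensed for readability (a simplified restatement
of the hunks that follow; the real functions also handle the wait/signal
details around structure_guard_mutex, which are approximated here):

    void Query_cache::invalidate_table(THD *thd, uchar *key, uint32 key_length)
    {
      /* Owns the locking protocol: disable the cache, then work unlocked. */
      STRUCT_LOCK(&structure_guard_mutex);
      m_cache_status= Query_cache::TABLE_FLUSH_IN_PROGRESS;
      STRUCT_UNLOCK(&structure_guard_mutex);

      if (query_cache_size > 0)
        invalidate_table_internal(thd, key, key_length);

      STRUCT_LOCK(&structure_guard_mutex);
      m_cache_status= Query_cache::NO_FLUSH_IN_PROGRESS;
      STRUCT_UNLOCK(&structure_guard_mutex);
    }

    void Query_cache::invalidate_table_internal(THD *thd,
                                                uchar *key, uint32 key_length)
    {
      /* Locking-independent part: safe when the mutex is already held. */
      Query_cache_block *table_block=
        (Query_cache_block*) hash_search(&tables, key, key_length);
      if (table_block)
        invalidate_query_block_list(thd, table_block->table(0));
    }

send_result_to_client(), which already holds structure_guard_mutex when it
finds that a handler requires invalidation, calls the _internal variant
directly instead of re-entering the locking path.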
--- sql/sql_cache.cc | 33 +++++++++++++++++++++++++-------- sql/sql_cache.h | 1 + 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index f7ad024c143..cab9ef94d6d 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1347,7 +1347,9 @@ def_week_frmt: %lu", ("Handler require invalidation queries of %s.%s %lu-%lu", table_list.db, table_list.alias, (ulong) engine_data, (ulong) table->engine_data())); - invalidate_table(thd, (uchar *) table->db(), table->key_length()); + invalidate_table_internal(thd, + (uchar *) table->db(), + table->key_length()); } else thd->lex->safe_to_cache_query= 0; // Don't try to cache this @@ -2468,13 +2470,8 @@ void Query_cache::invalidate_table(THD *thd, uchar * key, uint32 key_length) m_cache_status= Query_cache::TABLE_FLUSH_IN_PROGRESS; STRUCT_UNLOCK(&structure_guard_mutex); - Query_cache_block *table_block= - (Query_cache_block*)hash_search(&tables, key, key_length); - if (query_cache_size > 0 && table_block) - { - Query_cache_block_table *list_root= table_block->table(0); - invalidate_query_block_list(thd, list_root); - } + if (query_cache_size > 0) + invalidate_table_internal(thd, key, key_length); STRUCT_LOCK(&structure_guard_mutex); m_cache_status= Query_cache::NO_FLUSH_IN_PROGRESS; @@ -2488,6 +2485,26 @@ void Query_cache::invalidate_table(THD *thd, uchar * key, uint32 key_length) } +/** + Try to locate and invalidate a table by name. + The caller must ensure that no other thread is trying to work with + the query cache when this function is executed. + + @pre structure_guard_mutex is acquired or TABLE_FLUSH_IN_PROGRESS is set. +*/ + +void +Query_cache::invalidate_table_internal(THD *thd, uchar *key, uint32 key_length) +{ + Query_cache_block *table_block= + (Query_cache_block*)hash_search(&tables, key, key_length); + if (table_block) + { + Query_cache_block_table *list_root= table_block->table(0); + invalidate_query_block_list(thd, list_root); + } +} + /** @brief Invalidate a linked list of query cache blocks. diff --git a/sql/sql_cache.h b/sql/sql_cache.h index cfc52e5d33a..c4c7e1dbc5e 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -279,6 +279,7 @@ private: Cache_status m_cache_status; void free_query_internal(Query_cache_block *point); + void invalidate_table_internal(THD *thd, uchar *key, uint32 key_length); protected: /* From de2089264fcafa7f850c73382200ed1d5d91fdef Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 16 Jul 2007 23:37:02 +0400 Subject: [PATCH 17/19] Bug#29050 Creation of a legal stored procedure fails if a database is not selected prior: ensure the fix also works for information_schema tables. sql/sql_parse.cc: Ensure the fix for Bug#29050 works for information_schema tables. --- sql/sql_parse.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 91c51641fc0..a765dfe891f 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2272,7 +2272,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, DBUG_RETURN(1); #else if (lex->select_lex.db == NULL && - thd->copy_db_to(&lex->select_lex.db, NULL)) + lex->copy_db_to(&lex->select_lex.db, NULL)) { DBUG_RETURN(1); } From bc642e113255417907d2759e5355c7b374b8d479 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 17 Jul 2007 00:59:21 +0400 Subject: [PATCH 18/19] Post-merge fixes (merge from the main). mysql-test/r/innodb_mysql.result: Update test results (merge from the main tree). mysql-test/r/query_cache.result: Update test results (merge from the main tree). 
mysql-test/r/sp.result: Update test results (merge from the main tree). mysql-test/t/query_cache.test: Use --echo End of to simplify future merges. sql/handler.h: st_table_list -> TABLE_LIST sql/item_create.cc: A post-merge fix (this code is in sql_yacc.yy in 5.0) sql/rpl_utility.h: st_table_list -> TABLE_LIST sql/sp.cc: A post-merge fix. sql/sp_head.cc: In 5.1 memdup_root returns void*. sql/sql_show.cc: st_table_list -> TABLE_LIST sql/sql_show.h: st_table_list -> TABLE_LIST sql/sql_yacc.yy: A post-merge fix. sql/table.cc: st_table_list -> TABLE_LIST sql/table.h: st_table_list -> TABLE_LIST --- mysql-test/r/innodb_mysql.result | 52 +++----------------------------- mysql-test/r/query_cache.result | 1 + mysql-test/r/sp.result | 50 +++++++++++++++--------------- mysql-test/t/query_cache.test | 6 ++-- sql/handler.h | 2 +- sql/item_create.cc | 2 +- sql/rpl_utility.h | 2 +- sql/sp.cc | 2 +- sql/sp_head.cc | 6 ++-- sql/sql_show.cc | 4 +-- sql/sql_show.h | 3 +- sql/sql_yacc.yy | 3 +- sql/table.cc | 6 ++-- sql/table.h | 2 +- 14 files changed, 49 insertions(+), 92 deletions(-) diff --git a/mysql-test/r/innodb_mysql.result b/mysql-test/r/innodb_mysql.result index 3e4e10780d2..8a4749a478d 100644 --- a/mysql-test/r/innodb_mysql.result +++ b/mysql-test/r/innodb_mysql.result @@ -166,7 +166,6 @@ t1.a4='UNcT5pIde4I6c2SheTo4gt92OV1jgJCVkXmzyf325R1DwLURkbYHwhydANIZMbKTgdcR5xS'; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables DROP TABLE t1; -End of 4.1 tests create table t1m (a int) engine = MEMORY; create table t1i (a int); create table t2m (a int) engine = MEMORY; @@ -362,22 +361,6 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 index NULL fkey 5 NULL 5 Using index 1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.fkey 1 Using where DROP TABLE t1,t2; -CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c FLOAT, KEY b(b)) ENGINE = INNODB; -INSERT INTO t1 VALUES ( 1 , 1 , 1); -INSERT INTO t1 SELECT a + 1 , MOD(a + 1 , 20), 1 FROM t1; -INSERT INTO t1 SELECT a + 2 , MOD(a + 2 , 20), 1 FROM t1; -INSERT INTO t1 SELECT a + 4 , MOD(a + 4 , 20), 1 FROM t1; -INSERT INTO t1 SELECT a + 8 , MOD(a + 8 , 20), 1 FROM t1; -INSERT INTO t1 SELECT a + 16, MOD(a + 16, 20), 1 FROM t1; -INSERT INTO t1 SELECT a + 32, MOD(a + 32, 20), 1 FROM t1; -INSERT INTO t1 SELECT a + 64, MOD(a + 64, 20), 1 FROM t1; -EXPLAIN SELECT b, SUM(c) FROM t1 GROUP BY b; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL b 5 NULL 128 -EXPLAIN SELECT SQL_BIG_RESULT b, SUM(c) FROM t1 GROUP BY b; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 128 Using filesort -DROP TABLE t1; CREATE TABLE t1 ( id int NOT NULL, name varchar(20) NOT NULL, @@ -503,37 +486,6 @@ a 2 5 drop table t1; -set @save_qcache_size=@@global.query_cache_size; -set @save_qcache_type=@@global.query_cache_type; -set global query_cache_size=10*1024*1024; -set global query_cache_type=1; -drop table if exists `test`; -Warnings: -Note 1051 Unknown table 'test' -CREATE TABLE `test` (`test1` varchar(3) NOT NULL, -`test2` varchar(4) NOT NULL,PRIMARY KEY (`test1`)) -ENGINE=InnoDB DEFAULT CHARSET=latin1; -INSERT INTO `test` (`test1`, `test2`) VALUES ('tes', '5678'); -select * from test; -test1 test2 -tes 5678 -INSERT INTO `test` (`test1`, `test2`) VALUES ('tes', '1234') -ON DUPLICATE KEY UPDATE `test2` = '1234'; -select * from test; -test1 test2 -tes 1234 -flush tables; 
-select * from test; -test1 test2 -tes 1234 -drop table test; -set global query_cache_type=@save_qcache_type; -set global query_cache_size=@save_qcache_size; -drop table if exists t1; -create table t1 (a int) engine=innodb; -alter table t1 alter a set default 1; -drop table t1; -End of 5.0 tests create table t1( id int auto_increment, c char(1) not null, @@ -858,6 +810,10 @@ a 2 5 drop table t1; +drop table if exists t1; +create table t1 (a int) engine=innodb; +alter table t1 alter a set default 1; +drop table t1; End of 5.0 tests CREATE TABLE `t2` ( `k` int(11) NOT NULL auto_increment, diff --git a/mysql-test/r/query_cache.result b/mysql-test/r/query_cache.result index db513902b2c..7a3cbf26a21 100644 --- a/mysql-test/r/query_cache.result +++ b/mysql-test/r/query_cache.result @@ -1301,6 +1301,7 @@ drop procedure f3; drop procedure f4; drop table t1; set GLOBAL query_cache_size=0; +End of 4.1 tests SET GLOBAL query_cache_size=102400; create table t1(a int); insert into t1 values(0), (1), (4), (5); diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index 9c98dc8d027..273c60110b3 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -5659,31 +5659,6 @@ t3_id_1 t3_id_2 t4_id DROP PROCEDURE p1| DROP VIEW v1, v2| DROP TABLE t3, t4| -drop database if exists mysqltest_db1; -create database mysqltest_db1; -create procedure mysqltest_db1.sp_bug28551() begin end; -call mysqltest_db1.sp_bug28551(); -show warnings; -Level Code Message -drop database mysqltest_db1; -drop database if exists mysqltest_db1; -drop table if exists test.t1; -create database mysqltest_db1; -use mysqltest_db1; -drop database mysqltest_db1; -create table test.t1 (id int); -insert into test.t1 (id) values (1); -create procedure test.sp_bug29050() begin select * from t1; end// -show warnings; -Level Code Message -call test.sp_bug29050(); -id -1 -show warnings; -Level Code Message -use test; -drop procedure sp_bug29050; -drop table t1; End of 5.0 tests Begin of 5.1 tests drop function if exists pi; @@ -6306,6 +6281,31 @@ v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VI DROP VIEW v1; DROP FUNCTION metered; DROP TABLE t1; +drop database if exists mysqltest_db1; +create database mysqltest_db1; +create procedure mysqltest_db1.sp_bug28551() begin end; +call mysqltest_db1.sp_bug28551(); +show warnings; +Level Code Message +drop database mysqltest_db1; +drop database if exists mysqltest_db1; +drop table if exists test.t1; +create database mysqltest_db1; +use mysqltest_db1; +drop database mysqltest_db1; +create table test.t1 (id int); +insert into test.t1 (id) values (1); +create procedure test.sp_bug29050() begin select * from t1; end// +show warnings; +Level Code Message +call test.sp_bug29050(); +id +1 +show warnings; +Level Code Message +use test; +drop procedure sp_bug29050; +drop table t1; drop procedure if exists proc_25411_a; drop procedure if exists proc_25411_b; drop procedure if exists proc_25411_c; diff --git a/mysql-test/t/query_cache.test b/mysql-test/t/query_cache.test index 3a2434892c5..e0db99cf42b 100644 --- a/mysql-test/t/query_cache.test +++ b/mysql-test/t/query_cache.test @@ -878,7 +878,7 @@ drop procedure f4; drop table t1; set GLOBAL query_cache_size=0; -# End of 4.1 tests +--echo End of 4.1 tests # # Bug #10303: problem with last_query_cost @@ -1129,7 +1129,7 @@ set GLOBAL query_cache_type=default; set GLOBAL query_cache_limit=default; set GLOBAL query_cache_min_res_unit=default; set GLOBAL query_cache_size=default; -# End of 5.0 tests +--echo End of 5.0 tests # 
@@ -1174,4 +1174,4 @@ show status like 'Qcache_queries_in_cache'; drop database db2; drop database db3; -# End of 5.1 tests +--echo End of 5.1 tests diff --git a/sql/handler.h b/sql/handler.h index c4e45e5b8f1..f45b28c55f5 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -663,7 +663,7 @@ struct handlerton uint (*alter_table_flags)(uint flags); int (*alter_tablespace)(handlerton *hton, THD *thd, st_alter_tablespace *ts_info); int (*fill_files_table)(handlerton *hton, THD *thd, - struct st_table_list *tables, + TABLE_LIST *tables, class Item *cond); uint32 flags; /* global handler flags */ /* diff --git a/sql/item_create.cc b/sql/item_create.cc index e20926c564f..fa15b992e5c 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -2326,7 +2326,7 @@ Item* Create_qfunc::create(THD *thd, LEX_STRING name, List *item_list) { LEX_STRING db; - if (thd->copy_db_to(&db.str, &db.length)) + if (thd->lex->copy_db_to(&db.str, &db.length)) return NULL; return create(thd, db, name, false, item_list); diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h index 2ce8def4577..79e69aecaeb 100644 --- a/sql/rpl_utility.h +++ b/sql/rpl_utility.h @@ -128,7 +128,7 @@ private: slave thread, but nowhere else. */ struct RPL_TABLE_LIST - : public st_table_list + : public TABLE_LIST { bool m_tabledef_valid; table_def m_tabledef; diff --git a/sql/sp.cc b/sql/sp.cc index d806673c6f3..aed4976f839 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -1954,7 +1954,7 @@ sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex, } ret= sp_cache_routines_and_add_tables_aux(thd, lex, *last_cached_routine_ptr, - FALSE, NULL); + FALSE); return ret; } diff --git a/sql/sp_head.cc b/sql/sp_head.cc index a11c3c666c8..9b67a89bed2 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -567,9 +567,9 @@ sp_head::init_sp_name(THD *thd, sp_name *spname) spname->init_qname(thd); m_sroutines_key.length= spname->m_sroutines_key.length; - m_sroutines_key.str= memdup_root(thd->mem_root, - spname->m_sroutines_key.str, - spname->m_sroutines_key.length + 1); + m_sroutines_key.str= (char*) memdup_root(thd->mem_root, + spname->m_sroutines_key.str, + spname->m_sroutines_key.length + 1); m_sroutines_key.str[0]= static_cast(m_type); m_qname.length= m_sroutines_key.length - 1; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index e503d0acd84..c6bf816b290 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -4094,7 +4094,7 @@ static void store_schema_partitions_record(THD *thd, TABLE *schema_table, } -static int get_schema_partitions_record(THD *thd, struct st_table_list *tables, +static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables, TABLE *table, bool res, const char *base_name, const char *file_name) @@ -4640,7 +4640,7 @@ int fill_status(THD *thd, TABLE_LIST *tables, COND *cond) */ static int -get_referential_constraints_record(THD *thd, struct st_table_list *tables, +get_referential_constraints_record(THD *thd, TABLE_LIST *tables, TABLE *table, bool res, const char *base_name, const char *file_name) { diff --git a/sql/sql_show.h b/sql/sql_show.h index d5c3f3bf675..57004323ca9 100644 --- a/sql/sql_show.h +++ b/sql/sql_show.h @@ -20,9 +20,8 @@ class String; class THD; struct st_ha_create_information; -struct st_table_list; typedef st_ha_create_information HA_CREATE_INFO; -typedef st_table_list TABLE_LIST; +struct TABLE_LIST; enum find_files_result { FIND_FILES_OK, diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 08ce421ef86..d9a808bf8f7 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1940,7 +1940,8 @@ sp_name: } | ident { - LEX *lex= 
Lex; + THD *thd= YYTHD; + LEX *lex= thd->lex; LEX_STRING db; if (check_routine_name(&$1)) { diff --git a/sql/table.cc b/sql/table.cc index 6678073e145..5ac43343934 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -98,7 +98,7 @@ View_creation_ctx *View_creation_ctx::create(THD *thd) /*************************************************************************/ View_creation_ctx * View_creation_ctx::create(THD *thd, - st_table_list *view) + TABLE_LIST *view) { View_creation_ctx *ctx= new (thd->mem_root) View_creation_ctx(thd); @@ -4588,7 +4588,7 @@ Item_subselect *TABLE_LIST::containing_subselect() DESCRIPTION The parser collects the index hints for each table in a "tagged list" - (st_table_list::index_hints). Using the information in this tagged list + (TABLE_LIST::index_hints). Using the information in this tagged list this function sets the members st_table::keys_in_use_for_query, st_table::keys_in_use_for_group_by, st_table::keys_in_use_for_order_by, st_table::force_index and st_table::covering_keys. @@ -4630,7 +4630,7 @@ Item_subselect *TABLE_LIST::containing_subselect() FALSE no errors found TRUE found and reported an error. */ -bool st_table_list::process_index_hints(TABLE *table) +bool TABLE_LIST::process_index_hints(TABLE *table) { /* initialize the result variables */ table->keys_in_use_for_query= table->keys_in_use_for_group_by= diff --git a/sql/table.h b/sql/table.h index b70517d5067..494b74d564c 100644 --- a/sql/table.h +++ b/sql/table.h @@ -38,7 +38,7 @@ public: static View_creation_ctx *create(THD *thd); static View_creation_ctx *create(THD *thd, - struct st_table_list *view); + TABLE_LIST *view); private: View_creation_ctx(THD *thd) From b5ad823b65c64c48fa2719a5af3115898396d7b1 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 17 Jul 2007 01:30:57 +0400 Subject: [PATCH 19/19] Fix a build break on 64 bit (uint and size_t are distinct types). --- sql/sql_lex.cc | 2 +- sql/sql_lex.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 5e91f147033..9f69e7dee05 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -2326,7 +2326,7 @@ uint8 st_lex::get_effective_with_check(TABLE_LIST *view) */ bool -st_lex::copy_db_to(char **p_db, uint *p_db_length) const +st_lex::copy_db_to(char **p_db, size_t *p_db_length) const { if (sphead) { diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 09ace624559..4ac59fbacde 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1782,7 +1782,7 @@ typedef struct st_lex : public Query_tables_list context_stack.pop(); } - bool copy_db_to(char **p_db, uint *p_db_length) const; + bool copy_db_to(char **p_db, size_t *p_db_length) const; Name_resolution_context *current_context() {