Merge mysql-5.1-innodb -> mysql-5.1-bugteam
commit 3bc7c50813
@@ -12,6 +12,8 @@ funcs_1.ndb* # joro : NDB tests marked as experiment

funcs_2.ndb_charset # joro : NDB tests marked as experimental as agreed with bochklin

innodb_plugin.* @solaris # Bug#56063 InnoDB Plugin mysql-tests fail on Solaris

main.ctype_gbk_binlog @solaris # Bug#46010: main.ctype_gbk_binlog fails sporadically : Table 't2' already exists
main.func_str @solaris # joro: Bug#40928
main.sp @solaris # joro : Bug#54138
@@ -616,8 +616,7 @@ dict_table_get_on_id(
{
dict_table_t* table;

if (ut_dulint_cmp(table_id, DICT_FIELDS_ID) <= 0
|| trx->dict_operation_lock_mode == RW_X_LATCH) {
if (trx->dict_operation_lock_mode == RW_X_LATCH) {

/* Note: An X latch implies that the transaction
already owns the dictionary mutex. */
@@ -2140,7 +2139,7 @@ dict_foreign_add_to_cache(
mem_heap_free(foreign->heap);
}

return(DB_CANNOT_ADD_CONSTRAINT);
return(DB_FOREIGN_NO_INDEX);
}

for_in_cache->referenced_table = ref_table;
@@ -2184,7 +2183,7 @@ dict_foreign_add_to_cache(
mem_heap_free(foreign->heap);
}

return(DB_CANNOT_ADD_CONSTRAINT);
return(DB_REFERENCING_NO_INDEX);
}

for_in_cache->foreign_table = for_table;
@@ -3754,7 +3753,6 @@ dict_update_statistics_low(
dictionary mutex */
{
dict_index_t* index;
ulint size;
ulint sum_of_index_sizes = 0;

if (table->ibd_file_missing) {
@@ -3770,14 +3768,6 @@ dict_update_statistics_low(
return;
}

/* If we have set a high innodb_force_recovery level, do not calculate
statistics, as a badly corrupted index can cause a crash in it. */

if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {

return;
}

/* Find out the sizes of the indexes and how many different values
for the key they approximately have */

@@ -3789,26 +3779,48 @@ dict_update_statistics_low(
return;
}

while (index) {
size = btr_get_size(index, BTR_TOTAL_SIZE);

index->stat_index_size = size;
do {
if (UNIV_LIKELY
(srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE
|| (srv_force_recovery < SRV_FORCE_NO_LOG_REDO
&& (index->type & DICT_CLUSTERED)))) {
ulint size;
size = btr_get_size(index, BTR_TOTAL_SIZE);

sum_of_index_sizes += size;
index->stat_index_size = size;

size = btr_get_size(index, BTR_N_LEAF_PAGES);
sum_of_index_sizes += size;

if (size == 0) {
/* The root node of the tree is a leaf */
size = 1;
size = btr_get_size(index, BTR_N_LEAF_PAGES);

if (size == 0) {
/* The root node of the tree is a leaf */
size = 1;
}

index->stat_n_leaf_pages = size;

btr_estimate_number_of_different_key_vals(index);
} else {
/* If we have set a high innodb_force_recovery
level, do not calculate statistics, as a badly
corrupted index can cause a crash in it.
Initialize some bogus index cardinality
statistics, so that the data can be queried in
various means, also via secondary indexes. */
ulint i;

sum_of_index_sizes++;
index->stat_index_size = index->stat_n_leaf_pages = 1;

for (i = dict_index_get_n_unique(index); i; ) {
index->stat_n_diff_key_vals[i--] = 1;
}
}

index->stat_n_leaf_pages = size;

btr_estimate_number_of_different_key_vals(index);

index = dict_table_get_next_index(index);
}
} while (index);

index = dict_table_get_first_index(table);
@@ -966,6 +966,8 @@ try_again:
HASH_SEARCH(name_hash, system->name_hash, ut_fold_string(name), space,
0 == strcmp(name, space->name));
if (space != NULL) {
ibool success;

ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Warning: trying to init to the"
@@ -1002,9 +1004,10 @@ try_again:

namesake_id = space->id;

mutex_exit(&(system->mutex));
success = fil_space_free(namesake_id, FALSE);
ut_a(success);

fil_space_free(namesake_id);
mutex_exit(&(system->mutex));

goto try_again;
}
@@ -1127,6 +1130,33 @@ fil_assign_new_space_id(void)
return(id);
}

/***********************************************************************
Check if the space id exists in the cache, complain to stderr if the
space id cannot be found. */
static
fil_space_t*
fil_space_search(
/*=============*/
/* out: file space instance*/
ulint id) /* in: space id */
{
fil_space_t* space;

ut_ad(mutex_own(&fil_system->mutex));

HASH_SEARCH(hash, fil_system->spaces, id, space, space->id == id);

if (space == NULL) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: trying to remove tablespace %lu"
" from the cache but\n"
"InnoDB: it is not there.\n", (ulong) id);
}

return(space);
}

/***********************************************************************
Frees a space object from the tablespace memory cache. Closes the files in
the chain but does not delete them. There must not be any pending i/o's or
@@ -1135,27 +1165,21 @@ flushes on the files. */
ibool
fil_space_free(
/*===========*/
/* out: TRUE if success */
ulint id) /* in: space id */
/* out: TRUE if success */
ulint id, /* in: space id */
ibool x_latched) /* in: TRUE if caller has space->latch
in X mode */
{
fil_system_t* system = fil_system;
fil_space_t* space;
fil_space_t* namespace;
fil_node_t* fil_node;

mutex_enter(&(system->mutex));
ut_ad(mutex_own(&fil_system->mutex));

HASH_SEARCH(hash, system->spaces, id, space, space->id == id);

if (!space) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: trying to remove tablespace %lu"
" from the cache but\n"
"InnoDB: it is not there.\n", (ulong) id);

mutex_exit(&(system->mutex));
space = fil_space_search(id);

if (space == NULL) {
return(FALSE);
}

@@ -1191,7 +1215,9 @@ fil_space_free(

ut_a(0 == UT_LIST_GET_LEN(space->chain));

mutex_exit(&(system->mutex));
if (x_latched) {
rw_lock_x_unlock(&space->latch);
}

rw_lock_free(&(space->latch));

@@ -2048,6 +2074,19 @@ try_again:
path = mem_strdup(space->name);

mutex_exit(&(system->mutex));

/* Important: We rely on the data dictionary mutex to ensure
that a race is not possible here. It should serialize the tablespace
drop/free. We acquire an X latch only to avoid a race condition
when accessing the tablespace instance via:

fsp_get_available_space_in_free_extents().

There our main motivation is to reduce the contention on the
dictionary mutex and not correctness. */

rw_lock_x_lock(&space->latch);

#ifndef UNIV_HOTBACKUP
/* Invalidate in the buffer pool all pages belonging to the
tablespace. Since we have set space->is_being_deleted = TRUE, readahead
@@ -2060,7 +2099,11 @@ try_again:
#endif
/* printf("Deleting tablespace %s id %lu\n", space->name, id); */

success = fil_space_free(id);
mutex_enter(&system->mutex);

success = fil_space_free(id, TRUE);

mutex_exit(&system->mutex);

if (success) {
success = os_file_delete(path);
@@ -2068,6 +2111,8 @@ try_again:
if (!success) {
success = os_file_delete_if_exists(path);
}
} else {
rw_lock_x_unlock(&space->latch);
}

if (success) {
@@ -4569,3 +4614,28 @@ fil_page_get_type(

return(mach_read_from_2(page + FIL_PAGE_TYPE));
}

/***********************************************************************
Returns TRUE if a single-table tablespace is being deleted. */

ibool
fil_tablespace_is_being_deleted(
/*============================*/
/* out: TRUE if space is being deleted */
ulint id) /* in: space id */
{
fil_space_t* space;
ibool is_being_deleted;

mutex_enter(&fil_system->mutex);

HASH_SEARCH(hash, fil_system->spaces, id, space, space->id == id);

ut_a(space != NULL);

is_being_deleted = space->is_being_deleted;

mutex_exit(&fil_system->mutex);

return(is_being_deleted);
}
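The hunks above change the fil_space_free() contract: the caller must now hold fil_system->mutex, and a new x_latched flag tells the function whether the caller already holds space->latch in X mode (in which case it is released before the latch is freed). A minimal caller sketch under those assumptions; only fil_space_free(), fil_system and the mutex calls come from the patch, the wrapper itself is hypothetical:

	/* Hypothetical caller illustrating the new fil_space_free()
	convention shown above. */
	static ibool
	drop_space_from_cache(ulint id, ibool x_latched)
	{
		ibool	freed;

		mutex_enter(&fil_system->mutex);	/* precondition now
							asserted inside
							fil_space_free() */
		freed = fil_space_free(id, x_latched);	/* x-unlocks
							space->latch when
							x_latched == TRUE */
		mutex_exit(&fil_system->mutex);

		return(freed);
	}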
@@ -2842,12 +2842,61 @@ fsp_get_available_space_in_free_extents(

ut_ad(!mutex_own(&kernel_mutex));

/* The convoluted mutex acquire is to overcome latching order
issues: The problem is that the fil_mutex is at a lower level
than the tablespace latch and the buffer pool mutex. We have to
first prevent any operations on the file system by acquiring the
dictionary mutex. Then acquire the tablespace latch to obey the
latching order and then release the dictionary mutex. That way we
ensure that the tablespace instance can't be freed while we are
examining its contents (see fil_space_free()).

However, there is one further complication, we release the fil_mutex
when we need to invalidate the the pages in the buffer pool and we
reacquire the fil_mutex when deleting and freeing the tablespace
instance in fil0fil.c. Here we need to account for that situation
too. */

dict_mutex_enter_for_mysql();

/* At this stage there is no guarantee that the tablespace even
exists in the cache. */

if (fil_tablespace_deleted_or_being_deleted_in_mem(space, -1)) {

dict_mutex_exit_for_mysql();

return(ULLINT_UNDEFINED);
}

mtr_start(&mtr);

latch = fil_space_get_latch(space);

/* This should ensure that the tablespace instance can't be freed
by another thread. However, the tablespace pages can still be freed
from the buffer pool. We need to check for that again. */

mtr_x_lock(latch, &mtr);

dict_mutex_exit_for_mysql();

/* At this point it is possible for the tablespace to be deleted and
its pages removed from the buffer pool. We need to check for that
situation. However, the tablespace instance can't be deleted because
our latching above should ensure that. */

if (fil_tablespace_is_being_deleted(space)) {

mtr_commit(&mtr);

return(ULLINT_UNDEFINED);
}

/* From here on even if the user has dropped the tablespace, the
pages _must_ still exist in the buffer pool and the tablespace
instance _must be in the file system hash table. */

space_header = fsp_get_space_header(space, &mtr);

size = mtr_read_ulint(space_header + FSP_SIZE, MLOG_4BYTES, &mtr);
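With the hunk above, fsp_get_available_space_in_free_extents() serializes against a concurrent DROP internally (dictionary mutex first, then the tablespace latch) and reports a vanished tablespace by returning ULLINT_UNDEFINED instead of touching freed pages. A rough caller-side sketch of that contract; the variable names are illustrative, and the kilobyte unit is inferred from the * 1024 in the ha_innobase::info() hunks later in this commit:

	/* Illustrative only: consuming the ULLINT_UNDEFINED sentinel
	introduced above. */
	ullint	avail;

	avail = fsp_get_available_space_in_free_extents(space_id);

	if (avail == ULLINT_UNDEFINED) {
		/* The tablespace was dropped or is being dropped
		concurrently: treat the free space as unknown. */
	} else {
		delete_length = avail * 1024;	/* kB -> bytes, as in
						ha_innobase::info() */
	}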
@@ -707,7 +707,9 @@ convert_error_code_to_mysql(

return(HA_ERR_ROW_IS_REFERENCED);

} else if (error == (int) DB_CANNOT_ADD_CONSTRAINT) {
} else if (error == (int) DB_CANNOT_ADD_CONSTRAINT
|| error == (int) DB_FOREIGN_NO_INDEX
|| error == (int) DB_REFERENCING_NO_INDEX) {

return(HA_ERR_CANNOT_ADD_FOREIGN);

@@ -6099,6 +6101,8 @@ ha_innobase::rename_table(
innobase_commit_low(trx);
trx_free_for_mysql(trx);

switch (error) {
case DB_DUPLICATE_KEY:
/* Add a special case to handle the Duplicated Key error
and return DB_ERROR instead.
This is to avoid a possible SIGSEGV error from mysql error
@@ -6111,10 +6115,28 @@ ha_innobase::rename_table(
the dup key error here is due to an existing table whose name
is the one we are trying to rename to) and return the generic
error code. */
if (error == (int) DB_DUPLICATE_KEY) {
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), to);

error = DB_ERROR;
break;
case DB_FOREIGN_NO_INDEX:
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
HA_ERR_CANNOT_ADD_FOREIGN,
"Alter or rename of table '%s' failed"
" because the new table is a child table"
" in a FK relationship and it does not"
" have an index that contains foreign"
" keys as its prefix columns.", norm_to);
break;
case DB_REFERENCING_NO_INDEX:
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
HA_ERR_CANNOT_ADD_FOREIGN,
"Alter or rename of table '%s' failed"
" because the new table is a parent table"
" in a FK relationship and it does not"
" have an index that contains foreign"
" keys as its prefix columns.", norm_to);
break;
}

error = convert_error_code_to_mysql(error, NULL);
@@ -6343,8 +6365,6 @@ ha_innobase::info(
dict_index_t* index;
ha_rows rec_per_key;
ib_longlong n_rows;
ulong j;
ulong i;
char path[FN_REFLEN];
os_file_stat_t stat_info;

@@ -6354,16 +6374,6 @@ ha_innobase::info(
statistics calculation on tables, because that may crash the
server if an index is badly corrupted. */

if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {

/* We return success (0) instead of HA_ERR_CRASHED,
because we want MySQL to process this query and not
stop, like it would do if it received the error code
HA_ERR_CRASHED. */

DBUG_RETURN(0);
}

/* We do not know if MySQL can call this function before calling
external_lock(). To be safe, update the thd of the current table
handle. */
@@ -6458,25 +6468,24 @@ ha_innobase::info(
acquiring latches inside InnoDB, we do not call it if we
are asked by MySQL to avoid locking. Another reason to
avoid the call is that it uses quite a lot of CPU.
See Bug#38185.
We do not update delete_length if no locking is requested
so the "old" value can remain. delete_length is initialized
to 0 in the ha_statistics' constructor. */
if (!(flag & HA_STATUS_NO_LOCK)) {
See Bug#38185. */
if (flag & HA_STATUS_NO_LOCK) {
/* We do not update delete_length if no
locking is requested so the "old" value can
remain. delete_length is initialized to 0 in
the ha_statistics' constructor. */
} else if (UNIV_UNLIKELY
(srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE)) {
/* Avoid accessing the tablespace if
innodb_crash_recovery is set to a high value. */
stats.delete_length = 0;
} else {
ullint avail_space;

/* lock the data dictionary to avoid races with
ibd_file_missing and tablespace_discarded */
row_mysql_lock_data_dictionary(prebuilt->trx);

/* ib_table->space must be an existent tablespace */
if (!ib_table->ibd_file_missing
&& !ib_table->tablespace_discarded) {

stats.delete_length =
fsp_get_available_space_in_free_extents(
ib_table->space) * 1024;
} else {
avail_space = fsp_get_available_space_in_free_extents(
ib_table->space);

if (avail_space == ULLINT_UNDEFINED) {
THD* thd;

thd = ha_thd();
@@ -6493,9 +6502,9 @@ ha_innobase::info(
ib_table->name);

stats.delete_length = 0;
} else {
stats.delete_length = avail_space * 1024;
}

row_mysql_unlock_data_dictionary(prebuilt->trx);
}

stats.check_time = 0;
@@ -6508,6 +6517,7 @@ ha_innobase::info(
}

if (flag & HA_STATUS_CONST) {
ulong i = 0;
index = dict_table_get_first_index_noninline(ib_table);

if (prebuilt->clust_index_was_generated) {
@@ -6515,6 +6525,8 @@ ha_innobase::info(
}

for (i = 0; i < table->s->keys; i++) {
ulong j;

if (index == NULL) {
sql_print_error("Table %s contains fewer "
"indexes inside InnoDB than "
@@ -6571,6 +6583,11 @@ ha_innobase::info(
}
}

if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {

goto func_exit;
}

if (flag & HA_STATUS_ERRKEY) {
ut_a(prebuilt->trx);
ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N);
@@ -6583,6 +6600,7 @@ ha_innobase::info(
stats.auto_increment_value = innobase_peek_autoinc();
}

func_exit:
prebuilt->trx->op_info = (char*)"";

DBUG_RETURN(0);
@@ -7814,16 +7832,17 @@ ha_innobase::store_lock(
&& (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT)
&& (sql_command == SQLCOM_INSERT_SELECT
|| sql_command == SQLCOM_UPDATE
|| sql_command == SQLCOM_CREATE_TABLE)) {
|| sql_command == SQLCOM_CREATE_TABLE
|| sql_command == SQLCOM_SET_OPTION)) {

/* If we either have innobase_locks_unsafe_for_binlog
option set or this session is using READ COMMITTED
isolation level and isolation level of the transaction
is not set to serializable and MySQL is doing
INSERT INTO...SELECT or UPDATE ... = (SELECT ...) or
CREATE ... SELECT... without FOR UPDATE or
IN SHARE MODE in select, then we use consistent
read for select. */
CREATE ... SELECT... or SET ... = (SELECT ...)
without FOR UPDATE or IN SHARE MODE in select,
then we use consistent read for select. */

prebuilt->select_lock_type = LOCK_NONE;
prebuilt->stored_select_lock_type = LOCK_NONE;
@@ -73,6 +73,12 @@ Created 5/24/1996 Heikki Tuuri
a later version of the engine. */
#define DB_INTERRUPTED 49 /* the query has been interrupted with
"KILL QUERY N;" */
#define DB_FOREIGN_NO_INDEX 50 /* the child (foreign) table does not
have an index that contains the
foreign keys as its prefix columns */
#define DB_REFERENCING_NO_INDEX 51 /* the parent (referencing) table does
not have an index that contains the
foreign keys as its prefix columns */

/* The following are partial failure codes */
#define DB_FAIL 1000
@@ -202,8 +202,10 @@ the chain but does not delete them. */
ibool
fil_space_free(
/*===========*/
/* out: TRUE if success */
ulint id); /* in: space id */
/* out: TRUE if success */
ulint id, /* in: space id */
ibool x_latched); /* in: TRUE if caller has space->latch
in X mode */
/***********************************************************************
Returns the size of the space in pages. The tablespace must be cached in the
memory cache. */
@@ -710,6 +712,14 @@ fil_page_get_type(
written to page, the return value not defined */
byte* page); /* in: file page */

/***********************************************************************
Returns TRUE if a single-table tablespace is being deleted. */

ibool
fil_tablespace_is_being_deleted(
/*============================*/
/* out: TRUE if space is being deleted */
ulint id); /* in: space id */

typedef struct fil_space_struct fil_space_t;
@@ -234,6 +234,12 @@ typedef unsigned long long int ullint;
/* Maximum value for a ulint */
#define ULINT_MAX ((ulint)(-2))

/* THe 'undefined' value for ullint */
#define ULLINT_UNDEFINED ((ullint)(-1))

/* Maximum value for a ullint */
#define ULLINT_MAX ((ullint)(-2))

/* This 'ibool' type is used within Innobase. Remember that different included
headers may define 'bool' differently. Do not assume that 'bool' is a ulint! */
#define ibool ulint
@@ -1,3 +1,20 @@
2010-08-24 The InnoDB Team

* handler/ha_innodb.c, dict/dict0dict.c:
Fix Bug #55832 selects crash too easily when innodb_force_recovery>3

2010-08-03 The InnoDB Team

* include/ut0mem.h, ut/ut0mem.c:
Fix Bug #55627 segv in ut_free pars_lexer_close innobase_shutdown
innodb-use-sys-malloc=0

2010-08-01 The InnoDB Team

* handler/ha_innodb.cc
Fix Bug #55382 Assignment with SELECT expressions takes unexpected
S locks in READ COMMITTED

2010-07-27 The InnoDB Team

* include/mem0pool.h, mem/mem0mem.c, mem/mem0pool.c, srv/srv0start.c:
@@ -1734,6 +1734,7 @@ function_exit:
}
}

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/********************************************************************//**
Validates the search system.
@return TRUE if ok */
@@ -1897,3 +1898,4 @@ btr_search_validate(void)

return(ok);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
@@ -568,8 +568,7 @@ dict_table_get_on_id(
{
dict_table_t* table;

if (ut_dulint_cmp(table_id, DICT_FIELDS_ID) <= 0
|| trx->dict_operation_lock_mode == RW_X_LATCH) {
if (trx->dict_operation_lock_mode == RW_X_LATCH) {

/* Note: An X latch implies that the transaction
already owns the dictionary mutex. */
@@ -4192,7 +4191,6 @@ dict_update_statistics_low(
dictionary mutex */
{
dict_index_t* index;
ulint size;
ulint sum_of_index_sizes = 0;

if (table->ibd_file_missing) {
@@ -4207,14 +4205,6 @@ dict_update_statistics_low(
return;
}

/* If we have set a high innodb_force_recovery level, do not calculate
statistics, as a badly corrupted index can cause a crash in it. */

if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {

return;
}

/* Find out the sizes of the indexes and how many different values
for the key they approximately have */

@@ -4226,26 +4216,48 @@ dict_update_statistics_low(
return;
}

while (index) {
size = btr_get_size(index, BTR_TOTAL_SIZE);

index->stat_index_size = size;
do {
if (UNIV_LIKELY
(srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE
|| (srv_force_recovery < SRV_FORCE_NO_LOG_REDO
&& dict_index_is_clust(index)))) {
ulint size;
size = btr_get_size(index, BTR_TOTAL_SIZE);

sum_of_index_sizes += size;
index->stat_index_size = size;

size = btr_get_size(index, BTR_N_LEAF_PAGES);
sum_of_index_sizes += size;

if (size == 0) {
/* The root node of the tree is a leaf */
size = 1;
size = btr_get_size(index, BTR_N_LEAF_PAGES);

if (size == 0) {
/* The root node of the tree is a leaf */
size = 1;
}

index->stat_n_leaf_pages = size;

btr_estimate_number_of_different_key_vals(index);
} else {
/* If we have set a high innodb_force_recovery
level, do not calculate statistics, as a badly
corrupted index can cause a crash in it.
Initialize some bogus index cardinality
statistics, so that the data can be queried in
various means, also via secondary indexes. */
ulint i;

sum_of_index_sizes++;
index->stat_index_size = index->stat_n_leaf_pages = 1;

for (i = dict_index_get_n_unique(index); i; ) {
index->stat_n_diff_key_vals[i--] = 1;
}
}

index->stat_n_leaf_pages = size;

btr_estimate_number_of_different_key_vals(index);

index = dict_table_get_next_index(index);
}
} while (index);

index = dict_table_get_first_index(table);
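The do/while restructuring above decides per index whether real statistics may be gathered: below SRV_FORCE_NO_IBUF_MERGE every index is measured, between SRV_FORCE_NO_IBUF_MERGE and SRV_FORCE_NO_LOG_REDO only the clustered index is, and otherwise bogus cardinalities of 1 are installed so the table stays queryable. A sketch of that predicate on its own, using the symbols from the hunk; the helper name is hypothetical:

	/* Hypothetical helper equivalent to the UNIV_LIKELY() condition
	above: TRUE when it is safe to walk the index and compute real
	statistics at the current innodb_force_recovery level. */
	static ibool
	stats_are_safe_to_compute(const dict_index_t* index)
	{
		return(srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE
		       || (srv_force_recovery < SRV_FORCE_NO_LOG_REDO
			   && dict_index_is_clust(index)));
	}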
@@ -354,6 +354,7 @@ ha_remove_all_nodes_to_page(
#endif
}

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/*************************************************************//**
Validates a given range of the cells in hash table.
@return TRUE if ok */
@@ -400,6 +401,7 @@ ha_validate(

return(ok);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */

/*************************************************************//**
Prints info of a hash table. */
@@ -7511,28 +7511,15 @@ ha_innobase::info(
dict_index_t* index;
ha_rows rec_per_key;
ib_int64_t n_rows;
ulong j;
ulong i;
char path[FN_REFLEN];
os_file_stat_t stat_info;


DBUG_ENTER("info");

/* If we are forcing recovery at a high level, we will suppress
statistics calculation on tables, because that may crash the
server if an index is badly corrupted. */

if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {

/* We return success (0) instead of HA_ERR_CRASHED,
because we want MySQL to process this query and not
stop, like it would do if it received the error code
HA_ERR_CRASHED. */

DBUG_RETURN(0);
}

/* We do not know if MySQL can call this function before calling
external_lock(). To be safe, update the thd of the current table
handle. */
@@ -7627,12 +7614,18 @@ ha_innobase::info(
acquiring latches inside InnoDB, we do not call it if we
are asked by MySQL to avoid locking. Another reason to
avoid the call is that it uses quite a lot of CPU.
See Bug#38185.
We do not update delete_length if no locking is requested
so the "old" value can remain. delete_length is initialized
to 0 in the ha_statistics' constructor. */
if (!(flag & HA_STATUS_NO_LOCK)) {

See Bug#38185. */
if (flag & HA_STATUS_NO_LOCK) {
/* We do not update delete_length if no
locking is requested so the "old" value can
remain. delete_length is initialized to 0 in
the ha_statistics' constructor. */
} else if (UNIV_UNLIKELY
(srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE)) {
/* Avoid accessing the tablespace if
innodb_crash_recovery is set to a high value. */
stats.delete_length = 0;
} else {
/* lock the data dictionary to avoid races with
ibd_file_missing and tablespace_discarded */
row_mysql_lock_data_dictionary(prebuilt->trx);
@@ -7677,6 +7670,7 @@ ha_innobase::info(
}

if (flag & HA_STATUS_CONST) {
ulong i;
/* Verify the number of index in InnoDB and MySQL
matches up. If prebuilt->clust_index_was_generated
holds, InnoDB defines GEN_CLUST_INDEX internally */
@@ -7693,6 +7687,7 @@ ha_innobase::info(
}

for (i = 0; i < table->s->keys; i++) {
ulong j;
/* We could get index quickly through internal
index mapping with the index translation table.
The identity of index (match up index name with
@@ -7758,6 +7753,11 @@ ha_innobase::info(
}
}

if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {

goto func_exit;
}

if (flag & HA_STATUS_ERRKEY) {
const dict_index_t* err_index;

@@ -7778,6 +7778,7 @@ ha_innobase::info(
stats.auto_increment_value = innobase_peek_autoinc();
}

func_exit:
prebuilt->trx->op_info = (char*)"";

DBUG_RETURN(0);
@@ -9235,7 +9236,8 @@ ha_innobase::store_lock(
&& (sql_command == SQLCOM_INSERT_SELECT
|| sql_command == SQLCOM_REPLACE_SELECT
|| sql_command == SQLCOM_UPDATE
|| sql_command == SQLCOM_CREATE_TABLE)) {
|| sql_command == SQLCOM_CREATE_TABLE
|| sql_command == SQLCOM_SET_OPTION)) {

/* If we either have innobase_locks_unsafe_for_binlog
option set or this session is using READ COMMITTED
@@ -9243,9 +9245,9 @@ ha_innobase::store_lock(
is not set to serializable and MySQL is doing
INSERT INTO...SELECT or REPLACE INTO...SELECT
or UPDATE ... = (SELECT ...) or CREATE ...
SELECT... without FOR UPDATE or IN SHARE
MODE in select, then we use consistent read
for select. */
SELECT... or SET ... = (SELECT ...) without
FOR UPDATE or IN SHARE MODE in select,
then we use consistent read for select. */

prebuilt->select_lock_type = LOCK_NONE;
prebuilt->stored_select_lock_type = LOCK_NONE;
@@ -180,6 +180,7 @@ btr_search_update_hash_on_delete(
btr_cur_t* cursor);/*!< in: cursor which was positioned on the
record to delete using btr_cur_search_...,
the record is not yet deleted */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/********************************************************************//**
Validates the search system.
@return TRUE if ok */
@@ -187,6 +188,9 @@ UNIV_INTERN
ibool
btr_search_validate(void);
/*======================*/
#else
# define btr_search_validate() TRUE
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */

/** Flag: has the search system been enabled?
Protected by btr_search_latch and btr_search_enabled_mutex. */
@@ -186,6 +186,7 @@ ha_remove_all_nodes_to_page(
hash_table_t* table, /*!< in: hash table */
ulint fold, /*!< in: fold value */
const page_t* page); /*!< in: buffer page */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/*************************************************************//**
Validates a given range of the cells in hash table.
@return TRUE if ok */
@@ -196,6 +197,7 @@ ha_validate(
hash_table_t* table, /*!< in: hash table */
ulint start_index, /*!< in: start index */
ulint end_index); /*!< in: end index */
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
/*************************************************************//**
Prints info of a hash table. */
UNIV_INTERN
@@ -46,7 +46,7 @@ Created 1/20/1994 Heikki Tuuri

#define INNODB_VERSION_MAJOR 1
#define INNODB_VERSION_MINOR 0
#define INNODB_VERSION_BUGFIX 11
#define INNODB_VERSION_BUGFIX 12

/* The following is the InnoDB version as shown in
SELECT plugin_version FROM information_schema.plugins;
@@ -113,7 +113,8 @@ ut_test_malloc(
ulint n); /*!< in: try to allocate this many bytes */
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
Frees a memory block allocated with ut_malloc. */
Frees a memory block allocated with ut_malloc. Freeing a NULL pointer is
a nop. */
UNIV_INTERN
void
ut_free(
@@ -2690,7 +2690,6 @@ row_sel_store_mysql_rec(
ut_ad(prebuilt->mysql_template);
ut_ad(prebuilt->default_rec);
ut_ad(rec_offs_validate(rec, NULL, offsets));
ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));

if (UNIV_LIKELY_NULL(prebuilt->blob_heap)) {
mem_heap_free(prebuilt->blob_heap);
@@ -3611,6 +3610,7 @@ row_search_for_mysql(
row_sel_try_search_shortcut_for_mysql().
The latch will not be released until
mtr_commit(&mtr). */
ut_ad(!rec_get_deleted_flag(rec, comp));

if (!row_sel_store_mysql_rec(buf, prebuilt,
rec, offsets)) {
@@ -4238,7 +4238,7 @@ no_gap_lock:

rec = old_vers;
}
} else if (!lock_sec_rec_cons_read_sees(rec, trx->read_view)) {
} else {
/* We are looking into a non-clustered index,
and to get the right version of the record we
have to look also into the clustered index: this
@@ -4246,8 +4246,12 @@ no_gap_lock:
information via the clustered index record. */

ut_ad(index != clust_index);
ut_ad(!dict_index_is_clust(index));

goto requires_clust_rec;
if (!lock_sec_rec_cons_read_sees(
rec, trx->read_view)) {
goto requires_clust_rec;
}
}
}

@@ -4370,8 +4374,13 @@ requires_clust_rec:

ULINT_UNDEFINED, &heap);
result_rec = rec;
}

/* result_rec can legitimately be delete-marked
now that it has been established that it points to a
clustered index record that exists in the read view. */
} else {
result_rec = rec;
ut_ad(!rec_get_deleted_flag(rec, comp));
}

/* We found a qualifying record 'result_rec'. At this point,
@@ -1938,7 +1938,8 @@ trx_undo_update_cleanup(

UT_LIST_ADD_FIRST(undo_list, rseg->update_undo_cached, undo);
} else {
ut_ad(undo->state == TRX_UNDO_TO_PURGE);
ut_ad(undo->state == TRX_UNDO_TO_PURGE
|| undo->state == TRX_UNDO_TO_FREE);

trx_undo_mem_free(undo);
}
@@ -290,7 +290,8 @@ ut_test_malloc(
#endif /* !UNIV_HOTBACKUP */

/**********************************************************************//**
Frees a memory block allocated with ut_malloc. */
Frees a memory block allocated with ut_malloc. Freeing a NULL pointer is
a nop. */
UNIV_INTERN
void
ut_free(
@@ -300,7 +301,9 @@ ut_free(
#ifndef UNIV_HOTBACKUP
ut_mem_block_t* block;

if (UNIV_LIKELY(srv_use_sys_malloc)) {
if (ptr == NULL) {
return;
} else if (UNIV_LIKELY(srv_use_sys_malloc)) {
free(ptr);
return;
}
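The last hunk makes ut_free() tolerate a NULL pointer, which is what Bug #55627 needed: with innodb-use-sys-malloc=0 the old code derived the ut_mem_block_t header from ptr and dereferenced it, so freeing NULL during shutdown (pars_lexer_close) could crash. The behaviour now mirrors free(NULL). A small usage sketch; the variable is illustrative:

	byte*	buf = NULL;

	ut_free(buf);		/* no-op after this change */

	buf = ut_malloc(100);	/* normal allocate/free pair, unchanged */
	ut_free(buf);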