Merge commit cfa413bf4e
@@ -3005,6 +3005,44 @@ EXECUTE stmt;
1
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
#
# Bug#54494 crash with explain extended and prepared statements
#
CREATE TABLE t1(a INT);
INSERT INTO t1 VALUES (1),(2);
PREPARE stmt FROM 'EXPLAIN EXTENDED SELECT 1 FROM t1 RIGHT JOIN t1 t2 ON 1';
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
Warnings:
Note 1003 select 1 AS `1` from `test`.`t1` `t2` left join `test`.`t1` on(1) where 1
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
Warnings:
Note 1003 select 1 AS `1` from `test`.`t1` `t2` left join `test`.`t1` on(1) where 1
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
#
# Bug#54488 crash when using explain and prepared statements with subqueries
#
CREATE TABLE t1(f1 INT);
INSERT INTO t1 VALUES (1),(1);
PREPARE stmt FROM 'EXPLAIN SELECT 1 FROM t1 WHERE (SELECT (SELECT 1 FROM t1 GROUP BY f1))';
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
3 SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
3 SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort
DEALLOCATE PREPARE stmt;
DROP TABLE t1;

End of 5.1 tests.
mysql-test/suite/innodb/r/innodb_bug57255.result (new file, 10 lines)
@@ -0,0 +1,10 @@
create table A(id int not null primary key) engine=innodb;
create table B(id int not null auto_increment primary key, f1 int not null, foreign key(f1) references A(id) on delete cascade) engine=innodb;
create table C(id int not null auto_increment primary key, f1 int not null, foreign key(f1) references B(id) on delete cascade) engine=innodb;
insert into A values(1), (2);
DELETE FROM A where id = 1;
DELETE FROM C where f1 = 2;
DELETE FROM A where id = 1;
DROP TABLE C;
DROP TABLE B;
DROP TABLE A;
mysql-test/suite/innodb/t/innodb_bug57255.test (new file, 36 lines)
@@ -0,0 +1,36 @@
# Test Bug #57255. Cascade deletes that affect different rows should not
# result in DB_FOREIGN_EXCEED_MAX_CASCADE error

--source include/have_innodb.inc

create table A(id int not null primary key) engine=innodb;

create table B(id int not null auto_increment primary key, f1 int not null, foreign key(f1) references A(id) on delete cascade) engine=innodb;

create table C(id int not null auto_increment primary key, f1 int not null, foreign key(f1) references B(id) on delete cascade) engine=innodb;

insert into A values(1), (2);

--disable_query_log
let $i=257;
while ($i)
{
insert into B(f1) values(1);
dec $i;
}
let $i=486;
while ($i)
{
insert into C(f1) values(2);
dec $i;
}
--enable_query_log

# Following Deletes should not report error
DELETE FROM A where id = 1;
DELETE FROM C where f1 = 2;
DELETE FROM A where id = 1;

DROP TABLE C;
DROP TABLE B;
DROP TABLE A;
@@ -195,6 +195,6 @@ show create table THREADS;
Table Create Table
THREADS CREATE TABLE `THREADS` (
`THREAD_ID` int(11) NOT NULL,
`ID` int(11) NOT NULL,
`NAME` varchar(64) NOT NULL
`PROCESSLIST_ID` int(11) DEFAULT NULL,
`NAME` varchar(128) NOT NULL
) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
@@ -3079,7 +3079,27 @@ EXECUTE stmt;
DEALLOCATE PREPARE stmt;
DROP TABLE t1;

###########################################################################
--echo #
--echo # Bug#54494 crash with explain extended and prepared statements
--echo #
CREATE TABLE t1(a INT);
INSERT INTO t1 VALUES (1),(2);
PREPARE stmt FROM 'EXPLAIN EXTENDED SELECT 1 FROM t1 RIGHT JOIN t1 t2 ON 1';
EXECUTE stmt;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
DROP TABLE t1;

--echo #
--echo # Bug#54488 crash when using explain and prepared statements with subqueries
--echo #
CREATE TABLE t1(f1 INT);
INSERT INTO t1 VALUES (1),(1);
PREPARE stmt FROM 'EXPLAIN SELECT 1 FROM t1 WHERE (SELECT (SELECT 1 FROM t1 GROUP BY f1))';
EXECUTE stmt;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
DROP TABLE t1;

--echo
--echo End of 5.1 tests.
@@ -935,16 +935,22 @@ private:
/* lock already taken */
if (auto_increment_safe_stmt_log_lock)
return;
#ifdef WITH_PARTITION_STORAGE_ENGINE
DBUG_ASSERT(table_share->ha_part_data && !auto_increment_lock);
#endif
if(table_share->tmp_table == NO_TMP_TABLE)
{
auto_increment_lock= TRUE;
#ifdef WITH_PARTITION_STORAGE_ENGINE
mysql_mutex_lock(&table_share->ha_part_data->LOCK_auto_inc);
#endif
}
}
virtual void unlock_auto_increment()
{
#ifdef WITH_PARTITION_STORAGE_ENGINE
DBUG_ASSERT(table_share->ha_part_data);
#endif
/*
If auto_increment_safe_stmt_log_lock is true, we have to keep the lock.
It will be set to false and thus unlocked at the end of the statement by
@@ -952,19 +958,25 @@ private:
*/
if(auto_increment_lock && !auto_increment_safe_stmt_log_lock)
{
#ifdef WITH_PARTITION_STORAGE_ENGINE
mysql_mutex_unlock(&table_share->ha_part_data->LOCK_auto_inc);
#endif
auto_increment_lock= FALSE;
}
}
virtual void set_auto_increment_if_higher(Field *field)
{
#ifdef WITH_PARTITION_STORAGE_ENGINE
ulonglong nr= (((Field_num*) field)->unsigned_flag ||
field->val_int() > 0) ? field->val_int() : 0;
#endif
lock_auto_increment();
#ifdef WITH_PARTITION_STORAGE_ENGINE
DBUG_ASSERT(table_share->ha_part_data->auto_inc_initialized == TRUE);
/* must check when the mutex is taken */
if (nr >= table_share->ha_part_data->next_auto_inc_val)
table_share->ha_part_data->next_auto_inc_val= nr + 1;
#endif
unlock_auto_increment();
}
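Editor's note: set_auto_increment_if_higher() above follows a lock / compare / bump / unlock discipline around the shared next_auto_inc_val counter ("must check when the mutex is taken"). A minimal standalone sketch of that pattern, using plain pthreads and hypothetical names rather than the handler code itself:

#include <pthread.h>
#include <stdio.h>

/* Shared "next auto-increment" state, loosely analogous to ha_part_data
   in the hunk above; all names here are illustrative only. */
static pthread_mutex_t lock_auto_inc = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long next_auto_inc_val = 1;

/* Bump the shared counter only if the value just written is >= the current
   counter, and only while holding the mutex, so concurrent writers cannot
   hand out a value that was already used. */
static void set_auto_increment_if_higher(unsigned long long written)
{
    pthread_mutex_lock(&lock_auto_inc);
    if (written >= next_auto_inc_val)
        next_auto_inc_val = written + 1;
    pthread_mutex_unlock(&lock_auto_inc);
}

int main(void)
{
    set_auto_increment_if_higher(10);  /* explicit value above the counter */
    set_auto_increment_if_higher(3);   /* lower value: counter unchanged */
    printf("next value: %llu\n", next_auto_inc_val);  /* prints 11 */
    return 0;
}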
@@ -1911,18 +1911,22 @@ int subselect_single_select_engine::exec()
}
if (!select_lex->uncacheable && thd->lex->describe &&
!(join->select_options & SELECT_DESCRIBE) &&
join->need_tmp && item->const_item())
join->need_tmp)
{
/*
Force join->join_tmp creation, because this subquery will be replaced
by a simple select from the materialization temp table by optimize()
called by EXPLAIN and we need to preserve the initial query structure
so we can display it.
*/
select_lex->uncacheable|= UNCACHEABLE_EXPLAIN;
select_lex->master_unit()->uncacheable|= UNCACHEABLE_EXPLAIN;
if (join->init_save_join_tab())
DBUG_RETURN(1); /* purecov: inspected */
item->update_used_tables();
if (item->const_item())
{
/*
Force join->join_tmp creation, because this subquery will be replaced
by a simple select from the materialization temp table by optimize()
called by EXPLAIN and we need to preserve the initial query structure
so we can display it.
*/
select_lex->uncacheable|= UNCACHEABLE_EXPLAIN;
select_lex->master_unit()->uncacheable|= UNCACHEABLE_EXPLAIN;
if (join->init_save_join_tab())
DBUG_RETURN(1); /* purecov: inspected */
}
}
if (item->engine_changed)
{
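Editor's note: the hunk above defers the const_item() test until after update_used_tables() and, under EXPLAIN, saves the join before the subquery is folded into a materialized constant, so the original plan can still be printed. A loose, self-contained illustration of that "save the structure before it is destructively rewritten" idea, in generic C with made-up names (not the server's JOIN machinery):

#include <stdio.h>
#include <string.h>

/* Toy "plan": just a description string plus an optional saved copy. */
struct plan {
    char description[64];
    char saved[64];        /* copy kept around for EXPLAIN */
    int  have_saved;
};

/* Keep a copy of the plan before it is replaced, mirroring what
   init_save_join_tab() does for the real join structure. */
static void save_plan_for_explain(struct plan *p)
{
    strncpy(p->saved, p->description, sizeof(p->saved) - 1);
    p->saved[sizeof(p->saved) - 1] = '\0';
    p->have_saved = 1;
}

static void materialize(struct plan *p, int explain_mode)
{
    if (explain_mode)
        save_plan_for_explain(p);
    /* destructive step: the original structure is gone after this */
    strcpy(p->description, "const row from temp table");
}

int main(void)
{
    struct plan p = { "scan t1, group by f1", "", 0 };
    materialize(&p, 1 /* EXPLAIN */);
    printf("executed as: %s\n", p.description);
    if (p.have_saved)
        printf("EXPLAIN shows: %s\n", p.saved);
    return 0;
}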
@@ -166,12 +166,13 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
uint min_len, uint max_len,
uint flags,
PARTITION_ITERATOR *part_iter);

#ifdef WITH_PARTITION_STORAGE_ENGINE
static int cmp_rec_and_tuple(part_column_list_val *val, uint32 nvals_in_rec);
static int cmp_rec_and_tuple_prune(part_column_list_val *val,
uint32 n_vals_in_rec,
bool tail_is_min);

#ifdef WITH_PARTITION_STORAGE_ENGINE
/*
Convert constants in VALUES definition to the character set the
corresponding field uses.
@@ -2420,11 +2420,15 @@ void reinit_stmt_before_use(THD *thd, LEX *lex)
sl->where= sl->prep_where->copy_andor_structure(thd);
sl->where->cleanup();
}
else
sl->where= NULL;
if (sl->prep_having)
{
sl->having= sl->prep_having->copy_andor_structure(thd);
sl->having->cleanup();
}
else
sl->having= NULL;
DBUG_ASSERT(sl->join == 0);
ORDER *order;
/* Fix GROUP list */
@@ -98,11 +98,13 @@ static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **),
static void store_key_options(THD *thd, String *packet, TABLE *table,
KEY *key_info);

#ifdef WITH_PARTITION_STORAGE_ENGINE
static void get_cs_converted_string_value(THD *thd,
String *input_str,
String *output_str,
CHARSET_INFO *cs,
bool use_hex);
#endif

static void
append_algorithm(TABLE_LIST *table, String *buff);
@@ -7850,6 +7852,7 @@ void initialize_information_schema_acl()
&is_internal_schema_access);
}

#ifdef WITH_PARTITION_STORAGE_ENGINE
/*
Convert a string in character set in column character set format
to utf8 character set if possible, the utf8 character set string
@@ -7941,3 +7944,4 @@ static void get_cs_converted_string_value(THD *thd,
}
return;
}
#endif
@@ -1718,17 +1718,28 @@ err_exit:

err = dict_load_indexes(table, heap);

/* Initialize table foreign_child value. Its value could be
changed when dict_load_foreigns() is called below */
table->fk_max_recusive_level = 0;

/* If the force recovery flag is set, we open the table irrespective
of the error condition, since the user may want to dump data from the
clustered index. However we load the foreign key information only if
all indexes were loaded. */
if (!cached) {
} else if (err == DB_SUCCESS) {
err = dict_load_foreigns(table->name, TRUE);
err = dict_load_foreigns(table->name, TRUE, TRUE);

if (err != DB_SUCCESS) {
dict_table_remove_from_cache(table);
table = NULL;
}
} else if (!srv_force_recovery) {
dict_table_remove_from_cache(table);
table = NULL;
}

table->fk_max_recusive_level = 0;
#if 0
if (err != DB_SUCCESS && table != NULL) {

@@ -1952,8 +1963,12 @@ dict_load_foreign(
/*==============*/
const char* id, /*!< in: foreign constraint id as a
null-terminated string */
ibool check_charsets)
ibool check_charsets,
/*!< in: TRUE=check charset compatibility */
ibool check_recursive)
/*!< in: Whether to record the foreign table
parent count to avoid unlimited recursive
load of chained foreign tables */
{
dict_foreign_t* foreign;
dict_table_t* sys_foreign;
@@ -1967,6 +1982,8 @@ dict_load_foreign(
ulint len;
ulint n_fields_and_type;
mtr_t mtr;
dict_table_t* for_table;
dict_table_t* ref_table;

ut_ad(mutex_own(&(dict_sys->mutex)));

@@ -2051,11 +2068,54 @@ dict_load_foreign(

dict_load_foreign_cols(id, foreign);

/* If the foreign table is not yet in the dictionary cache, we
have to load it so that we are able to make type comparisons
in the next function call. */
ref_table = dict_table_check_if_in_cache_low(
foreign->referenced_table_name);

dict_table_get_low(foreign->foreign_table_name);
/* We could possibly wind up in a deep recursive calls if
we call dict_table_get_low() again here if there
is a chain of tables concatenated together with
foreign constraints. In such case, each table is
both a parent and child of the other tables, and
act as a "link" in such table chains.
To avoid such scenario, we would need to check the
number of ancesters the current table has. If that
exceeds DICT_FK_MAX_CHAIN_LEN, we will stop loading
the child table.
Foreign constraints are loaded in a Breath First fashion,
that is, the index on FOR_NAME is scanned first, and then
index on REF_NAME. So foreign constrains in which
current table is a child (foreign table) are loaded first,
and then those constraints where current table is a
parent (referenced) table.
Thus we could check the parent (ref_table) table's
reference count (fk_max_recusive_level) to know how deep the
recursive call is. If the parent table (ref_table) is already
loaded, and its fk_max_recusive_level is larger than
DICT_FK_MAX_CHAIN_LEN, we will stop the recursive loading
by skipping loading the child table. It will not affect foreign
constraint check for DMLs since child table will be loaded
at that time for the constraint check. */
if (!ref_table
|| ref_table->fk_max_recusive_level < DICT_FK_MAX_RECURSIVE_LOAD) {

/* If the foreign table is not yet in the dictionary cache, we
have to load it so that we are able to make type comparisons
in the next function call. */

for_table = dict_table_get_low(foreign->foreign_table_name);

if (for_table && ref_table && check_recursive) {
/* This is to record the longest chain of ancesters
this table has, if the parent has more ancesters
than this table has, record it after add 1 (for this
parent */
if (ref_table->fk_max_recusive_level
>= for_table->fk_max_recusive_level) {
for_table->fk_max_recusive_level =
ref_table->fk_max_recusive_level + 1;
}
}
}

/* Note that there may already be a foreign constraint object in
the dictionary cache for this constraint: then the following
@@ -2080,6 +2140,8 @@ ulint
dict_load_foreigns(
/*===============*/
const char* table_name, /*!< in: table name */
ibool check_recursive,/*!< in: Whether to check recursive
load of tables chained by FK */
ibool check_charsets) /*!< in: TRUE=check charset
compatibility */
{
@@ -2181,7 +2243,7 @@ loop:

/* Load the foreign constraint definition to the dictionary cache */

err = dict_load_foreign(id, check_charsets);
err = dict_load_foreign(id, check_charsets, check_recursive);

if (err != DB_SUCCESS) {
btr_pcur_close(&pcur);
@@ -2209,6 +2271,11 @@ load_next_index:

mtr_start(&mtr);

/* Switch to scan index on REF_NAME, fk_max_recusive_level
already been updated when scanning FOR_NAME index, no need to
update again */
check_recursive = FALSE;

goto start_load;
}

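Editor's note: the long comment above describes why dict_load_foreign() now caps how deep it will follow a chain of tables linked by foreign keys: each table records the longest ancestor chain seen so far (fk_max_recusive_level), and loading of the next table is skipped once that count reaches DICT_FK_MAX_RECURSIVE_LOAD, leaving it to be loaded lazily when an FK check actually needs it. A minimal standalone sketch of that bookkeeping, using a toy table chain and hypothetical names rather than the data dictionary code:

#include <stdio.h>

#define MAX_RECURSIVE_LOAD 255  /* stands in for DICT_FK_MAX_RECURSIVE_LOAD */

/* Toy "table": knows the table whose FK points at it and the longest
   ancestor chain recorded so far. All names are illustrative. */
struct table {
    const char   *name;
    struct table *child;        /* table referencing this one, or NULL */
    unsigned      chain_level;  /* ancestors recorded above this table */
    int           loaded;
};

/* Load a table, then follow the FK chain downwards, but stop once the
   recorded chain length reaches the cap; the skipped table is simply
   loaded later, when a constraint check needs it. */
static void load_with_cap(struct table *t)
{
    t->loaded = 1;
    if (t->child == NULL)
        return;
    if (t->chain_level >= MAX_RECURSIVE_LOAD)
        return;                          /* break the recursion here */
    if (t->child->chain_level <= t->chain_level)
        t->child->chain_level = t->chain_level + 1;
    load_with_cap(t->child);
}

int main(void)
{
    struct table a = { "A", NULL, 0, 0 };
    struct table b = { "B", NULL, 0, 0 };
    struct table c = { "C", NULL, 0, 0 };
    a.child = &b;
    b.child = &c;
    load_with_cap(&a);
    printf("C loaded=%d, chain_level=%u\n", c.loaded, c.chain_level);
    return 0;
}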
@@ -900,6 +900,19 @@ convert_error_code_to_mysql(
case DB_INTERRUPTED:
my_error(ER_QUERY_INTERRUPTED, MYF(0));
/* fall through */

case DB_FOREIGN_EXCEED_MAX_CASCADE:
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
HA_ERR_ROW_IS_REFERENCED,
"InnoDB: Cannot delete/update "
"rows with cascading foreign key "
"constraints that exceed max "
"depth of %d. Please "
"drop extra constraints and try "
"again", DICT_FK_MAX_RECURSIVE_LOAD);

/* fall through */

case DB_ERROR:
default:
return(-1); /* unspecified error */
@@ -101,6 +101,9 @@ enum db_err {
requested but this storage does not
exist itself or the stats for a given
table do not exist */
DB_FOREIGN_EXCEED_MAX_CASCADE, /* Foreign key constraint related
cascading delete/update exceeds
maximum allowed depth */

/* The following are partial failure codes */
DB_FAIL = 1000,
@@ -200,6 +200,8 @@ ulint
dict_load_foreigns(
/*===============*/
const char* table_name, /*!< in: table name */
ibool check_recursive,/*!< in: Whether to check recursive
load of tables chained by FK */
ibool check_charsets);/*!< in: TRUE=check charsets
compatibility */
/********************************************************************//**
@@ -116,6 +116,21 @@ ROW_FORMAT=REDUNDANT. */
in table->flags. */
/* @} */

/** Tables could be chained together with Foreign key constraint. When
first load the parent table, we would load all of its descedents.
This could result in rescursive calls and out of stack error eventually.
DICT_FK_MAX_RECURSIVE_LOAD defines the maximum number of recursive loads,
when exceeded, the child table will not be loaded. It will be loaded when
the foreign constraint check needs to be run. */
#define DICT_FK_MAX_RECURSIVE_LOAD 255

/** Similarly, when tables are chained together with foreign key constraints
with on cascading delete/update clause, delete from parent table could
result in recursive cascading calls. This defines the maximum number of
such cascading deletes/updates allowed. When exceeded, the delete from
parent table will fail, and user has to drop excessive foreign constraint
before proceeds. */
#define FK_MAX_CASCADE_DEL 255

/**********************************************************************//**
Creates a table memory object.
@@ -469,6 +484,12 @@ struct dict_table_struct{
NOT allowed until this count gets to zero;
MySQL does NOT itself check the number of
open handles at drop */
unsigned fk_max_recusive_level:8;
/*!< maximum recursive level we support when
loading tables chained together with FK
constraints. If exceeds this level, we will
stop loading child table into memory along with
its parent table */
ulint n_foreign_key_checks_running;
/*!< count of how many foreign key check
operations are currently being performed
@@ -381,6 +381,9 @@ struct que_thr_struct{
thus far */
ulint lock_state; /*!< lock state of thread (table or
row) */
ulint fk_cascade_depth; /*!< maximum cascading call depth
supported for foreign key constraint
related delete/updates */
};

#define QUE_THR_MAGIC_N 8476583
@@ -2418,7 +2418,7 @@ row_merge_rename_tables(
goto err_exit;
}

err = dict_load_foreigns(old_name, TRUE);
err = dict_load_foreigns(old_name, FALSE, TRUE);

if (err != DB_SUCCESS) {
err_exit:
@@ -635,6 +635,13 @@ handle_new_error:
"InnoDB: " REFMAN "forcing-recovery.html"
" for help.\n", stderr);
break;
case DB_FOREIGN_EXCEED_MAX_CASCADE:
fprintf(stderr, "InnoDB: Cannot delete/update rows with"
" cascading foreign key constraints that exceed max"
" depth of %lu\n"
"Please drop excessive foreign constraints"
" and try again\n", (ulong) DICT_FK_MAX_RECURSIVE_LOAD);
break;
default:
fprintf(stderr, "InnoDB: unknown error code %lu\n",
(ulong) err);
@@ -1440,11 +1447,15 @@ row_update_for_mysql(
run_again:
thr->run_node = node;
thr->prev_node = node;
thr->fk_cascade_depth = 0;

row_upd_step(thr);

err = trx->error_state;

/* Reset fk_cascade_depth back to 0 */
thr->fk_cascade_depth = 0;

if (err != DB_SUCCESS) {
que_thr_stop_for_mysql(thr);

@@ -1640,12 +1651,27 @@ row_update_cascade_for_mysql(
trx_t* trx;

trx = thr_get_trx(thr);

/* Increment fk_cascade_depth to record the recursive call depth on
a single update/delete that affects multiple tables chained
together with foreign key relations. */
thr->fk_cascade_depth++;

if (thr->fk_cascade_depth > FK_MAX_CASCADE_DEL) {
return (DB_FOREIGN_EXCEED_MAX_CASCADE);
}
run_again:
thr->run_node = node;
thr->prev_node = node;

row_upd_step(thr);

/* The recursive call for cascading update/delete happens
in above row_upd_step(), reset the counter once we come
out of the recursive call, so it does not accumulate for
different row deletes */
thr->fk_cascade_depth = 0;

err = trx->error_state;

/* Note that the cascade node is a subnode of another InnoDB
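Editor's note: row_update_cascade_for_mysql() above guards against runaway ON DELETE/UPDATE CASCADE chains: the depth counter is bumped before the cascaded step, the operation is refused once it passes FK_MAX_CASCADE_DEL, and the counter is reset after row_upd_step() returns so that many rows at the same depth (as in the Bug#57255 test) do not accumulate. A small self-contained sketch of that guard, with made-up names standing in for the query-thread state:

#include <stdio.h>

#define FK_MAX_CASCADE_DEPTH 255   /* plays the role of FK_MAX_CASCADE_DEL */
#define ERR_EXCEED_MAX_CASCADE 1
#define ERR_SUCCESS 0

/* Minimal stand-in for the per-thread state carrying the depth counter. */
struct thread_state {
    unsigned fk_cascade_depth;
};

/* One cascaded delete step: refuse if the chain is too deep, recurse into
   the next table in the chain, then reset the counter so sibling rows at
   the same depth start from zero again. */
static int cascade_delete(struct thread_state *thr, unsigned chain_len)
{
    int err = ERR_SUCCESS;

    thr->fk_cascade_depth++;
    if (thr->fk_cascade_depth > FK_MAX_CASCADE_DEPTH)
        return ERR_EXCEED_MAX_CASCADE;

    if (chain_len > 0)                      /* descend into the child table */
        err = cascade_delete(thr, chain_len - 1);

    thr->fk_cascade_depth = 0;              /* reset once we come back out */
    return err;
}

int main(void)
{
    struct thread_state thr = { 0 };

    /* A chain of 3 tables is fine; a chain of 300 is rejected. */
    printf("depth 3:   %s\n",
           cascade_delete(&thr, 3) == ERR_SUCCESS ? "ok" : "too deep");
    thr.fk_cascade_depth = 0;
    printf("depth 300: %s\n",
           cascade_delete(&thr, 300) == ERR_SUCCESS ? "ok" : "too deep");
    return 0;
}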
@@ -2120,7 +2146,7 @@ row_table_add_foreign_constraints(
name, reject_fks);
if (err == DB_SUCCESS) {
/* Check that also referencing constraints are ok */
err = dict_load_foreigns(name, TRUE);
err = dict_load_foreigns(name, FALSE, TRUE);
}

if (err != DB_SUCCESS) {
@@ -3992,7 +4018,7 @@ end:
an ALTER, not in a RENAME. */

err = dict_load_foreigns(
new_name, !old_is_tmp || trx->check_foreigns);
new_name, FALSE, !old_is_tmp || trx->check_foreigns);

if (err != DB_SUCCESS) {
ut_print_timestamp(stderr);
@@ -693,6 +693,8 @@ ut_strerr(
return("Lock structs have exhausted the buffer pool");
case DB_FOREIGN_DUPLICATE_KEY:
return("Foreign key activated with duplicate keys");
case DB_FOREIGN_EXCEED_MAX_CASCADE:
return("Foreign key cascade delete/update exceeds max depth");
case DB_TOO_MANY_CONCURRENT_TRXS:
return("Too many concurrent transactions");
case DB_UNSUPPORTED:
@@ -974,11 +974,17 @@ echo "=====" >> $STATUS_HISTORY
%attr(755, root, root) %{_libdir}/mysql/plugin/mypluglib.so
%attr(755, root, root) %{_libdir}/mysql/plugin/semisync_master.so
%attr(755, root, root) %{_libdir}/mysql/plugin/semisync_slave.so
%attr(755, root, root) %{_libdir}/mysql/plugin/auth.so
%attr(755, root, root) %{_libdir}/mysql/plugin/auth_socket.so
%attr(755, root, root) %{_libdir}/mysql/plugin/auth_test_plugin.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/adt_null.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/libdaemon_example.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/mypluglib.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/semisync_master.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/semisync_slave.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/auth.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/auth_socket.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/auth_test_plugin.so

%if %{WITH_TCMALLOC}
%attr(755, root, root) %{_libdir}/mysql/%{malloc_lib_target}
@@ -1075,6 +1081,10 @@ echo "=====" >> $STATUS_HISTORY
# merging BK trees)
##############################################################################
%changelog
* Wed Oct 6 2010 Georgi Kodinov <georgi.godinov@oracle.com>

- Added example external authentication (WL#1054) plugin binaries

* Wed Aug 11 2010 Joerg Bruehe <joerg.bruehe@oracle.com>

- With a recent spec file cleanup, names have changed: A "-community" part was dropped.