Merge bk-internal.mysql.com:/home/bk/mysql-5.1-runtime
into  jabberwock.site:/home/dlenev/mysql-5.1-bg18437


sql/ha_ndbcluster.cc:
  Auto merged
sql/ha_partition.cc:
  Auto merged
sql/log_event.cc:
  Auto merged
sql/mysql_priv.h:
  Auto merged
sql/sql_insert.cc:
  Auto merged
sql/sql_parse.cc:
  Auto merged
sql/sql_trigger.cc:
  Auto merged
sql/sql_update.cc:
  Auto merged
sql/table.cc:
  Auto merged
sql/sql_table.cc:
  Manual merge.
Author: unknown
Date:   2006-07-07 14:11:35 +04:00
Commit: 78942270ed
19 changed files with 538 additions and 76 deletions

@@ -156,7 +156,16 @@ enum ha_extra_function {
Mark the table as a log table. For some handlers (e.g. CSV) this results
in a special locking for the table.
*/
HA_EXTRA_MARK_AS_LOG_TABLE
HA_EXTRA_MARK_AS_LOG_TABLE,
/*
Informs the handler that a write_row() call which tries to insert a new
row and encounters an existing row with the same primary/unique key may
replace the old row with the new one instead of reporting an error
(in other words, it tells the handler that we are doing a REPLACE
rather than a plain INSERT).
Off by default.
*/
HA_EXTRA_WRITE_CAN_REPLACE,
HA_EXTRA_WRITE_CANNOT_REPLACE
};
/* The following is parameter to ha_panic() */
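
For illustration, here is a minimal toy model of the contract these two flags establish (this is not the server's handler API; the types and names below are invented): the SQL layer flips a flag before writing rows, and the engine may then resolve duplicate-key conflicts itself instead of returning an error. Note that ha_ndbcluster (later in this commit) accepts the hint only when the table has no unique index besides the primary key.

#include <cstdio>
#include <map>

// Toy engine: a "table" keyed by primary key, plus the toggle that
// extra(HA_EXTRA_WRITE_CAN_REPLACE / HA_EXTRA_WRITE_CANNOT_REPLACE)
// would flip in a real handler.
struct toy_engine {
  std::map<int, int> rows;        // pk -> data
  bool write_can_replace= false;  // set by the CAN_REPLACE hint

  int write_row(int pk, int data) {
    if (rows.count(pk)) {
      if (!write_can_replace)
        return 1;                 // duplicate-key error to the SQL layer
      rows[pk]= data;             // REPLACE semantics: overwrite old row
      return 0;
    }
    rows[pk]= data;
    return 0;
  }
};

int main() {
  toy_engine t;
  t.write_row(1, 10);
  std::printf("plain insert of duplicate: %d\n", t.write_row(1, 20)); // 1
  t.write_can_replace= true;      // SQL layer announced REPLACE
  std::printf("replacing write:           %d\n", t.write_row(1, 20)); // 0
  return 0;
}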

@@ -1602,6 +1602,34 @@ fld_cid fld_name fld_parentid fld_delt
DROP TABLE federated.t1;
DROP TABLE federated.bug_17377_table;
DROP TABLE federated.t1;
drop table if exists federated.t1;
create table federated.t1 (a int, b int, c int);
drop table if exists federated.t1;
drop table if exists federated.t2;
create table federated.t1 (a int, b int, c int) engine=federated connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
create trigger federated.t1_bi before insert on federated.t1 for each row set new.c= new.a * new.b;
create table federated.t2 (a int, b int);
insert into federated.t2 values (13, 17), (19, 23);
insert into federated.t1 (a, b) values (1, 2), (3, 5), (7, 11);
select * from federated.t1 order by a;
a b c
1 2 2
3 5 15
7 11 77
delete from federated.t1;
insert into federated.t1 (a, b) select * from federated.t2;
select * from federated.t1 order by a;
a b c
13 17 221
19 23 437
delete from federated.t1;
load data infile '../std_data_ln/loaddata5.dat' into table federated.t1 fields terminated by '' enclosed by '' ignore 1 lines (a, b);
select * from federated.t1 order by a;
a b c
3 4 12
5 6 30
drop tables federated.t1, federated.t2;
drop table federated.t1;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;

@@ -31,6 +31,7 @@ SELECT * from t1 ORDER BY i;
i j k
3 1 42
17 2 NULL
DROP TABLE t1;
CREATE TABLE t2 (a INT(11) NOT NULL,
b INT(11) NOT NULL,
c INT(11) NOT NULL,
@@ -52,3 +53,47 @@ SELECT * FROM t2 ORDER BY id;
a b c x y z id i
1 1 1 b b b 5 2
DROP TABLE t2;
drop table if exists t1;
create table t1 (pk int primary key, apk int unique, data int) engine=ndbcluster;
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) values (4, 1), (5, 2);
select * from t1 order by pk;
pk apk data
3 3 3
4 1 NULL
5 2 NULL
delete from t1;
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) values (1, 4), (2, 5);
select * from t1 order by pk;
pk apk data
1 4 NULL
2 5 NULL
3 3 3
delete from t1;
insert into t1 values (1, 1, 1), (4, 4, 4), (6, 6, 6);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
select * from t1 order by pk;
pk apk data
1 1 1
3 4 NULL
5 6 NULL
delete from t1;
insert into t1 values (1, 1, 1), (3, 3, 3), (5, 5, 5);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
select * from t1 order by pk;
pk apk data
1 1 1
3 4 NULL
5 6 NULL
delete from t1;
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) select 4, 1;
replace into t1 (pk, apk) select 2, 4;
select * from t1 order by pk;
pk apk data
2 4 NULL
3 3 3
4 1 NULL
drop table t1;
End of 5.0 tests.

@@ -0,0 +1,119 @@
drop table if exists t1, t2, t3;
create table t1 (id int primary key, a int not null, b decimal (63,30) default 0) engine=ndb;
create table t2 (op char(1), a int not null, b decimal (63,30));
create table t3 select 1 as i;
create trigger t1_bu before update on t1 for each row
begin
insert into t2 values ("u", old.a, old.b);
set new.b = old.b + 10;
end;//
create trigger t1_bd before delete on t1 for each row
begin
insert into t2 values ("d", old.a, old.b);
end;//
insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (3, 3, 3.05), (4, 4, 4.05);
update t1 set a=5 where a != 3;
select * from t1 order by id;
id a b
1 5 11.050000000000000000000000000000
2 5 12.050000000000000000000000000000
3 3 3.050000000000000000000000000000
4 5 14.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
u 1 1.050000000000000000000000000000
u 2 2.050000000000000000000000000000
u 4 4.050000000000000000000000000000
delete from t2;
update t1, t3 set a=6 where a = 5;
select * from t1 order by id;
id a b
1 6 21.050000000000000000000000000000
2 6 22.050000000000000000000000000000
3 3 3.050000000000000000000000000000
4 6 24.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
u 5 11.050000000000000000000000000000
u 5 12.050000000000000000000000000000
u 5 14.050000000000000000000000000000
delete from t2;
delete from t1 where a != 3;
select * from t1 order by id;
id a b
3 3 3.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 6 21.050000000000000000000000000000
d 6 22.050000000000000000000000000000
d 6 24.050000000000000000000000000000
delete from t2;
insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (4, 4, 4.05);
delete t1 from t1, t3 where a != 3;
select * from t1 order by id;
id a b
3 3 3.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 1 1.050000000000000000000000000000
d 2 2.050000000000000000000000000000
d 4 4.050000000000000000000000000000
delete from t2;
insert into t1 values (4, 4, 4.05);
insert into t1 (id, a) values (4, 1), (3, 1) on duplicate key update a= a + 1;
select * from t1 order by id;
id a b
3 4 13.050000000000000000000000000000
4 5 14.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
u 3 3.050000000000000000000000000000
u 4 4.050000000000000000000000000000
delete from t2;
delete from t3;
insert into t3 values (4), (3);
insert into t1 (id, a) (select i, 1 from t3) on duplicate key update a= a + 1;
select * from t1 order by id;
id a b
3 5 23.050000000000000000000000000000
4 6 24.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
u 4 13.050000000000000000000000000000
u 5 14.050000000000000000000000000000
delete from t2;
replace into t1 (id, a) values (4, 1), (3, 1);
select * from t1 order by id;
id a b
3 1 0.000000000000000000000000000000
4 1 0.000000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 5 23.050000000000000000000000000000
d 6 24.050000000000000000000000000000
delete from t1;
delete from t2;
insert into t1 values (3, 1, 1.05), (4, 1, 2.05);
replace into t1 (id, a) (select i, 2 from t3);
select * from t1 order by id;
id a b
3 2 0.000000000000000000000000000000
4 2 0.000000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 1 1.050000000000000000000000000000
d 1 2.050000000000000000000000000000
delete from t1;
delete from t2;
insert into t1 values (3, 1, 1.05), (5, 2, 2.05);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (id, a);
select * from t1 order by id;
id a b
3 4 0.000000000000000000000000000000
5 6 0.000000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 1 1.050000000000000000000000000000
d 2 2.050000000000000000000000000000
drop tables t1, t2, t3;
End of 5.0 tests

@@ -1363,4 +1363,46 @@ drop table federated.t1, federated.t2;
connection master;
--enable_parsing
#
# Additional test for bug#18437 "Wrong values inserted with a before
# update trigger on NDB table". The SQL layer didn't properly inform
# the handler about fields which were read and set in triggers. In some
# cases this resulted in incorrect (garbage) values of OLD variables
# and lost changes to NEW variables.
# Since handler::write_row() is the only operation in the federated
# engine affected by wrong field mark-up, this file contains coverage
# for ON INSERT triggers only. Tests for other types of triggers reside
# in ndb_trigger.test.
#
--disable_warnings
drop table if exists federated.t1;
--enable_warnings
create table federated.t1 (a int, b int, c int);
connection master;
--disable_warnings
drop table if exists federated.t1;
drop table if exists federated.t2;
--enable_warnings
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval create table federated.t1 (a int, b int, c int) engine=federated connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
create trigger federated.t1_bi before insert on federated.t1 for each row set new.c= new.a * new.b;
create table federated.t2 (a int, b int);
insert into federated.t2 values (13, 17), (19, 23);
# Each of the three statements should correctly set values for all three fields
# insert
insert into federated.t1 (a, b) values (1, 2), (3, 5), (7, 11);
select * from federated.t1 order by a;
delete from federated.t1;
# insert ... select
insert into federated.t1 (a, b) select * from federated.t2;
select * from federated.t1 order by a;
delete from federated.t1;
# load
load data infile '../std_data_ln/loaddata5.dat' into table federated.t1 fields terminated by '' enclosed by '' ignore 1 lines (a, b);
select * from federated.t1 order by a;
drop tables federated.t1, federated.t2;
connection slave;
drop table federated.t1;
--source include/federated_cleanup.inc

@@ -39,6 +39,7 @@ INSERT INTO t1 VALUES (1,1,23),(2,2,24);
REPLACE INTO t1 (j,k) VALUES (1,42);
REPLACE INTO t1 (i,j) VALUES (17,2);
SELECT * from t1 ORDER BY i;
DROP TABLE t1;
# bug#19906
CREATE TABLE t2 (a INT(11) NOT NULL,
@@ -64,4 +65,40 @@ SELECT * FROM t2 ORDER BY id;
DROP TABLE t2;
#
# Bug #20728 "REPLACE does not work correctly for NDB table with PK and
# unique index"
#
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (pk int primary key, apk int unique, data int) engine=ndbcluster;
# Test for plain replace which updates pk
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) values (4, 1), (5, 2);
select * from t1 order by pk;
delete from t1;
# Another test for plain replace which doesn't touch pk
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) values (1, 4), (2, 5);
select * from t1 order by pk;
delete from t1;
# Test for load data replace which updates pk
insert into t1 values (1, 1, 1), (4, 4, 4), (6, 6, 6);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
select * from t1 order by pk;
delete from t1;
# Now test for load data replace which doesn't touch pk
insert into t1 values (1, 1, 1), (3, 3, 3), (5, 5, 5);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
select * from t1 order by pk;
delete from t1;
# Finally test for both types of replace ... select
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) select 4, 1;
replace into t1 (pk, apk) select 2, 4;
select * from t1 order by pk;
# Clean-up
drop table t1;
--echo End of 5.0 tests.

@@ -0,0 +1,92 @@
# Tests which involve triggers and NDB storage engine
--source include/have_ndb.inc
--source include/not_embedded.inc
#
# Test for bug#18437 "Wrong values inserted with a before update
# trigger on NDB table". The SQL layer didn't properly inform the
# handler about fields which were read and set in triggers. In some
# cases this resulted in incorrect (garbage) values of OLD variables
# and lost changes to NEW variables.
# You can find similar tests for ON INSERT triggers in federated.test,
# since federated is so far the only engine in MySQL which cares about
# field mark-up during the handler::write_row() operation.
#
--disable_warnings
drop table if exists t1, t2, t3;
--enable_warnings
create table t1 (id int primary key, a int not null, b decimal (63,30) default 0) engine=ndb;
create table t2 (op char(1), a int not null, b decimal (63,30));
create table t3 select 1 as i;
delimiter //;
create trigger t1_bu before update on t1 for each row
begin
insert into t2 values ("u", old.a, old.b);
set new.b = old.b + 10;
end;//
create trigger t1_bd before delete on t1 for each row
begin
insert into t2 values ("d", old.a, old.b);
end;//
delimiter ;//
insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (3, 3, 3.05), (4, 4, 4.05);
# Check that usual update works as it should
update t1 set a=5 where a != 3;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check that everything works for multi-update
update t1, t3 set a=6 where a = 5;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for delete
delete from t1 where a != 3;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for multi-delete
insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (4, 4, 4.05);
delete t1 from t1, t3 where a != 3;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for insert ... on duplicate key update
insert into t1 values (4, 4, 4.05);
insert into t1 (id, a) values (4, 1), (3, 1) on duplicate key update a= a + 1;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for insert ... select ... on duplicate key update
delete from t3;
insert into t3 values (4), (3);
insert into t1 (id, a) (select i, 1 from t3) on duplicate key update a= a + 1;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for replace
replace into t1 (id, a) values (4, 1), (3, 1);
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t1;
delete from t2;
# Check for replace ... select ...
insert into t1 values (3, 1, 1.05), (4, 1, 2.05);
replace into t1 (id, a) (select i, 2 from t3);
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t1;
delete from t2;
# Check for load data replace
insert into t1 values (3, 1, 1.05), (5, 2, 2.05);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (id, a);
select * from t1 order by id;
select * from t2 order by op, a, b;
drop tables t1, t2, t3;
--echo End of 5.0 tests

@@ -3663,20 +3663,11 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
switch (operation) {
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
if (current_thd->lex->sql_command == SQLCOM_REPLACE && !m_has_unique_index)
{
DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
m_use_write= TRUE;
} else
{
DBUG_PRINT("info", ("Ignoring duplicate key"));
m_ignore_dup_key= TRUE;
}
DBUG_PRINT("info", ("Ignoring duplicate key"));
m_ignore_dup_key= TRUE;
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= FALSE;
m_ignore_dup_key= FALSE;
break;
case HA_EXTRA_IGNORE_NO_KEY:
@@ -3689,6 +3680,19 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit"));
m_ignore_no_key= FALSE;
break;
case HA_EXTRA_WRITE_CAN_REPLACE:
DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE"));
if (!m_has_unique_index)
{
DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
m_use_write= TRUE;
}
break;
case HA_EXTRA_WRITE_CANNOT_REPLACE:
DBUG_PRINT("info", ("HA_EXTRA_WRITE_CANNOT_REPLACE"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= FALSE;
break;
default:
break;
}

@@ -4666,6 +4666,27 @@ int ha_partition::extra(enum ha_extra_function operation)
*/
break;
}
case HA_EXTRA_WRITE_CAN_REPLACE:
case HA_EXTRA_WRITE_CANNOT_REPLACE:
{
/*
Informs the handler that write_row() may replace rows which conflict
with the row being inserted on a PK/unique key, without reporting an
error to the SQL layer.
In the general case this optimization is not safe for a partitioned
table, since the new version of a row may have to go into a partition
different from the one in which the old version resides (for example,
when we partition by a non-PK column, or by a column which is not part
of the unique key that was violated).
Since NDB, which is at the moment the only engine supporting this
optimization, handles partitioning on its own, we simply disable the
optimization here. (For NDB the optimization is safe, since NDB
supports only KEY partitioning and won't use the optimization for
tables which have additional unique constraints.)
*/
break;
}
default:
{
/* Temporary crash to discover what is wrong */
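
To see why the comment above holds, consider a self-contained toy (the hash scheme is invented for illustration): with a table whose primary key is a but which is partitioned by non-PK column b, the replacing row can land in a different partition than the row it conflicts with, so a replacing write confined to one partition cannot remove the old version.

#include <cstdio>

// Toy model: PK is column a, table partitioned by hash of non-PK
// column b over 4 partitions.
static int partition_of(int b) { return b % 4; }

int main() {
  // Existing row (a=1, b=3) resides in partition 3, while
  // REPLACE INTO t VALUES (1, 6) maps the new version to partition 2.
  std::printf("old version: p%d, new version: p%d\n",
              partition_of(3), partition_of(6));
  // A write that only overwrites duplicates inside p2 can never delete
  // the conflicting row sitting in p3, so ha_partition must let the
  // SQL layer see the duplicate and perform delete + insert itself.
  return 0;
}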

@@ -5436,9 +5436,14 @@ void Item_insert_value::print(String *str)
void Item_trigger_field::setup_field(THD *thd, TABLE *table,
GRANT_INFO *table_grant_info)
{
/*
It is too early to mark fields as used here: before the statement
that invokes the trigger executes, other statements may use the same
TABLE object and wipe out any such mark-up. Instead we do the marking
in the Table_triggers_list::mark_fields_used() method, which is called
during execution of those statements.
*/
enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
/* TODO: Think more about consequences of this step. */
thd->mark_used_columns= MARK_COLUMNS_NONE;
/*
Try to find field by its name and if it will be found

@@ -6138,6 +6138,7 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table)
thd->lex->sql_command= SQLCOM_REPLACE;
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); // Needed for ndbcluster
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); // Needed for ndbcluster
table->file->extra(HA_EXTRA_IGNORE_NO_KEY); // Needed for ndbcluster
/*
TODO: the cluster team (Tomas?) says that it's better if the engine knows

@@ -872,9 +872,7 @@ bool mysql_alter_table(THD *thd, char *new_db, char *new_name,
TABLE_LIST *table_list,
List<create_field> &fields,
List<Key> &keys,
uint order_num, ORDER *order,
enum enum_duplicates handle_duplicates,
bool ignore,
uint order_num, ORDER *order, bool ignore,
ALTER_INFO *alter_info, bool do_send_ok);
bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok);
bool mysql_create_like_table(THD *thd, TABLE_LIST *table,

@@ -181,9 +181,6 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
}
}
}
if (table->found_next_number_field)
table->mark_auto_increment_column();
table->mark_columns_needed_for_insert();
// For the values we need select_priv
#ifndef NO_EMBEDDED_ACCESS_CHECKS
table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege);
@@ -414,6 +411,9 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->proc_info="update";
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (duplic == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
/*
let's *try* to start bulk inserts. It won't necessarily
start them, as values_list.elements should be greater than
@@ -442,6 +442,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
error= 1;
}
table->mark_columns_needed_for_insert();
if (table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd))
error= 1;
@@ -615,6 +617,9 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->next_insert_id=0; // Reset this if wrongly used
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
if (duplic == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
/* Reset value of LAST_INSERT_ID if no rows were inserted */
if (!info.copied && thd->insert_id_used)
@@ -1961,9 +1966,8 @@ bool delayed_insert::handle_inserts(void)
{
int error;
ulong max_rows;
bool using_ignore=0,
using_bin_log= mysql_bin_log.is_open();
bool using_ignore= 0, using_opt_replace= 0,
using_bin_log= mysql_bin_log.is_open();
delayed_row *row;
DBUG_ENTER("handle_inserts");
@@ -2026,6 +2030,13 @@ bool delayed_insert::handle_inserts(void)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
using_ignore=1;
}
if (info.handle_duplicates == DUP_REPLACE &&
(!table->triggers ||
!table->triggers->has_delete_triggers()))
{
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
using_opt_replace= 1;
}
thd.clear_error(); // reset error for binlog
if (write_record(&thd, table, &info))
{
@@ -2038,6 +2049,11 @@ bool delayed_insert::handle_inserts(void)
using_ignore=0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
}
if (using_opt_replace)
{
using_opt_replace= 0;
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
}
if (table->s->blob_fields)
free_delayed_insert_blobs(table);
thread_safe_sub(delayed_rows_in_use,1,&LOCK_delayed_status);
@@ -2283,6 +2299,9 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (info.handle_duplicates == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
thd->no_trans_update= 0;
thd->abort_on_warning= (!info.ignore &&
(thd->variables.sql_mode &
@@ -2292,6 +2311,10 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
check_that_all_fields_are_given_values(thd, table, table_list)) ||
table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd));
if (!res)
table->mark_columns_needed_for_insert();
DBUG_RETURN(res);
}
@@ -2482,6 +2505,7 @@ bool select_insert::send_eof()
error= (!thd->prelocked_mode) ? table->file->ha_end_bulk_insert():0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
if (info.copied || info.deleted || info.updated)
{
@@ -2766,6 +2790,9 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (info.handle_duplicates == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
if (!thd->prelocked_mode)
table->file->ha_start_bulk_insert((ha_rows) 0);
thd->no_trans_update= 0;
@@ -2773,8 +2800,10 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
(thd->variables.sql_mode &
(MODE_STRICT_TRANS_TABLES |
MODE_STRICT_ALL_TABLES)));
DBUG_RETURN(check_that_all_fields_are_given_values(thd, table,
table_list));
if (check_that_all_fields_are_given_values(thd, table, table_list))
DBUG_RETURN(1);
table->mark_columns_needed_for_insert();
DBUG_RETURN(0);
}
@@ -2848,6 +2877,7 @@ bool select_create::send_eof()
else
{
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
VOID(pthread_mutex_lock(&LOCK_open));
mysql_unlock_tables(thd, thd->extra_lock);
if (!table->s->tmp_table)
@@ -2873,6 +2903,7 @@ void select_create::abort()
if (table)
{
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
handlerton *table_type=table->s->db_type;
if (!table->s->tmp_table)
{
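
The same bracketing pattern recurs at every call site in this file (mysql_insert, delayed_insert::handle_inserts, select_insert::prepare/send_eof, select_create): announce CAN_REPLACE only for REPLACE on a table without ON DELETE triggers, and always revoke it when done. A condensed, compilable sketch of the guard, with invented stand-ins for the server types:

#include <cstdio>

enum enum_duplicates { DUP_ERROR, DUP_REPLACE };
enum ha_extra_hint   { WRITE_CAN_REPLACE, WRITE_CANNOT_REPLACE };

struct handler    { void extra(ha_extra_hint h) { std::printf("extra(%d)\n", h); } };
struct triggers_t { bool has_delete_triggers() const { return false; } };
struct table_t    { handler file; triggers_t *triggers= nullptr; };

// REPLACE may be pushed down to the engine only when no ON DELETE
// trigger could observe the row that would silently disappear.
static bool replace_pushdown_ok(const table_t *t, enum_duplicates duplic) {
  return duplic == DUP_REPLACE &&
         (!t->triggers || !t->triggers->has_delete_triggers());
}

int main() {
  table_t t;
  if (replace_pushdown_ok(&t, DUP_REPLACE))
    t.file.extra(WRITE_CAN_REPLACE);   // before writing rows
  /* ... write rows ... */
  t.file.extra(WRITE_CANNOT_REPLACE);  // always revoke afterwards
  return 0;
}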

@@ -187,9 +187,6 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
table= table_list->table;
transactional_table= table->file->has_transactions();
if (table->found_next_number_field)
table->mark_auto_increment_column();
if (!fields_vars.elements)
{
Field **field;
@@ -232,6 +229,8 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
}
table->mark_columns_needed_for_insert();
uint tot_length=0;
bool use_blobs= 0, use_vars= 0;
List_iterator_fast<Item> it(fields_vars);
@@ -362,6 +361,10 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
if (ignore ||
handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (handle_duplicates == DUP_REPLACE &&
(!table->triggers ||
!table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
if (!thd->prelocked_mode)
table->file->ha_start_bulk_insert((ha_rows) 0);
table->copy_blobs=1;
@@ -386,6 +389,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
error= 1;
}
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
table->next_number_field=0;
}
ha_enable_transaction(thd, TRUE);

@@ -3131,8 +3131,7 @@ end_with_restore_list:
lex->key_list,
select_lex->order_list.elements,
(ORDER *) select_lex->order_list.first,
lex->duplicates, lex->ignore, &lex->alter_info,
1);
lex->ignore, &lex->alter_info, 1);
break;
}
#endif /*DONT_ALLOW_SHOW_COMMANDS*/
@@ -7300,7 +7299,7 @@ bool mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys)
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name,
&create_info, table_list,
fields, keys, 0, (ORDER*)0,
DUP_ERROR, 0, &alter_info, 1));
0, &alter_info, 1));
}
@@ -7318,7 +7317,7 @@ bool mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info)
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name,
&create_info, table_list,
fields, keys, 0, (ORDER*)0,
DUP_ERROR, 0, alter_info, 1));
0, alter_info, 1));
}

@@ -35,9 +35,7 @@ const char *primary_key_name="PRIMARY";
static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end);
static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end);
static int copy_data_between_tables(TABLE *from,TABLE *to,
List<create_field> &create,
enum enum_duplicates handle_duplicates,
bool ignore,
List<create_field> &create, bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,ha_rows *deleted);
static bool prepare_blob_field(THD *thd, create_field *sql_field);
@@ -4955,8 +4953,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
HA_CREATE_INFO *lex_create_info,
TABLE_LIST *table_list,
List<create_field> &fields, List<Key> &keys,
uint order_num, ORDER *order,
enum enum_duplicates handle_duplicates, bool ignore,
uint order_num, ORDER *order, bool ignore,
ALTER_INFO *alter_info, bool do_send_ok)
{
TABLE *table,*new_table=0;
@@ -5794,8 +5791,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
/* We don't want update TIMESTAMP fields during ALTER TABLE. */
new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
new_table->next_number_field=new_table->found_next_number_field;
error=copy_data_between_tables(table,new_table,create_list,
handle_duplicates, ignore,
error=copy_data_between_tables(table, new_table, create_list, ignore,
order_num, order, &copied, &deleted);
}
thd->last_insert_id=next_insert_id; // Needed for correct log
@@ -6209,7 +6205,6 @@ end_temporary:
static int
copy_data_between_tables(TABLE *from,TABLE *to,
List<create_field> &create,
enum enum_duplicates handle_duplicates,
bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,
@@ -6308,8 +6303,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
/* Tell handler that we have values for all columns in the to table */
to->use_all_columns();
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
if (ignore ||
handle_duplicates == DUP_REPLACE)
if (ignore)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
thd->row_count= 0;
restore_record(to, s->default_values); // Create empty record
@@ -6336,7 +6330,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
}
if ((error=to->file->ha_write_row((byte*) to->record[0])))
{
if (!ignore || handle_duplicates != DUP_ERROR ||
if (!ignore ||
to->file->is_fatal_error(error, HA_CHECK_DUP))
{
if (!to->file->is_fatal_error(error, HA_CHECK_DUP))
@@ -6428,7 +6422,7 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info,
table_list, lex->create_list,
lex->key_list, 0, (ORDER *) 0,
DUP_ERROR, 0, &lex->alter_info, do_send_ok));
0, &lex->alter_info, do_send_ok));
}

@@ -1009,8 +1009,15 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
}
/*
Let us bind Item_trigger_field objects representing access to fields
in old/new versions of row in trigger to Field objects in table being
Gather all Item_trigger_field objects representing access to fields
in the old/new versions of the row into lists, one list per
combination of trigger event and action time.
*/
triggers->trigger_fields[lex.trg_chistics.event]
[lex.trg_chistics.action_time]=
(Item_trigger_field *)(lex.trg_table_fields.first);
/*
Also let us bind these objects to Field objects in the table being
opened.
We ignore errors here, because even if something is wrong we still
@@ -1523,6 +1530,44 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
}
/*
Mark the fields of the subject table which its triggers read or set.
SYNOPSIS
mark_fields_used()
thd Current thread context
event Type of event whose triggers we are going to inspect
DESCRIPTION
This method marks the fields of the subject table which are read/set
in its triggers (by updating TABLE::read_set/write_set accordingly)
and thus informs the handler that values for these fields should be
retrieved/stored during execution of the statement.
*/
void Table_triggers_list::mark_fields_used(trg_event_type event)
{
int action_time;
Item_trigger_field *trg_field;
for (action_time= 0; action_time < (int)TRG_ACTION_MAX; action_time++)
{
for (trg_field= trigger_fields[event][action_time]; trg_field;
trg_field= trg_field->next_trg_field)
{
/* We cannot mark fields which are not present in the table. */
if (trg_field->field_idx != (uint)-1)
{
bitmap_set_bit(table->read_set, trg_field->field_idx);
if (trg_field->get_settable_routine_parameter())
bitmap_set_bit(table->write_set, trg_field->field_idx);
}
}
}
table->file->column_bitmaps_signal();
}
/*
Trigger BUG#14090 compatibility hook
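
A minimal model of what mark_fields_used() achieves (std::bitset stands in for the server's MY_BITMAP read_set/write_set; the field list is invented): only the columns a trigger actually touches are flagged, so the handler can avoid retrieving or storing the rest instead of falling back to all columns.

#include <bitset>
#include <cstdio>
#include <vector>

int main() {
  std::bitset<5> read_set, write_set;   // toy table with 5 columns

  // Pretend the trigger reads OLD.f1 and OLD.f3 and assigns NEW.f3:
  // (field_idx, settable) pairs, like the Item_trigger_field list.
  struct trg_field { int field_idx; bool settable; };
  std::vector<trg_field> fields= {{1, false}, {3, false}, {3, true}};

  for (const trg_field &f : fields) {
    read_set.set(f.field_idx);          // value must be retrieved
    if (f.settable)
      write_set.set(f.field_idx);       // value must be stored
  }
  std::printf("read_set=%s write_set=%s\n",
              read_set.to_string().c_str(),
              write_set.to_string().c_str());
  return 0;
}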

@@ -25,6 +25,11 @@ class Table_triggers_list: public Sql_alloc
{
/* Triggers as SPs grouped by event, action_time */
sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX];
/*
Heads of the lists linking items for all fields used in triggers
grouped by event and action_time.
*/
Item_trigger_field *trigger_fields[TRG_EVENT_MAX][TRG_ACTION_MAX];
/*
Copy of TABLE::Field array with field pointers set to TABLE::record[1]
buffer instead of TABLE::record[0] (used for OLD values in on UPDATE
@@ -82,6 +87,7 @@ public:
record1_field(0), table(table_arg)
{
bzero((char *)bodies, sizeof(bodies));
bzero((char *)trigger_fields, sizeof(trigger_fields));
bzero((char *)&subject_table_grants, sizeof(subject_table_grants));
}
~Table_triggers_list();
@@ -119,6 +125,8 @@ public:
void set_table(TABLE *new_table);
void mark_fields_used(trg_event_type event);
friend class Item_trigger_field;
friend int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
TABLE_LIST *table);
@@ -132,10 +140,6 @@ private:
const char *db_name,
LEX_STRING *old_table_name,
LEX_STRING *new_table_name);
friend void st_table::mark_columns_needed_for_insert(void);
friend void st_table::mark_columns_needed_for_update(void);
friend void st_table::mark_columns_needed_for_delete(void);
};
extern const LEX_STRING trg_action_time_type_names[];

@@ -3925,16 +3925,7 @@ void st_table::mark_auto_increment_column()
void st_table::mark_columns_needed_for_delete()
{
if (triggers)
{
if (triggers->bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] ||
triggers->bodies[TRG_EVENT_DELETE][TRG_ACTION_AFTER])
{
/* TODO: optimize to only add columns used by trigger */
use_all_columns();
return;
}
}
triggers->mark_fields_used(TRG_EVENT_DELETE);
if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE)
{
Field **reg_field;
@@ -3985,15 +3976,7 @@ void st_table::mark_columns_needed_for_update()
{
DBUG_ENTER("mark_columns_needed_for_update");
if (triggers)
{
if (triggers->bodies[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE] ||
triggers->bodies[TRG_EVENT_UPDATE][TRG_ACTION_AFTER])
{
/* TODO: optimize to only add columns used by trigger */
use_all_columns();
DBUG_VOID_RETURN;
}
}
triggers->mark_fields_used(TRG_EVENT_UPDATE);
if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE)
{
/* Mark all used key columns for read */
@@ -4036,13 +4019,14 @@ void st_table::mark_columns_needed_for_insert()
{
if (triggers)
{
if (triggers->bodies[TRG_EVENT_INSERT][TRG_ACTION_BEFORE] ||
triggers->bodies[TRG_EVENT_INSERT][TRG_ACTION_AFTER])
{
/* TODO: optimize to only add columns used by trigger */
use_all_columns();
return;
}
/*
We don't need to mark columns which are used by ON DELETE and
ON UPDATE triggers, which may be invoked for REPLACE or
INSERT ... ON DUPLICATE KEY UPDATE: before doing the actual
row replacement or update, write_record() will mark all table
fields as used.
*/
triggers->mark_fields_used(TRG_EVENT_INSERT);
}
if (found_next_number_field)
mark_auto_increment_column();