Merge mysql.com:/home/dlenev/mysql-5.0-bg18437-3
into mysql.com:/home/dlenev/mysql-5.1-bg18437
commit eb3ae6eb79
@@ -156,7 +156,16 @@ enum ha_extra_function {
Mark the table as a log table. For some handlers (e.g. CSV) this results
in a special locking for the table.
*/
HA_EXTRA_MARK_AS_LOG_TABLE
HA_EXTRA_MARK_AS_LOG_TABLE,
/*
Informs handler that write_row() which tries to insert new row into the
table and encounters some already existing row with same primary/unique
key can replace old row with new row instead of reporting error (basically
it informs handler that we do REPLACE instead of simple INSERT).
Off by default.
*/
HA_EXTRA_WRITE_CAN_REPLACE,
HA_EXTRA_WRITE_CANNOT_REPLACE
};

/* The following is parameter to ha_panic() */
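The two hints introduced above form a small protocol between the SQL layer and a storage engine. The following standalone sketch is not part of the commit; it only models, under invented names (ToyHandler), how an engine might latch HA_EXTRA_WRITE_CAN_REPLACE / HA_EXTRA_WRITE_CANNOT_REPLACE in its extra() hook, in the spirit of the ha_ndbcluster::extra() change further down in this diff.

// Hypothetical, self-contained sketch (not MySQL source code).
#include <cstdio>

enum ha_extra_function_sketch {
  HA_EXTRA_WRITE_CAN_REPLACE_SKETCH,
  HA_EXTRA_WRITE_CANNOT_REPLACE_SKETCH
};

class ToyHandler {
public:
  bool m_use_write;         // when true, write_row() may overwrite duplicate-key rows
  bool m_has_unique_index;  // an engine cannot blindly overwrite when a unique index exists

  explicit ToyHandler(bool has_unique_index)
    : m_use_write(false), m_has_unique_index(has_unique_index) {}

  // The SQL layer calls extra() with the hint before and after the statement.
  int extra(ha_extra_function_sketch op) {
    switch (op) {
    case HA_EXTRA_WRITE_CAN_REPLACE_SKETCH:
      if (!m_has_unique_index)   // same guard the NDB handler uses below
        m_use_write = true;
      break;
    case HA_EXTRA_WRITE_CANNOT_REPLACE_SKETCH:
      m_use_write = false;
      break;
    }
    return 0;
  }
};

int main() {
  ToyHandler h(/*has_unique_index=*/false);
  h.extra(HA_EXTRA_WRITE_CAN_REPLACE_SKETCH);    // REPLACE without ON DELETE triggers
  std::printf("use write: %d\n", h.m_use_write); // -> 1
  h.extra(HA_EXTRA_WRITE_CANNOT_REPLACE_SKETCH); // statement finished
  std::printf("use write: %d\n", h.m_use_write); // -> 0
  return 0;
}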
@@ -1602,6 +1602,34 @@ fld_cid fld_name fld_parentid fld_delt
DROP TABLE federated.t1;
DROP TABLE federated.bug_17377_table;
DROP TABLE federated.t1;
drop table if exists federated.t1;
create table federated.t1 (a int, b int, c int);
drop table if exists federated.t1;
drop table if exists federated.t2;
create table federated.t1 (a int, b int, c int) engine=federated connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
create trigger federated.t1_bi before insert on federated.t1 for each row set new.c= new.a * new.b;
create table federated.t2 (a int, b int);
insert into federated.t2 values (13, 17), (19, 23);
insert into federated.t1 (a, b) values (1, 2), (3, 5), (7, 11);
select * from federated.t1;
a b c
1 2 2
3 5 15
7 11 77
delete from federated.t1;
insert into federated.t1 (a, b) select * from federated.t2;
select * from federated.t1;
a b c
13 17 221
19 23 437
delete from federated.t1;
load data infile '../std_data_ln/loaddata5.dat' into table federated.t1 fields terminated by '' enclosed by '' ignore 1 lines (a, b);
select * from federated.t1;
a b c
3 4 12
5 6 30
drop tables federated.t1, federated.t2;
drop table federated.t1;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
@@ -31,6 +31,7 @@ SELECT * from t1 ORDER BY i;
i j k
3 1 42
17 2 NULL
DROP TABLE t1;
CREATE TABLE t2 (a INT(11) NOT NULL,
b INT(11) NOT NULL,
c INT(11) NOT NULL,
@@ -52,3 +53,47 @@ SELECT * FROM t2 ORDER BY id;
a b c x y z id i
1 1 1 b b b 5 2
DROP TABLE t2;
drop table if exists t1;
create table t1 (pk int primary key, apk int unique, data int) engine=ndbcluster;
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) values (4, 1), (5, 2);
select * from t1 order by pk;
pk apk data
3 3 3
4 1 NULL
5 2 NULL
delete from t1;
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) values (1, 4), (2, 5);
select * from t1 order by pk;
pk apk data
1 4 NULL
2 5 NULL
3 3 3
delete from t1;
insert into t1 values (1, 1, 1), (4, 4, 4), (6, 6, 6);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
select * from t1 order by pk;
pk apk data
1 1 1
3 4 NULL
5 6 NULL
delete from t1;
insert into t1 values (1, 1, 1), (3, 3, 3), (5, 5, 5);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
select * from t1 order by pk;
pk apk data
1 1 1
3 4 NULL
5 6 NULL
delete from t1;
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) select 4, 1;
replace into t1 (pk, apk) select 2, 4;
select * from t1 order by pk;
pk apk data
2 4 NULL
3 3 3
4 1 NULL
drop table t1;
End of 5.0 tests.
mysql-test/r/ndb_trigger.result (new file, 119 lines)
@@ -0,0 +1,119 @@
drop table if exists t1, t2, t3;
create table t1 (id int primary key, a int not null, b decimal (63,30) default 0) engine=ndb;
create table t2 (op char(1), a int not null, b decimal (63,30));
create table t3 select 1 as i;
create trigger t1_bu before update on t1 for each row
begin
insert into t2 values ("u", old.a, old.b);
set new.b = old.b + 10;
end;//
create trigger t1_bd before delete on t1 for each row
begin
insert into t2 values ("d", old.a, old.b);
end;//
insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (3, 3, 3.05), (4, 4, 4.05);
update t1 set a=5 where a != 3;
select * from t1 order by id;
id a b
1 5 11.050000000000000000000000000000
2 5 12.050000000000000000000000000000
3 3 3.050000000000000000000000000000
4 5 14.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
u 1 1.050000000000000000000000000000
u 2 2.050000000000000000000000000000
u 4 4.050000000000000000000000000000
delete from t2;
update t1, t3 set a=6 where a = 5;
select * from t1 order by id;
id a b
1 6 21.050000000000000000000000000000
2 6 22.050000000000000000000000000000
3 3 3.050000000000000000000000000000
4 6 24.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
u 5 11.050000000000000000000000000000
u 5 12.050000000000000000000000000000
u 5 14.050000000000000000000000000000
delete from t2;
delete from t1 where a != 3;
select * from t1 order by id;
id a b
3 3 3.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 6 21.050000000000000000000000000000
d 6 22.050000000000000000000000000000
d 6 24.050000000000000000000000000000
delete from t2;
insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (4, 4, 4.05);
delete t1 from t1, t3 where a != 3;
select * from t1 order by id;
id a b
3 3 3.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 1 1.050000000000000000000000000000
d 2 2.050000000000000000000000000000
d 4 4.050000000000000000000000000000
delete from t2;
insert into t1 values (4, 4, 4.05);
insert into t1 (id, a) values (4, 1), (3, 1) on duplicate key update a= a + 1;
select * from t1 order by id;
id a b
3 4 13.050000000000000000000000000000
4 5 14.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
u 3 3.050000000000000000000000000000
u 4 4.050000000000000000000000000000
delete from t2;
delete from t3;
insert into t3 values (4), (3);
insert into t1 (id, a) (select i, 1 from t3) on duplicate key update a= a + 1;
select * from t1 order by id;
id a b
3 5 23.050000000000000000000000000000
4 6 24.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
u 4 13.050000000000000000000000000000
u 5 14.050000000000000000000000000000
delete from t2;
replace into t1 (id, a) values (4, 1), (3, 1);
select * from t1 order by id;
id a b
3 1 0.000000000000000000000000000000
4 1 0.000000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 5 23.050000000000000000000000000000
d 6 24.050000000000000000000000000000
delete from t1;
delete from t2;
insert into t1 values (3, 1, 1.05), (4, 1, 2.05);
replace into t1 (id, a) (select i, 2 from t3);
select * from t1 order by id;
id a b
3 2 0.000000000000000000000000000000
4 2 0.000000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 1 1.050000000000000000000000000000
d 1 2.050000000000000000000000000000
delete from t1;
delete from t2;
insert into t1 values (3, 1, 1.05), (5, 2, 2.05);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (id, a);
select * from t1 order by id;
id a b
3 4 0.000000000000000000000000000000
5 6 0.000000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 1 1.050000000000000000000000000000
d 2 2.050000000000000000000000000000
drop tables t1, t2, t3;
End of 5.0 tests
@@ -1363,4 +1363,46 @@ drop table federated.t1, federated.t2;
connection master;
--enable_parsing

#
# Additional test for bug#18437 "Wrong values inserted with a before
# update trigger on NDB table". SQL-layer didn't properly inform
# handler about fields which were read and set in triggers. In some
# cases this resulted in incorrect (garbage) values of OLD variables
# and lost changes to NEW variables.
# Since for federated engine only operation which is affected by wrong
# fields mark-up is handler::write_row() this file contains coverage
# for ON INSERT triggers only. Tests for other types of triggers reside
# in ndb_trigger.test.
#
--disable_warnings
drop table if exists federated.t1;
--enable_warnings
create table federated.t1 (a int, b int, c int);
connection master;
--disable_warnings
drop table if exists federated.t1;
drop table if exists federated.t2;
--enable_warnings
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval create table federated.t1 (a int, b int, c int) engine=federated connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
create trigger federated.t1_bi before insert on federated.t1 for each row set new.c= new.a * new.b;
create table federated.t2 (a int, b int);
insert into federated.t2 values (13, 17), (19, 23);
# Each of three statements should correctly set values for all three fields
# insert
insert into federated.t1 (a, b) values (1, 2), (3, 5), (7, 11);
select * from federated.t1;
delete from federated.t1;
# insert ... select
insert into federated.t1 (a, b) select * from federated.t2;
select * from federated.t1;
delete from federated.t1;
# load
load data infile '../std_data_ln/loaddata5.dat' into table federated.t1 fields terminated by '' enclosed by '' ignore 1 lines (a, b);
select * from federated.t1;
drop tables federated.t1, federated.t2;

connection slave;
drop table federated.t1;

--source include/federated_cleanup.inc
@@ -39,6 +39,7 @@ INSERT INTO t1 VALUES (1,1,23),(2,2,24);
REPLACE INTO t1 (j,k) VALUES (1,42);
REPLACE INTO t1 (i,j) VALUES (17,2);
SELECT * from t1 ORDER BY i;
DROP TABLE t1;

# bug#19906
CREATE TABLE t2 (a INT(11) NOT NULL,
@@ -64,4 +65,40 @@ SELECT * FROM t2 ORDER BY id;

DROP TABLE t2;

#
# Bug #20728 "REPLACE does not work correctly for NDB table with PK and
# unique index"
#
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (pk int primary key, apk int unique, data int) engine=ndbcluster;
# Test for plain replace which updates pk
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) values (4, 1), (5, 2);
select * from t1 order by pk;
delete from t1;
# Another test for plain replace which doesn't touch pk
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) values (1, 4), (2, 5);
select * from t1 order by pk;
delete from t1;
# Test for load data replace which updates pk
insert into t1 values (1, 1, 1), (4, 4, 4), (6, 6, 6);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
select * from t1 order by pk;
delete from t1;
# Now test for load data replace which doesn't touch pk
insert into t1 values (1, 1, 1), (3, 3, 3), (5, 5, 5);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
select * from t1 order by pk;
delete from t1;
# Finally test for both types of replace ... select
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) select 4, 1;
replace into t1 (pk, apk) select 2, 4;
select * from t1 order by pk;
# Clean-up
drop table t1;

--echo End of 5.0 tests.
mysql-test/t/ndb_trigger.test (new file, 92 lines)
@@ -0,0 +1,92 @@
# Tests which involve triggers and NDB storage engine
--source include/have_ndb.inc
--source include/not_embedded.inc

#
# Test for bug#18437 "Wrong values inserted with a before update
# trigger on NDB table". SQL-layer didn't properly inform handler
# about fields which were read and set in triggers. In some cases
# this resulted in incorrect (garbage) values of OLD variables and
# lost changes to NEW variables.
# You can find similar tests for ON INSERT triggers in federated.test
# since this engine so far is the only engine in MySQL which cares
# about field mark-up during handler::write_row() operation.
#

--disable_warnings
drop table if exists t1, t2, t3;
--enable_warnings

create table t1 (id int primary key, a int not null, b decimal (63,30) default 0) engine=ndb;
create table t2 (op char(1), a int not null, b decimal (63,30));
create table t3 select 1 as i;

delimiter //;
create trigger t1_bu before update on t1 for each row
begin
insert into t2 values ("u", old.a, old.b);
set new.b = old.b + 10;
end;//
create trigger t1_bd before delete on t1 for each row
begin
insert into t2 values ("d", old.a, old.b);
end;//
delimiter ;//
insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (3, 3, 3.05), (4, 4, 4.05);

# Check that usual update works as it should
update t1 set a=5 where a != 3;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check that everything works for multi-update
update t1, t3 set a=6 where a = 5;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for delete
delete from t1 where a != 3;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for multi-delete
insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (4, 4, 4.05);
delete t1 from t1, t3 where a != 3;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for insert ... on duplicate key update
insert into t1 values (4, 4, 4.05);
insert into t1 (id, a) values (4, 1), (3, 1) on duplicate key update a= a + 1;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for insert ... select ... on duplicate key update
delete from t3;
insert into t3 values (4), (3);
insert into t1 (id, a) (select i, 1 from t3) on duplicate key update a= a + 1;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for replace
replace into t1 (id, a) values (4, 1), (3, 1);
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t1;
delete from t2;
# Check for replace ... select ...
insert into t1 values (3, 1, 1.05), (4, 1, 2.05);
replace into t1 (id, a) (select i, 2 from t3);
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t1;
delete from t2;
# Check for load data replace
insert into t1 values (3, 1, 1.05), (5, 2, 2.05);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (id, a);
select * from t1 order by id;
select * from t2 order by op, a, b;

drop tables t1, t2, t3;

--echo End of 5.0 tests
@@ -3663,20 +3663,11 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
switch (operation) {
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
if (current_thd->lex->sql_command == SQLCOM_REPLACE && !m_has_unique_index)
{
DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
m_use_write= TRUE;
} else
{
DBUG_PRINT("info", ("Ignoring duplicate key"));
m_ignore_dup_key= TRUE;
}
DBUG_PRINT("info", ("Ignoring duplicate key"));
m_ignore_dup_key= TRUE;
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= FALSE;
m_ignore_dup_key= FALSE;
break;
case HA_EXTRA_IGNORE_NO_KEY:
@@ -3689,6 +3680,19 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit"));
m_ignore_no_key= FALSE;
break;
case HA_EXTRA_WRITE_CAN_REPLACE:
DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE"));
if (!m_has_unique_index)
{
DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
m_use_write= TRUE;
}
break;
case HA_EXTRA_WRITE_CANNOT_REPLACE:
DBUG_PRINT("info", ("HA_EXTRA_WRITE_CANNOT_REPLACE"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= FALSE;
break;
default:
break;
}
@@ -5436,9 +5436,14 @@ void Item_insert_value::print(String *str)
void Item_trigger_field::setup_field(THD *thd, TABLE *table,
GRANT_INFO *table_grant_info)
{
/*
There is no sense in marking fields used by trigger with current value
of THD::query_id since it is completely unrelated to the THD::query_id
value for statements which will invoke trigger. So instead we use
Table_triggers_list::mark_fields_used() method which is called during
execution of these statements.
*/
enum_mark_columns save_mark_used_columns= thd->mark_used_columns;

/* TODO: Think more about consequences of this step. */
thd->mark_used_columns= MARK_COLUMNS_NONE;
/*
Try to find field by its name and if it will be found
@@ -872,9 +872,7 @@ bool mysql_alter_table(THD *thd, char *new_db, char *new_name,
TABLE_LIST *table_list,
List<create_field> &fields,
List<Key> &keys,
uint order_num, ORDER *order,
enum enum_duplicates handle_duplicates,
bool ignore,
uint order_num, ORDER *order, bool ignore,
ALTER_INFO *alter_info, bool do_send_ok);
bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok);
bool mysql_create_like_table(THD *thd, TABLE_LIST *table,
@@ -910,6 +908,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
bool ignore);
int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
TABLE_LIST *table_list);
void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
enum_duplicates duplic);
bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds);
bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
SQL_LIST *order, ha_rows rows, ulonglong options,
@@ -255,6 +255,33 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
}


/*
Mark fields used by triggers for INSERT-like statement.

SYNOPSIS
mark_fields_used_by_triggers_for_insert_stmt()
thd The current thread
table Table to which insert will happen
duplic Type of duplicate handling for insert which will happen

NOTE
For REPLACE there is no sense in marking particular fields
used by ON DELETE trigger as to execute it properly we have
to retrieve and store values for all table columns anyway.
*/

void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
enum_duplicates duplic)
{
if (table->triggers)
{
table->triggers->mark_fields_used(thd, TRG_EVENT_INSERT);
if (duplic == DUP_UPDATE)
table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
}
}


bool mysql_insert(THD *thd,TABLE_LIST *table_list,
List<Item> &fields,
List<List_item> &values_list,
@@ -415,6 +442,17 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->proc_info="update";
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (duplic == DUP_REPLACE)
{
if (!table->triggers || !table->triggers->has_delete_triggers())
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
/*
REPLACE should change values of all columns so we should mark
all columns as columns to be set. As nice side effect we will
retrieve columns which values are needed for ON DELETE triggers.
*/
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
/*
let's *try* to start bulk inserts. It won't necessary
start them as values_list.elements should be greater than
@@ -443,6 +481,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
error= 1;
}

mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic);

if (table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd))
error= 1;
@@ -616,6 +656,9 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->next_insert_id=0; // Reset this if wrongly used
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
if (duplic == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);

/* Reset value of LAST_INSERT_ID if no rows where inserted */
if (!info.copied && thd->insert_id_used)
@@ -1970,9 +2013,8 @@ bool delayed_insert::handle_inserts(void)
{
int error;
ulong max_rows;
bool using_ignore=0,
using_bin_log= mysql_bin_log.is_open();

bool using_ignore= 0, using_opt_replace= 0,
using_bin_log= mysql_bin_log.is_open();
delayed_row *row;
DBUG_ENTER("handle_inserts");

@@ -2035,6 +2077,13 @@ bool delayed_insert::handle_inserts(void)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
using_ignore=1;
}
if (info.handle_duplicates == DUP_REPLACE &&
(!table->triggers ||
!table->triggers->has_delete_triggers()))
{
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
using_opt_replace= 1;
}
thd.clear_error(); // reset error for binlog
if (write_record(&thd, table, &info))
{
@@ -2047,6 +2096,11 @@ bool delayed_insert::handle_inserts(void)
using_ignore=0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
}
if (using_opt_replace)
{
using_opt_replace= 0;
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
}
if (table->s->blob_fields)
free_delayed_insert_blobs(table);
thread_safe_sub(delayed_rows_in_use,1,&LOCK_delayed_status);
@@ -2292,6 +2346,12 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (info.handle_duplicates == DUP_REPLACE)
{
if (!table->triggers || !table->triggers->has_delete_triggers())
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
thd->no_trans_update= 0;
thd->abort_on_warning= (!info.ignore &&
(thd->variables.sql_mode &
@@ -2301,6 +2361,10 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
check_that_all_fields_are_given_values(thd, table, table_list)) ||
table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd));

if (!res)
mark_fields_used_by_triggers_for_insert_stmt(thd, table,
info.handle_duplicates);
DBUG_RETURN(res);
}

@@ -2491,6 +2555,7 @@ bool select_insert::send_eof()

error= (!thd->prelocked_mode) ? table->file->ha_end_bulk_insert():0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);

if (info.copied || info.deleted || info.updated)
{
@@ -2775,6 +2840,12 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (info.handle_duplicates == DUP_REPLACE)
{
if (!table->triggers || !table->triggers->has_delete_triggers())
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
if (!thd->prelocked_mode)
table->file->ha_start_bulk_insert((ha_rows) 0);
thd->no_trans_update= 0;
@@ -2857,6 +2928,7 @@ bool select_create::send_eof()
else
{
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
VOID(pthread_mutex_lock(&LOCK_open));
mysql_unlock_tables(thd, thd->extra_lock);
if (!table->s->tmp_table)
@@ -2882,6 +2954,7 @@ void select_create::abort()
if (table)
{
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
handlerton *table_type=table->s->db_type;
if (!table->s->tmp_table)
{
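All of the sql_insert.cc hunks above follow one policy, which the following standalone sketch models under invented names (ToyTable, ToyHandler; this is not MySQL source): the write-can-replace hint is handed out only for REPLACE on a table without ON DELETE triggers, and it is always withdrawn once the statement ends.

// Hypothetical, self-contained sketch of the SQL-layer policy shown above.
#include <cstdio>

struct ToyHandler {
  bool use_write = false;                      // models handler::m_use_write
  void extra_can_replace()    { use_write = true;  }
  void extra_cannot_replace() { use_write = false; }
};

struct ToyTable {
  ToyHandler file;
  bool has_delete_triggers;
};

enum DuplicatesSketch { DUP_ERROR_SKETCH, DUP_REPLACE_SKETCH, DUP_UPDATE_SKETCH };

void run_insert(ToyTable &t, DuplicatesSketch duplic) {
  // Before writing rows: allow the engine to overwrite duplicates only when
  // no ON DELETE trigger would be silently skipped by doing so.
  if (duplic == DUP_REPLACE_SKETCH && !t.has_delete_triggers)
    t.file.extra_can_replace();

  // ... rows would be written here ...
  std::printf("writing rows, use_write=%d\n", t.file.use_write);

  // After the statement: withdraw the hint, as HA_EXTRA_WRITE_CANNOT_REPLACE does.
  if (duplic == DUP_REPLACE_SKETCH && !t.has_delete_triggers)
    t.file.extra_cannot_replace();
}

int main() {
  ToyTable plain{ToyHandler{}, /*has_delete_triggers=*/false};
  ToyTable with_trigger{ToyHandler{}, /*has_delete_triggers=*/true};
  run_insert(plain, DUP_REPLACE_SKETCH);        // use_write=1 during the write
  run_insert(with_trigger, DUP_REPLACE_SKETCH); // use_write=0: triggers must see real deletes
  return 0;
}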
@@ -232,6 +232,8 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
}

mark_fields_used_by_triggers_for_insert_stmt(thd, table, handle_duplicates);

uint tot_length=0;
bool use_blobs= 0, use_vars= 0;
List_iterator_fast<Item> it(fields_vars);
@@ -362,6 +364,13 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
if (ignore ||
handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (handle_duplicates == DUP_REPLACE)
{
if (!table->triggers ||
!table->triggers->has_delete_triggers())
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
if (!thd->prelocked_mode)
table->file->ha_start_bulk_insert((ha_rows) 0);
table->copy_blobs=1;
@@ -386,6 +395,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
error= 1;
}
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
table->next_number_field=0;
}
ha_enable_transaction(thd, TRUE);
@@ -3096,8 +3096,7 @@ end_with_restore_list:
lex->key_list,
select_lex->order_list.elements,
(ORDER *) select_lex->order_list.first,
lex->duplicates, lex->ignore, &lex->alter_info,
1);
lex->ignore, &lex->alter_info, 1);
break;
}
#endif /*DONT_ALLOW_SHOW_COMMANDS*/
@@ -7294,7 +7293,7 @@ bool mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys)
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name,
&create_info, table_list,
fields, keys, 0, (ORDER*)0,
DUP_ERROR, 0, &alter_info, 1));
0, &alter_info, 1));
}


@@ -7312,7 +7311,7 @@ bool mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info)
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name,
&create_info, table_list,
fields, keys, 0, (ORDER*)0,
DUP_ERROR, 0, alter_info, 1));
0, alter_info, 1));
}
@@ -35,9 +35,7 @@ const char *primary_key_name="PRIMARY";
static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end);
static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end);
static int copy_data_between_tables(TABLE *from,TABLE *to,
List<create_field> &create,
enum enum_duplicates handle_duplicates,
bool ignore,
List<create_field> &create, bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,ha_rows *deleted);
static bool prepare_blob_field(THD *thd, create_field *sql_field);
@@ -4941,8 +4939,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
HA_CREATE_INFO *lex_create_info,
TABLE_LIST *table_list,
List<create_field> &fields, List<Key> &keys,
uint order_num, ORDER *order,
enum enum_duplicates handle_duplicates, bool ignore,
uint order_num, ORDER *order, bool ignore,
ALTER_INFO *alter_info, bool do_send_ok)
{
TABLE *table,*new_table=0;
@@ -5780,8 +5777,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
/* We don't want update TIMESTAMP fields during ALTER TABLE. */
new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
new_table->next_number_field=new_table->found_next_number_field;
error=copy_data_between_tables(table,new_table,create_list,
handle_duplicates, ignore,
error=copy_data_between_tables(table, new_table, create_list, ignore,
order_num, order, &copied, &deleted);
}
thd->last_insert_id=next_insert_id; // Needed for correct log
@@ -6195,7 +6191,6 @@ end_temporary:
static int
copy_data_between_tables(TABLE *from,TABLE *to,
List<create_field> &create,
enum enum_duplicates handle_duplicates,
bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,
@@ -6294,8 +6289,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
/* Tell handler that we have values for all columns in the to table */
to->use_all_columns();
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
if (ignore ||
handle_duplicates == DUP_REPLACE)
if (ignore)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
thd->row_count= 0;
restore_record(to, s->default_values); // Create empty record
@@ -6322,8 +6316,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
}
if ((error=to->file->ha_write_row((byte*) to->record[0])))
{
if ((!ignore &&
handle_duplicates != DUP_REPLACE) ||
if (!ignore ||
(error != HA_ERR_FOUND_DUPP_KEY &&
error != HA_ERR_FOUND_DUPP_UNIQUE))
{
@@ -6416,7 +6409,7 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info,
table_list, lex->create_list,
lex->key_list, 0, (ORDER *) 0,
DUP_ERROR, 0, &lex->alter_info, do_send_ok));
0, &lex->alter_info, do_send_ok));
}
@@ -1010,8 +1010,15 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
}

/*
Let us bind Item_trigger_field objects representing access to fields
in old/new versions of row in trigger to Field objects in table being
Gather all Item_trigger_field objects representing access to fields
in old/new versions of row in trigger into lists containing all such
objects for the triggers with same action and timing.
*/
triggers->trigger_fields[lex.trg_chistics.event]
[lex.trg_chistics.action_time]=
(Item_trigger_field *)(lex.trg_table_fields.first);
/*
Also let us bind these objects to Field objects in table being
opened.

We ignore errors here, because if even something is wrong we still
@@ -1526,6 +1533,39 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
}


/*
Mark fields of subject table which we read/set in its triggers as such.

SYNOPSIS
mark_fields_used()
thd Current thread context
event Type of event triggers for which we are going to inspect

DESCRIPTION
This method marks fields of subject table which are read/set in its
triggers as such (by setting Field::query_id equal to THD::query_id)
and thus informs handler that values for these fields should be
retrieved/stored during execution of statement.
*/

void Table_triggers_list::mark_fields_used(THD *thd, trg_event_type event)
{
int action_time;
Item_trigger_field *trg_field;

for (action_time= 0; action_time < (int)TRG_ACTION_MAX; action_time++)
{
for (trg_field= trigger_fields[event][action_time]; trg_field;
trg_field= trg_field->next_trg_field)
{
/* We cannot mark fields which does not present in table. */
if (trg_field->field_idx != (uint)-1)
table->field[trg_field->field_idx]->query_id = thd->query_id;
}
}
}


/*
Trigger BUG#14090 compatibility hook
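The marking step itself is small; the standalone sketch below (invented ToyField/ToyTable types, not MySQL source) models the idea behind Table_triggers_list::mark_fields_used() above: stamp every field referenced by the relevant triggers with the current query id, so the handler later knows it must retrieve and store those columns.

// Hypothetical, self-contained sketch of query-id based field marking.
#include <cstdio>
#include <vector>

struct ToyField { unsigned long query_id = 0; };

struct ToyTable {
  std::vector<ToyField> field;          // all columns of the table
  std::vector<int> trigger_field_idx;   // indexes of columns referenced by triggers
};

void mark_fields_used(ToyTable &t, unsigned long current_query_id) {
  for (int idx : t.trigger_field_idx)
    if (idx >= 0)                       // skip fields not present in the table
      t.field[idx].query_id = current_query_id;
}

int main() {
  ToyTable t;
  t.field.resize(3);
  t.trigger_field_idx = {0, 2};         // the trigger touches columns 0 and 2
  mark_fields_used(t, /*current_query_id=*/42);
  for (std::size_t i = 0; i < t.field.size(); ++i)
    std::printf("field %zu query_id=%lu\n", i, t.field[i].query_id);
  return 0;
}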
@@ -25,6 +25,11 @@ class Table_triggers_list: public Sql_alloc
{
/* Triggers as SPs grouped by event, action_time */
sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX];
/*
Heads of the lists linking items for all fields used in triggers
grouped by event and action_time.
*/
Item_trigger_field *trigger_fields[TRG_EVENT_MAX][TRG_ACTION_MAX];
/*
Copy of TABLE::Field array with field pointers set to TABLE::record[1]
buffer instead of TABLE::record[0] (used for OLD values in on UPDATE
@@ -82,6 +87,7 @@ public:
record1_field(0), table(table_arg)
{
bzero((char *)bodies, sizeof(bodies));
bzero((char *)trigger_fields, sizeof(trigger_fields));
bzero((char *)&subject_table_grants, sizeof(subject_table_grants));
}
~Table_triggers_list();
@@ -119,6 +125,8 @@ public:

void set_table(TABLE *new_table);

void mark_fields_used(THD *thd, trg_event_type event);

friend class Item_trigger_field;
friend int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
TABLE_LIST *table);