Merge abarkov@bk-internal.mysql.com:/home/bk/mysql-5.1-rpl

into  mysql.com:/home/bar/mysql-work/mysql-5.1.b27287
unknown 2007-10-15 11:16:53 +05:00
commit 9458aa48a3
32 changed files with 1150 additions and 265 deletions

View File

@ -126,7 +126,9 @@ drop table t1,t2;
#
CREATE TABLE t1 (a int NOT NULL auto_increment primary key) ENGINE=MyISAM;
CREATE TABLE t2 (a int, PRIMARY KEY (a)) ENGINE=InnoDB;
CREATE TABLE t3 (a int, PRIMARY KEY (a), b int unique);
CREATE TABLE t3 (a int, PRIMARY KEY (a), b int unique) ENGINE=MyISAM;
CREATE TABLE t4 (a int, PRIMARY KEY (a), b int unique) ENGINE=Innodb;
CREATE TABLE t5 (a int, PRIMARY KEY (a)) ENGINE=InnoDB;
#
@ -169,7 +171,7 @@ CREATE TABLE t3 (a int, PRIMARY KEY (a), b int unique);
select count(*) from t1 /* must be 2 */;
#
# UPDATE (multi-update see bug#27716)
# UPDATE incl multi-update
#
# prepare
@ -185,9 +187,48 @@ CREATE TABLE t3 (a int, PRIMARY KEY (a), b int unique);
source include/show_binlog_events.inc; # must be events of the query
select count(*) from t1 /* must be 2 */;
## multi_update::send_eof() branch
# prepare
delete from t3;
delete from t4;
insert into t3 values (1,1);
insert into t4 values (1,1),(2,2);
reset master;
# execute
--error ER_DUP_ENTRY
UPDATE t4,t3 SET t4.a=t3.a + bug27417(1) /* top level non-ta table */;
# check
source include/show_binlog_events.inc; # the offset must show that the query was logged
select count(*) from t1 /* must be 4 */;
## send_error() branch of multi_update
# prepare
delete from t1;
delete from t3;
delete from t4;
insert into t3 values (1,1),(2,2);
insert into t4 values (1,1),(2,2);
reset master;
# execute
--error ER_DUP_ENTRY
UPDATE t3,t4 SET t3.a=t4.a + bug27417(1);
# check
select count(*) from t1 /* must be 1 */;
# cleanup
drop table t4;
#
# DELETE (for multi-delete see Bug #29136)
# DELETE incl multi-delete
#
# prepare
@ -203,6 +244,27 @@ CREATE TABLE t3 (a int, PRIMARY KEY (a), b int unique);
# execute
--error ER_DUP_ENTRY
delete from t2;
# check
source include/show_binlog_events.inc; # the offset must show that the query was logged
select count(*) from t1 /* must be 1 */;
# cleanup
drop trigger trg_del;
# prepare
delete from t1;
delete from t2;
delete from t5;
create trigger trg_del_t2 after delete on t2 for each row
insert into t1 values (1);
insert into t2 values (2),(3);
insert into t5 values (1),(2);
reset master;
# execute
--error ER_DUP_ENTRY
delete t2.* from t2,t5 where t2.a=t5.a + 1;
# check
source include/show_binlog_events.inc; # must be events of the query
select count(*) from t1 /* must be 1 */;
@ -229,6 +291,8 @@ CREATE TABLE t3 (a int, PRIMARY KEY (a), b int unique);
#
# bug#23333 cleanup
#
drop trigger trg_del;
drop table t1,t2,t3,t4;
drop trigger trg_del_t2;
drop table t1,t2,t3,t4,t5;
drop function bug27417;

View File

@ -466,7 +466,7 @@ binary data';
select * from t2 order by f1;
select * from t3 order by f1;
select * from t4 order by f1;
select * from t31 order by f1;
select * from t31 order by f3;
connection master;

View File

@ -0,0 +1,33 @@
###################################################
#Author: Sven
#Date: 2007-10-09
#Purpose: Wait until the slave SQL thread reports an
# error, as indicated by "SHOW SLAVE STATUS",
# or give up after at most 30 seconds.
#Details:
# 1) Fill in and setup variables
# 2) loop, looking for sql error on slave
# 3) If it loops too long, die.
####################################################
connection slave;
let $row_number= 1;
let $run= 1;
let $counter= 300;
while ($run)
{
let $sql_result= query_get_value("SHOW SLAVE STATUS", Last_SQL_Errno, $row_number);
let $run= `SELECT '$sql_result' = '0'`;
if ($run) {
real_sleep 0.1;
if (!$counter){
--echo "Failed while waiting for slave to produce an error in its sql thread"
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 #
query_vertical SHOW SLAVE STATUS;
exit;
}
dec $counter;
}
}

View File

@ -1086,6 +1086,19 @@ n d
1 30
2 20
drop table t1,t2;
drop table if exists t1, t2;
CREATE TABLE t1 (a int, PRIMARY KEY (a));
CREATE TABLE t2 (a int, PRIMARY KEY (a)) ENGINE=InnoDB;
create trigger trg_del_t2 after delete on t2 for each row
insert into t1 values (1);
insert into t1 values (1);
insert into t2 values (1),(2);
delete t2 from t2;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
select count(*) from t2 /* must be 2 as restored after rollback caused by the error */;
count(*)
2
drop table t1, t2;
create table t1 (a int, b int) engine=innodb;
insert into t1 values(20,null);
select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
@ -1751,10 +1764,10 @@ Variable_name Value
Innodb_page_size 16384
show status like "Innodb_rows_deleted";
Variable_name Value
Innodb_rows_deleted 70
Innodb_rows_deleted 71
show status like "Innodb_rows_inserted";
Variable_name Value
Innodb_rows_inserted 1083
Innodb_rows_inserted 1085
show status like "Innodb_rows_updated";
Variable_name Value
Innodb_rows_updated 886

View File

@ -614,6 +614,7 @@ CREATE TABLE `t2` (
`b` int(11) default NULL,
PRIMARY KEY (`a`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 ;
set @sav_binlog_format= @@session.binlog_format;
set @@session.binlog_format= mixed;
insert into t1 values (1,1),(2,2);
insert into t2 values (1,1),(4,4);
@ -626,7 +627,7 @@ a b
4 4
show master status /* there must be the UPDATE query event */;
File Position Binlog_Do_DB Binlog_Ignore_DB
master-bin.000001 197
master-bin.000001 268
delete from t1;
delete from t2;
insert into t1 values (1,2),(3,4),(4,4);
@ -636,6 +637,24 @@ UPDATE t2,t1 SET t2.a=t2.b where t2.a=t1.a;
ERROR 23000: Duplicate entry '4' for key 'PRIMARY'
show master status /* there must be the UPDATE query event */;
File Position Binlog_Do_DB Binlog_Ignore_DB
master-bin.000001 212
master-bin.000001 283
drop table t1, t2;
set @@session.binlog_format= @sav_binlog_format;
drop table if exists t1, t2, t3;
CREATE TABLE t1 (a int, PRIMARY KEY (a));
CREATE TABLE t2 (a int, PRIMARY KEY (a));
CREATE TABLE t3 (a int, PRIMARY KEY (a)) ENGINE=MyISAM;
create trigger trg_del_t3 before delete on t3 for each row insert into t1 values (1);
insert into t2 values (1),(2);
insert into t3 values (1),(2);
reset master;
delete t3.* from t2,t3 where t2.a=t3.a;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
select count(*) from t1 /* must be 1 */;
count(*)
1
select count(*) from t3 /* must be 1 */;
count(*)
1
drop table t1, t2, t3;
end of tests

View File

@ -520,7 +520,9 @@ count(*)
drop table t1,t2;
CREATE TABLE t1 (a int NOT NULL auto_increment primary key) ENGINE=MyISAM;
CREATE TABLE t2 (a int, PRIMARY KEY (a)) ENGINE=InnoDB;
CREATE TABLE t3 (a int, PRIMARY KEY (a), b int unique);
CREATE TABLE t3 (a int, PRIMARY KEY (a), b int unique) ENGINE=MyISAM;
CREATE TABLE t4 (a int, PRIMARY KEY (a), b int unique) ENGINE=Innodb;
CREATE TABLE t5 (a int, PRIMARY KEY (a)) ENGINE=InnoDB;
insert into t2 values (1);
reset master;
insert into t2 values (bug27417(1));
@ -559,6 +561,33 @@ master-bin.000001 # Query # # use `test`; update t3 set b=b+bug27417(1)
select count(*) from t1 /* must be 2 */;
count(*)
2
delete from t3;
delete from t4;
insert into t3 values (1,1);
insert into t4 values (1,1),(2,2);
reset master;
UPDATE t4,t3 SET t4.a=t3.a + bug27417(1) /* top level non-ta table */;
ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Intvar # # INSERT_ID=6
master-bin.000001 # Query # # use `test`; UPDATE t4,t3 SET t4.a=t3.a + bug27417(1) /* top level non-ta table */
master-bin.000001 # Query # # use `test`; ROLLBACK
select count(*) from t1 /* must be 4 */;
count(*)
4
delete from t1;
delete from t3;
delete from t4;
insert into t3 values (1,1),(2,2);
insert into t4 values (1,1),(2,2);
reset master;
UPDATE t3,t4 SET t3.a=t4.a + bug27417(1);
ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
select count(*) from t1 /* must be 1 */;
count(*)
1
drop table t4;
delete from t1;
delete from t2;
delete from t3;
@ -571,12 +600,30 @@ delete from t2;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Intvar # # INSERT_ID=6
master-bin.000001 # Intvar # # INSERT_ID=9
master-bin.000001 # Query # # use `test`; delete from t2
master-bin.000001 # Query # # use `test`; ROLLBACK
select count(*) from t1 /* must be 1 */;
count(*)
1
drop trigger trg_del;
delete from t1;
delete from t2;
delete from t5;
create trigger trg_del_t2 after delete on t2 for each row
insert into t1 values (1);
insert into t2 values (2),(3);
insert into t5 values (1),(2);
reset master;
delete t2.* from t2,t5 where t2.a=t5.a + 1;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; delete t2.* from t2,t5 where t2.a=t5.a + 1
master-bin.000001 # Query # # use `test`; ROLLBACK
select count(*) from t1 /* must be 1 */;
count(*)
1
delete from t1;
create table t4 (a int default 0, b int primary key) engine=innodb;
insert into t4 values (0, 17);
@ -591,11 +638,11 @@ count(*)
2
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Intvar # # INSERT_ID=7
master-bin.000001 # Intvar # # INSERT_ID=10
master-bin.000001 # Begin_load_query # # ;file_id=1;block_len=12
master-bin.000001 # Intvar # # INSERT_ID=7
master-bin.000001 # Intvar # # INSERT_ID=10
master-bin.000001 # Execute_load_query # # use `test`; load data infile '../std_data_ln/rpl_loaddata.dat' into table t4 (a, @b) set b= @b + bug27417(2) ;file_id=1
master-bin.000001 # Query # # use `test`; ROLLBACK
drop trigger trg_del;
drop table t1,t2,t3,t4;
drop trigger trg_del_t2;
drop table t1,t2,t3,t4,t5;
drop function bug27417;

View File

@ -494,7 +494,9 @@ count(*)
drop table t1,t2;
CREATE TABLE t1 (a int NOT NULL auto_increment primary key) ENGINE=MyISAM;
CREATE TABLE t2 (a int, PRIMARY KEY (a)) ENGINE=InnoDB;
CREATE TABLE t3 (a int, PRIMARY KEY (a), b int unique);
CREATE TABLE t3 (a int, PRIMARY KEY (a), b int unique) ENGINE=MyISAM;
CREATE TABLE t4 (a int, PRIMARY KEY (a), b int unique) ENGINE=Innodb;
CREATE TABLE t5 (a int, PRIMARY KEY (a)) ENGINE=InnoDB;
insert into t2 values (1);
reset master;
insert into t2 values (bug27417(1));
@ -533,6 +535,33 @@ master-bin.000001 # Query # # use `test`; update t3 set b=b+bug27417(1)
select count(*) from t1 /* must be 2 */;
count(*)
2
delete from t3;
delete from t4;
insert into t3 values (1,1);
insert into t4 values (1,1),(2,2);
reset master;
UPDATE t4,t3 SET t4.a=t3.a + bug27417(1) /* top level non-ta table */;
ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Intvar # # INSERT_ID=6
master-bin.000001 # Query # # use `test`; UPDATE t4,t3 SET t4.a=t3.a + bug27417(1) /* top level non-ta table */
master-bin.000001 # Query # # use `test`; ROLLBACK
select count(*) from t1 /* must be 4 */;
count(*)
4
delete from t1;
delete from t3;
delete from t4;
insert into t3 values (1,1),(2,2);
insert into t4 values (1,1),(2,2);
reset master;
UPDATE t3,t4 SET t3.a=t4.a + bug27417(1);
ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
select count(*) from t1 /* must be 1 */;
count(*)
1
drop table t4;
delete from t1;
delete from t2;
delete from t3;
@ -545,12 +574,30 @@ delete from t2;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Intvar # # INSERT_ID=6
master-bin.000001 # Intvar # # INSERT_ID=9
master-bin.000001 # Query # # use `test`; delete from t2
master-bin.000001 # Query # # use `test`; ROLLBACK
select count(*) from t1 /* must be 1 */;
count(*)
1
drop trigger trg_del;
delete from t1;
delete from t2;
delete from t5;
create trigger trg_del_t2 after delete on t2 for each row
insert into t1 values (1);
insert into t2 values (2),(3);
insert into t5 values (1),(2);
reset master;
delete t2.* from t2,t5 where t2.a=t5.a + 1;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; delete t2.* from t2,t5 where t2.a=t5.a + 1
master-bin.000001 # Query # # use `test`; ROLLBACK
select count(*) from t1 /* must be 1 */;
count(*)
1
delete from t1;
create table t4 (a int default 0, b int primary key) engine=innodb;
insert into t4 values (0, 17);
@ -565,13 +612,13 @@ count(*)
2
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Intvar # # INSERT_ID=7
master-bin.000001 # Intvar # # INSERT_ID=10
master-bin.000001 # Begin_load_query # # ;file_id=1;block_len=12
master-bin.000001 # Intvar # # INSERT_ID=7
master-bin.000001 # Intvar # # INSERT_ID=10
master-bin.000001 # Execute_load_query # # use `test`; load data infile '../std_data_ln/rpl_loaddata.dat' into table t4 (a, @b) set b= @b + bug27417(2) ;file_id=1
master-bin.000001 # Query # # use `test`; ROLLBACK
drop trigger trg_del;
drop table t1,t2,t3,t4;
drop trigger trg_del_t2;
drop table t1,t2,t3,t4,t5;
drop function bug27417;
set @@session.binlog_format=@@global.binlog_format;
end of tests

View File

@ -0,0 +1,136 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
show slave status /* Seconds_Behind_Master reports 0 */;;
Slave_IO_State #
Master_Host 127.0.0.1
Master_User root
Master_Port 9306
Connect_Retry 1
Master_Log_File master-bin.000001
Read_Master_Log_Pos 106
Relay_Log_File #
Relay_Log_Pos #
Relay_Master_Log_File master-bin.000001
Slave_IO_Running Yes
Slave_SQL_Running Yes
Replicate_Do_DB
Replicate_Ignore_DB
Replicate_Do_Table
Replicate_Ignore_Table
Replicate_Wild_Do_Table
Replicate_Wild_Ignore_Table
Last_Errno 0
Last_Error
Skip_Counter 0
Exec_Master_Log_Pos 106
Relay_Log_Space #
Until_Condition None
Until_Log_File
Until_Log_Pos 0
Master_SSL_Allowed No
Master_SSL_CA_File
Master_SSL_CA_Path
Master_SSL_Cert
Master_SSL_Cipher
Master_SSL_Key
Seconds_Behind_Master 0
Master_SSL_Verify_Server_Cert No
Last_IO_Errno 0
Last_IO_Error
Last_SQL_Errno 0
Last_SQL_Error
drop table if exists t1;
Warnings:
Note 1051 Unknown table 't1'
create table t1 (f1 int);
flush logs /* contaminate rli->last_master_timestamp */;
lock table t1 write;
insert into t1 values (1);
show slave status /* bug emulated: reports slave threads starting time about 3*3 not 3 secs */;;
Slave_IO_State #
Master_Host 127.0.0.1
Master_User root
Master_Port 9306
Connect_Retry 1
Master_Log_File master-bin.000001
Read_Master_Log_Pos 367
Relay_Log_File #
Relay_Log_Pos #
Relay_Master_Log_File master-bin.000001
Slave_IO_Running Yes
Slave_SQL_Running Yes
Replicate_Do_DB
Replicate_Ignore_DB
Replicate_Do_Table
Replicate_Ignore_Table
Replicate_Wild_Do_Table
Replicate_Wild_Ignore_Table
Last_Errno 0
Last_Error
Skip_Counter 0
Exec_Master_Log_Pos 279
Relay_Log_Space #
Until_Condition None
Until_Log_File
Until_Log_Pos 0
Master_SSL_Allowed No
Master_SSL_CA_File
Master_SSL_CA_Path
Master_SSL_Cert
Master_SSL_Cipher
Master_SSL_Key
Seconds_Behind_Master 9
Master_SSL_Verify_Server_Cert No
Last_IO_Errno 0
Last_IO_Error
Last_SQL_Errno 0
Last_SQL_Error
unlock tables;
flush logs /* this time rli->last_master_timestamp is not affected */;
lock table t1 write;
insert into t1 values (2);
show slave status /* reports the correct diff with master query time about 3+3 secs */;;
Slave_IO_State #
Master_Host 127.0.0.1
Master_User root
Master_Port 9306
Connect_Retry 1
Master_Log_File master-bin.000001
Read_Master_Log_Pos 455
Relay_Log_File #
Relay_Log_Pos #
Relay_Master_Log_File master-bin.000001
Slave_IO_Running Yes
Slave_SQL_Running Yes
Replicate_Do_DB
Replicate_Ignore_DB
Replicate_Do_Table
Replicate_Ignore_Table
Replicate_Wild_Do_Table
Replicate_Wild_Ignore_Table
Last_Errno 0
Last_Error
Skip_Counter 0
Exec_Master_Log_Pos 367
Relay_Log_Space #
Until_Condition None
Until_Log_File
Until_Log_Pos 0
Master_SSL_Allowed No
Master_SSL_CA_File
Master_SSL_CA_Path
Master_SSL_Cert
Master_SSL_Cipher
Master_SSL_Key
Seconds_Behind_Master 7
Master_SSL_Verify_Server_Cert No
Last_IO_Errno 0
Last_IO_Error
Last_SQL_Errno 0
Last_SQL_Error
unlock tables;
drop table t1;

View File

@ -0,0 +1 @@
--loose-debug=d,let_first_flush_log_change_timestamp

View File

@ -0,0 +1,71 @@
#
# Testing replication delay reporting (bug#29309)
# there is an unavoidable non-determinism in the test
# please compare the results with the comments
#
source include/master-slave.inc;
connection master;
#connection slave;
sync_slave_with_master;
--replace_result $DEFAULT_MASTER_PORT DEFAULT_MASTER_PORT
--replace_column 1 # 8 # 9 # 23 #
--query_vertical show slave status /* Seconds_Behind_Master reports 0 */;
sleep 3;
### bug emulation
connection master;
drop table if exists t1;
create table t1 (f1 int);
sleep 3;
#connection slave;
sync_slave_with_master;
flush logs /* contaminate rli->last_master_timestamp */;
connection slave;
lock table t1 write;
connection master;
insert into t1 values (1);
sleep 3;
connection slave;
--replace_result $DEFAULT_MASTER_PORT DEFAULT_MASTER_PORT
--replace_column 1 # 8 # 9 # 23 #
--query_vertical show slave status /* bug emulated: reports slave threads starting time about 3*3 not 3 secs */;
unlock tables;
connection master;
sync_slave_with_master;
### bugfix
connection slave;
flush logs /* this time rli->last_master_timestamp is not affected */;
lock table t1 write;
connection master;
insert into t1 values (2);
sleep 3;
connection slave;
--replace_result $DEFAULT_MASTER_PORT DEFAULT_MASTER_PORT
--replace_column 1 # 8 # 9 # 23 #
--query_vertical show slave status /* reports the correct diff with master query time about 3+3 secs */;
unlock tables;
connection master;
drop table t1;
#connection slave;
sync_slave_with_master;
# End of tests

View File

@ -13,4 +13,3 @@
rpl_ddl : BUG#26418 2007-03-01 mleich Slave out of sync after CREATE/DROP TEMPORARY TABLE + ROLLBACK on master
rpl_auto_increment_11932 : Bug#29809 2007-07-16 ingo Slave SQL errors in warnings file
rpl_stm_extraColmaster_ndb : WL#3915 : Statement-based replication not supported in ndb. Enable test when supported.
rpl_row_extraColmaster_ndb : BUG#29549 : Replication of BLOBs fail for NDB

View File

@ -344,5 +344,6 @@ FLUSH LOGS;
--exec rm $MYSQLTEST_VARDIR/tmp/local.sql
DROP TABLE IF EXISTS t1, t2, t3, t04, t05, t4, t5;
sync_slave_with_master;
# End of 4.1 tests

View File

@ -28,7 +28,7 @@ insert into t1 values(NULL,'new');
save_master_pos;
connection slave;
# wait until the slave tries to run the query, fails and aborts slave thread
wait_for_slave_to_stop;
source include/wait_for_slave_sql_error.inc;
select * from t1 order by n;
delete from t1 where n = 2;
--disable_warnings

View File

@ -15,8 +15,6 @@ rpl_ndb_2innodb : BUG#19227 2006-04-20 pekka pk delete apparently not r
rpl_ndb_2myisam : BUG#19227 Seems to pass currently
rpl_ndb_2other : BUG#21842 2007-08-30 tsmith test has never worked on bigendian (sol10-sparc-a, powermacg5
rpl_ndb_dd_partitions : BUG#19259 2006-04-21 rpl_ndb_dd_partitions fails on s/AMD
rpl_ndb_innodb2ndb : Bug#29549 rpl_ndb_myisam2ndb,rpl_ndb_innodb2ndb failed on Solaris for pack_length issue
rpl_ndb_myisam2ndb : Bug#29549 rpl_ndb_myisam2ndb,rpl_ndb_innodb2ndb failed on Solaris for pack_length issue
rpl_ndb_ddl : BUG#28798 2007-05-31 lars Valgrind failure in NDB
rpl_ndb_mix_innodb : BUG#28123 rpl_ndb_mix_innodb.test casue slave to core on sol10-sparc-a
rpl_ndb_ctype_ucs2_def : BUG#27404 util thd mysql_parse sig11 when mysqld default multibyte charset

View File

@ -753,6 +753,38 @@ select * from t1;
select * from t2;
drop table t1,t2;
#
# Bug #29136 erred multi-delete on trans table does not rollback
#
# prepare
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
CREATE TABLE t1 (a int, PRIMARY KEY (a));
CREATE TABLE t2 (a int, PRIMARY KEY (a)) ENGINE=InnoDB;
create trigger trg_del_t2 after delete on t2 for each row
insert into t1 values (1);
insert into t1 values (1);
insert into t2 values (1),(2);
# exec cases A, B - see multi_update.test
# A. send_error() w/o send_eof() branch
--error ER_DUP_ENTRY
delete t2 from t2;
# check
select count(*) from t2 /* must be 2 as restored after rollback caused by the error */;
# cleanup bug#29136
drop table t1, t2;
#
# Testing of IFNULL
#

View File

@ -588,6 +588,7 @@ CREATE TABLE `t2` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1 ;
# as the test is about to see erred queries in the binlog
set @sav_binlog_format= @@session.binlog_format;
set @@session.binlog_format= mixed;
@ -614,5 +615,42 @@ show master status /* there must be the UPDATE query event */;
# cleanup bug#27716
drop table t1, t2;
set @@session.binlog_format= @sav_binlog_format;
#
# Bug #29136 erred multi-delete on trans table does not rollback
#
# prepare
--disable_warnings
drop table if exists t1, t2, t3;
--enable_warnings
CREATE TABLE t1 (a int, PRIMARY KEY (a));
CREATE TABLE t2 (a int, PRIMARY KEY (a));
CREATE TABLE t3 (a int, PRIMARY KEY (a)) ENGINE=MyISAM;
create trigger trg_del_t3 before delete on t3 for each row insert into t1 values (1);
insert into t2 values (1),(2);
insert into t3 values (1),(2);
reset master;
# exec cases B, A - see innodb.test
# B. send_eof() and send_error() afterward
--error ER_DUP_ENTRY
delete t3.* from t2,t3 where t2.a=t3.a;
# check
select count(*) from t1 /* must be 1 */;
select count(*) from t3 /* must be 1 */;
# cleanup bug#29136
drop table t1, t2, t3;
#
# Add further tests from here
#
--echo end of tests

View File

@ -3,7 +3,8 @@
# Simple test for the partition storage engine
# Taken from the select test
#
-- source include/have_partition.inc
source include/have_partition.inc;
source include/have_archive.inc;
--disable_warnings
drop table if exists t1;

View File

@ -1393,21 +1393,86 @@ int Field::store(const char *to, uint length, CHARSET_INFO *cs,
}
/**
Pack the field into a format suitable for storage and transfer.
To implement packing functionality, only the virtual function
should be overridden. The other functions are just convenience
functions and hence should not be overridden.
The value of <code>low_byte_first</code> is dependent on how the
packed data is going to be used: for local use, e.g., temporary
store on disk or in memory, use the native format since that is
faster. For data that is going to be transferred to other machines
(e.g., when writing data to the binary log), data should always be
stored in little-endian format.
@note The default method for packing fields just copies the raw bytes
of the record into the destination, but never more than
<code>max_length</code> characters.
@param to
Pointer to memory area where representation of field should be put.
@param from
Pointer to memory area where record representation of field is
stored.
@param max_length
Maximum length of the field, as given in the column definition. For
example, for <code>CHAR(1000)</code>, the <code>max_length</code>
is 1000. This information is sometimes needed to decide how to pack
the data.
@param low_byte_first
@c TRUE if integers should be stored little-endian, @c FALSE if
native format should be used. Note that for little-endian machines,
the value of this flag is a moot point since the native format is
little-endian.
*/
uchar *
Field::pack(uchar *to, const uchar *from, uint max_length,
bool low_byte_first __attribute__((unused)))
{
uint32 length= pack_length();
set_if_smaller(length, max_length);
memcpy(to, from, length);
return to+length;
}
/**
Unpack a field from row data.
This method is used to unpack a field from a master whose size
of the field is less than that of the slave.
This method is used to unpack a field from a master whose size of
the field is less than that of the slave.
The <code>param_data</code> parameter is a two-byte integer (stored
in the least significant 16 bits of the unsigned integer) usually
consisting of two parts: the real type in the most significant byte
and the original pack length in the least significant byte.
The exact layout of the <code>param_data</code> field is given by
the <code>Table_map_log_event::save_field_metadata()</code>.
This is the default method for unpacking a field. It just copies
the memory block in byte order (of original pack length bytes or
length of field, whichever is smaller).
@param to Destination of the data
@param from Source of the data
@param param_data Pack length of the field data
@param param_data Real type and original pack length of the field
data
@param low_byte_first
If this flag is @c true, all composite entities (e.g., lengths)
should be unpacked in little-endian format; otherwise, the entities
are unpacked in native order.
@return New pointer into memory based on from + length of the data
*/
const uchar *Field::unpack(uchar* to,
const uchar *from,
uint param_data)
const uchar *
Field::unpack(uchar* to, const uchar *from, uint param_data,
bool low_byte_first __attribute__((unused)))
{
uint length=pack_length();
int from_type= 0;
@ -1420,19 +1485,18 @@ const uchar *Field::unpack(uchar* to,
from_type= (param_data & 0xff00) >> 8U; // real_type.
param_data= param_data & 0x00ff; // length.
}
if ((param_data == 0) ||
(length == param_data) ||
(from_type != real_type()))
{
memcpy(to, from, length);
return from+length;
}
uint len= (param_data && (param_data < length)) ?
param_data : length;
/*
If the length is the same, use old unpack method.
If the param_data is 0, use the old unpack method.
This is possible if the table map was generated from a down-level
master or if the data was not available on the master.
If the real_types are not the same, use the old unpack method.
*/
if ((length == param_data) ||
(param_data == 0) ||
(from_type != real_type()))
return(unpack(to, from));
memcpy(to, from, param_data > length ? length : len);
return from+len;
}
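As a quick illustration of the param_data layout described in the comment above, here is a self-contained sketch (not part of the patch; the values are made up, and 254 is assumed to correspond to MYSQL_TYPE_STRING): the master column's real type travels in the high byte and its pack length in the low byte, with 0 meaning the information was unavailable.

#include <cassert>
#include <cstdint>

int main()
{
  // Hypothetical table-map metadata for one master column.
  uint16_t param_data = (uint16_t) ((254 << 8) | 20);   // type 254, length 20
  unsigned from_type   = (param_data & 0xff00) >> 8;    // real type (high byte)
  unsigned from_length = param_data & 0x00ff;           // pack length (low byte)
  assert(from_type == 254 && from_length == 20);
  return 0;
}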
@ -2805,10 +2869,15 @@ uint Field_new_decimal::is_equal(Create_field *new_field)
@return New pointer into memory based on from + length of the data
*/
const uchar *Field_new_decimal::unpack(uchar* to,
const uchar *from,
uint param_data)
const uchar *
Field_new_decimal::unpack(uchar* to,
const uchar *from,
uint param_data,
bool low_byte_first)
{
if (param_data == 0)
return Field::unpack(to, from, param_data, low_byte_first);
uint from_precision= (param_data & 0xff00) >> 8U;
uint from_decimal= param_data & 0x00ff;
uint length=pack_length();
@ -3950,6 +4019,49 @@ void Field_longlong::sql_type(String &res) const
}
/*
Floating-point numbers
*/
uchar *
Field_real::pack(uchar *to, const uchar *from,
uint max_length, bool low_byte_first)
{
DBUG_ENTER("Field_real::pack");
DBUG_ASSERT(max_length >= pack_length());
DBUG_PRINT("debug", ("pack_length(): %u", pack_length()));
#ifdef WORDS_BIGENDIAN
if (low_byte_first != table->s->db_low_byte_first)
{
const uchar *dptr= from + pack_length();
while (dptr-- > from)
*to++ = *dptr;
DBUG_RETURN(to);
}
else
#endif
DBUG_RETURN(Field::pack(to, from, max_length, low_byte_first));
}
const uchar *
Field_real::unpack(uchar *to, const uchar *from,
uint param_data, bool low_byte_first)
{
DBUG_ENTER("Field_real::unpack");
DBUG_PRINT("debug", ("pack_length(): %u", pack_length()));
#ifdef WORDS_BIGENDIAN
if (low_byte_first != table->s->db_low_byte_first)
{
const uchar *dptr= from + pack_length();
while (dptr-- > from)
*to++ = *dptr;
DBUG_RETURN(from + pack_length());
}
else
#endif
DBUG_RETURN(Field::unpack(to, from, param_data, low_byte_first));
}
/****************************************************************************
single precision float
****************************************************************************/
@ -6337,6 +6449,11 @@ int Field_longstr::store_decimal(const my_decimal *d)
return store(str.ptr(), str.length(), str.charset());
}
uint32 Field_longstr::max_data_length() const
{
return field_length + (field_length > 255 ? 2 : 1);
}
double Field_string::val_real(void)
{
@ -6481,7 +6598,9 @@ void Field_string::sql_type(String &res) const
}
uchar *Field_string::pack(uchar *to, const uchar *from, uint max_length)
uchar *Field_string::pack(uchar *to, const uchar *from,
uint max_length,
bool low_byte_first __attribute__((unused)))
{
uint length= min(field_length,max_length);
uint local_char_length= max_length/field_charset->mbmaxlen;
@ -6489,11 +6608,15 @@ uchar *Field_string::pack(uchar *to, const uchar *from, uint max_length)
local_char_length= my_charpos(field_charset, from, from+length,
local_char_length);
set_if_smaller(length, local_char_length);
while (length && from[length-1] == ' ')
while (length && from[length-1] == field_charset->pad_char)
length--;
// Length always stored little-endian
*to++= (uchar) length;
if (field_length > 255)
*to++= (uchar) (length >> 8);
// Store the actual bytes of the string
memcpy(to, from, length);
return to+length;
}
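For orientation, a self-contained sketch of the CHAR packing layout produced above (illustrative only; pack_char() and its arguments are invented names, not server code): trailing pad characters are stripped, the length is stored little-endian in one byte, or two when the column can exceed 255 bytes (which is also what Field_longstr::max_data_length() accounts for), and the raw bytes follow.

#include <cassert>
#include <cstring>

// Pack a CHAR(field_length) value: strip trailing pad, store the length
// little-endian (1 or 2 bytes), then copy the remaining bytes.
static unsigned char *pack_char(unsigned char *to, const unsigned char *from,
                                size_t field_length, unsigned char pad)
{
  size_t length = field_length;
  while (length && from[length - 1] == pad)
    length--;
  *to++ = (unsigned char) (length & 0xFF);
  if (field_length > 255)
    *to++ = (unsigned char) ((length >> 8) & 0xFF);
  memcpy(to, from, length);
  return to + length;
}

int main()
{
  unsigned char row[10];
  memset(row, ' ', sizeof(row));           // CHAR(10) holding "abc       "
  memcpy(row, "abc", 3);
  unsigned char packed[12];
  unsigned char *end = pack_char(packed, row, sizeof(row), ' ');
  assert(packed[0] == 3 && end == packed + 4);
  assert(memcmp(packed + 1, "abc", 3) == 0);
  return 0;
}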
@ -6515,34 +6638,27 @@ uchar *Field_string::pack(uchar *to, const uchar *from, uint max_length)
@return New pointer into memory based on from + length of the data
*/
const uchar *Field_string::unpack(uchar *to,
const uchar *from,
uint param_data)
{
uint from_len= param_data & 0x00ff; // length.
uint length= 0;
uint f_length;
f_length= (from_len < field_length) ? from_len : field_length;
DBUG_ASSERT(f_length <= 255);
length= (uint) *from++;
bitmap_set_bit(table->write_set,field_index);
store((const char *)from, length, system_charset_info);
return from+length;
}
const uchar *Field_string::unpack(uchar *to, const uchar *from)
const uchar *
Field_string::unpack(uchar *to,
const uchar *from,
uint param_data,
bool low_byte_first __attribute__((unused)))
{
uint from_length=
param_data ? min(param_data & 0x00ff, field_length) : field_length;
uint length;
if (field_length > 255)
if (from_length > 255)
{
length= uint2korr(from);
from+= 2;
}
else
length= (uint) *from++;
memcpy(to, from, (int) length);
bfill(to+length, field_length - length, ' ');
memcpy(to, from, length);
// Pad the string with the pad character of the field's charset
bfill(to + length, field_length - length, field_charset->pad_char);
return from+length;
}
@ -6960,22 +7076,30 @@ uint32 Field_varstring::data_length()
Here the number of length bytes are depending on the given max_length
*/
uchar *Field_varstring::pack(uchar *to, const uchar *from, uint max_length)
uchar *Field_varstring::pack(uchar *to, const uchar *from,
uint max_length,
bool low_byte_first __attribute__((unused)))
{
uint length= length_bytes == 1 ? (uint) *from : uint2korr(from);
set_if_smaller(max_length, field_length);
if (length > max_length)
length=max_length;
*to++= (char) (length & 255);
/* Length always stored little-endian */
*to++= length & 0xFF;
if (max_length > 255)
*to++= (char) (length >> 8);
if (length)
*to++= (length >> 8) & 0xFF;
/* Store bytes of string */
if (length > 0)
memcpy(to, from+length_bytes, length);
return to+length;
}
uchar *Field_varstring::pack_key(uchar *to, const uchar *key, uint max_length)
uchar *
Field_varstring::pack_key(uchar *to, const uchar *key, uint max_length,
bool low_byte_first __attribute__((unused)))
{
uint length= length_bytes == 1 ? (uint) *key : uint2korr(key);
uint local_char_length= ((field_charset->mbmaxlen > 1) ?
@ -7014,8 +7138,9 @@ uchar *Field_varstring::pack_key(uchar *to, const uchar *key, uint max_length)
Pointer to end of 'key' (To the next key part if multi-segment key)
*/
const uchar *Field_varstring::unpack_key(uchar *to, const uchar *key,
uint max_length)
const uchar *
Field_varstring::unpack_key(uchar *to, const uchar *key, uint max_length,
bool low_byte_first __attribute__((unused)))
{
/* get length of the blob key */
uint32 length= *key++;
@ -7044,8 +7169,9 @@ const uchar *Field_varstring::unpack_key(uchar *to, const uchar *key,
end of key storage
*/
uchar *Field_varstring::pack_key_from_key_image(uchar *to, const uchar *from,
uint max_length)
uchar *
Field_varstring::pack_key_from_key_image(uchar *to, const uchar *from, uint max_length,
bool low_byte_first __attribute__((unused)))
{
/* Key length is always stored as 2 bytes */
uint length= uint2korr(from);
@ -7065,6 +7191,9 @@ uchar *Field_varstring::pack_key_from_key_image(uchar *to, const uchar *from,
This method is used to unpack a varstring field from a master
whose size of the field is less than that of the slave.
@note
The string length is always packed little-endian.
@param to Destination of the data
@param from Source of the data
@ -7072,9 +7201,10 @@ uchar *Field_varstring::pack_key_from_key_image(uchar *to, const uchar *from,
@return New pointer into memory based on from + length of the data
*/
const uchar *Field_varstring::unpack(uchar *to,
const uchar *from,
uint param_data)
const uchar *
Field_varstring::unpack(uchar *to, const uchar *from,
uint param_data,
bool low_byte_first __attribute__((unused)))
{
uint length;
uint l_bytes= (param_data && (param_data < field_length)) ?
@ -7086,28 +7216,7 @@ const uchar *Field_varstring::unpack(uchar *to,
if (length_bytes == 2)
to[1]= 0;
}
else
{
length= uint2korr(from);
to[0]= *from++;
to[1]= *from++;
}
if (length)
memcpy(to+ length_bytes, from, length);
return from+length;
}
/*
unpack field packed with Field_varstring::pack()
*/
const uchar *Field_varstring::unpack(uchar *to, const uchar *from)
{
uint length;
if (length_bytes == 1)
length= (uint) (*to= *from++);
else
else /* l_bytes == 2 */
{
length= uint2korr(from);
to[0]= *from++;
@ -7356,9 +7465,9 @@ void Field_blob::store_length(uchar *i_ptr,
}
uint32 Field_blob::get_length(const uchar *pos, bool low_byte_first)
uint32 Field_blob::get_length(const uchar *pos, uint packlength_arg, bool low_byte_first)
{
switch (packlength) {
switch (packlength_arg) {
case 1:
return (uint32) pos[0];
case 2:
@ -7789,26 +7898,37 @@ void Field_blob::sql_type(String &res) const
}
}
uchar *Field_blob::pack(uchar *to, const uchar *from, uint max_length)
uchar *Field_blob::pack(uchar *to, const uchar *from,
uint max_length, bool low_byte_first)
{
DBUG_ENTER("Field_blob::pack");
DBUG_PRINT("enter", ("to: 0x%lx; from: 0x%lx;"
" max_length: %u; low_byte_first: %d",
(ulong) to, (ulong) from,
max_length, low_byte_first));
DBUG_DUMP("record", from, table->s->reclength);
uchar *save= ptr;
ptr= (uchar*) from;
uint32 length=get_length(); // Length of from string
if (length > max_length)
{
length=max_length;
store_length(to,packlength,length,TRUE);
}
else
memcpy(to,from,packlength); // Copy length
if (length)
/*
Store the length (bounded by max_length), which will occupy
packlength bytes. If the given max length is smaller than the actual
length of the blob, we store only the initial bytes of the blob.
*/
store_length(to, packlength, min(length, max_length), low_byte_first);
/*
Store the actual blob data, which will occupy 'length' bytes.
*/
if (length > 0)
{
get_ptr((uchar**) &from);
memcpy(to+packlength, from,length);
}
ptr=save; // Restore org row pointer
return to+packlength+length;
DBUG_DUMP("packed", to, packlength + length);
DBUG_RETURN(to+packlength+length);
}
@ -7823,28 +7943,30 @@ uchar *Field_blob::pack(uchar *to, const uchar *from, uint max_length)
@param to Destination of the data
@param from Source of the data
@param param_data not used
@param param_data The master's blob packlength, as given by the
table map metadata (0 if not available)
@param low_byte_first @c TRUE if base types should be stored in little-
endian format, @c FALSE if native format should
be used.
@return New pointer into memory based on from + length of the data
*/
const uchar *Field_blob::unpack(uchar *to,
const uchar *from,
uint param_data)
uint param_data,
bool low_byte_first)
{
return unpack(to, from);
}
const uchar *Field_blob::unpack(uchar *to, const uchar *from)
{
uint32 length=get_length(from);
memcpy(to,from,packlength);
from+=packlength;
if (length)
memcpy_fixed(to+packlength, &from, sizeof(from));
else
bzero(to+packlength,sizeof(from));
return from+length;
DBUG_ENTER("Field_blob::unpack");
DBUG_PRINT("enter", ("to: 0x%lx; from: 0x%lx;"
" param_data: %u; low_byte_first: %d",
(ulong) to, (ulong) from, param_data, low_byte_first));
uint const master_packlength=
param_data > 0 ? param_data & 0xFF : packlength;
uint32 const length= get_length(from, master_packlength, low_byte_first);
DBUG_DUMP("packed", from, length + master_packlength);
bitmap_set_bit(table->write_set, field_index);
store(reinterpret_cast<const char*>(from) + master_packlength,
length, field_charset);
DBUG_DUMP("record", to, table->s->reclength);
DBUG_RETURN(from + master_packlength + length);
}
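A toy model of the BLOB wire format unpacked above (an assumption-laden sketch, not server code; read_blob_length() is an invented stand-in for Field_blob::get_length()): the length header occupies the master's packlength bytes, little-endian in the binlog, so the slave must decode it with the packlength delivered through the table map metadata rather than with its own.

#include <cassert>
#include <cstdint>
#include <cstring>

// Read a little-endian length header of 'packlength' bytes (1..4).
static uint32_t read_blob_length(const unsigned char *pos, unsigned packlength)
{
  uint32_t length = 0;
  for (unsigned i = 0; i < packlength; i++)
    length |= (uint32_t) pos[i] << (8 * i);
  return length;
}

int main()
{
  // A 5-byte blob packed by a master whose column uses packlength == 3.
  unsigned char packed[3 + 5] = { 5, 0, 0, 'h', 'e', 'l', 'l', 'o' };
  // The slave's column may use a different packlength locally, but it
  // must use the master's (3) to find the payload.
  uint32_t len = read_blob_length(packed, 3);
  assert(len == 5);
  assert(memcmp(packed + 3, "hello", 5) == 0);
  return 0;
}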
/* Keys for blobs are like keys on varchars */
@ -7894,7 +8016,9 @@ int Field_blob::pack_cmp(const uchar *b, uint key_length_arg,
/* Create a packed key that will be used for storage from a MySQL row */
uchar *Field_blob::pack_key(uchar *to, const uchar *from, uint max_length)
uchar *
Field_blob::pack_key(uchar *to, const uchar *from, uint max_length,
bool low_byte_first __attribute__((unused)))
{
uchar *save= ptr;
ptr= (uchar*) from;
@ -7939,8 +8063,9 @@ uchar *Field_blob::pack_key(uchar *to, const uchar *from, uint max_length)
Pointer into 'from' past the last byte copied from packed key.
*/
const uchar *Field_blob::unpack_key(uchar *to, const uchar *from,
uint max_length)
const uchar *
Field_blob::unpack_key(uchar *to, const uchar *from, uint max_length,
bool low_byte_first __attribute__((unused)))
{
/* get length of the blob key */
uint32 length= *from++;
@ -7963,8 +8088,9 @@ const uchar *Field_blob::unpack_key(uchar *to, const uchar *from,
/* Create a packed key that will be used for storage from a MySQL key */
uchar *Field_blob::pack_key_from_key_image(uchar *to, const uchar *from,
uint max_length)
uchar *
Field_blob::pack_key_from_key_image(uchar *to, const uchar *from, uint max_length,
bool low_byte_first __attribute__((unused)))
{
uint length=uint2korr(from);
if (length > max_length)
@ -8941,9 +9067,11 @@ void Field_bit::sql_type(String &res) const
}
uchar *Field_bit::pack(uchar *to, const uchar *from, uint max_length)
uchar *
Field_bit::pack(uchar *to, const uchar *from, uint max_length,
bool low_byte_first __attribute__((unused)))
{
DBUG_ASSERT(max_length);
DBUG_ASSERT(max_length > 0);
uint length;
if (bit_len > 0)
{
@ -8978,28 +9106,44 @@ uchar *Field_bit::pack(uchar *to, const uchar *from, uint max_length)
/**
Unpack a bit field from row data.
This method is used to unpack a bit field from a master whose size
This method is used to unpack a bit field from a master whose size
of the field is less than that of the slave.
@param to Destination of the data
@param from Source of the data
@param param_data Bit length (upper) and length (lower) values
@return New pointer into memory based on from + length of the data
*/
const uchar *Field_bit::unpack(uchar *to,
const uchar *from,
uint param_data)
const uchar *
Field_bit::unpack(uchar *to, const uchar *from, uint param_data,
bool low_byte_first __attribute__((unused)))
{
uint const from_len= (param_data >> 8U) & 0x00ff;
uint const from_bit_len= param_data & 0x00ff;
/*
If the master and slave have the same sizes, then use the old
unpack() method.
If the parameter data is zero (i.e., undefined), or if the master
and slave have the same sizes, then use the old unpack() method.
*/
if ((from_bit_len == bit_len) &&
(from_len == bytes_in_rec))
return(unpack(to, from));
if (param_data == 0 ||
(from_bit_len == bit_len) && (from_len == bytes_in_rec))
{
if (bit_len > 0)
{
/*
set_rec_bits is a macro, don't put the post-increment in the
argument since that might cause strange side-effects.
For the choice of the second argument, see the explanation for
Field_bit::pack().
*/
set_rec_bits(*from, bit_ptr + (to - ptr), bit_ofs, bit_len);
from++;
}
memcpy(to, from, bytes_in_rec);
return from + bytes_in_rec;
}
/*
We are converting a smaller bit field to a larger one here.
To do that, we first need to construct a raw value for the original
@ -9027,25 +9171,6 @@ const uchar *Field_bit::unpack(uchar *to,
}
const uchar *Field_bit::unpack(uchar *to, const uchar *from)
{
if (bit_len > 0)
{
/*
set_rec_bits is a macro, don't put the post-increment in the
argument since that might cause strange side-effects.
For the choice of the second argument, see the explanation for
Field_bit::pack().
*/
set_rec_bits(*from, bit_ptr + (to - ptr), bit_ofs, bit_len);
from++;
}
memcpy(to, from, bytes_in_rec);
return from + bytes_in_rec;
}
void Field_bit::set_default()
{
if (bit_len > 0)

View File

@ -175,6 +175,17 @@ public:
*/
virtual uint32 data_length() { return pack_length(); }
virtual uint32 sort_length() const { return pack_length(); }
/**
Get the maximum size of the data in packed format.
@return Maximum data length of the field when packed using the
Field::pack() function.
*/
virtual uint32 max_data_length() const {
return pack_length();
};
virtual int reset(void) { bzero(ptr,pack_length()); return 0; }
virtual void reset_fields() {}
virtual void set_default()
@ -357,32 +368,45 @@ public:
return str;
}
virtual bool send_binary(Protocol *protocol);
virtual uchar *pack(uchar *to, const uchar *from, uint max_length=~(uint) 0)
virtual uchar *pack(uchar *to, const uchar *from,
uint max_length, bool low_byte_first);
/**
@overload Field::pack(uchar*, const uchar*, uint, bool)
*/
uchar *pack(uchar *to, const uchar *from)
{
uint32 length=pack_length();
memcpy(to,from,length);
return to+length;
DBUG_ENTER("Field::pack");
uchar *result= this->pack(to, from, UINT_MAX, table->s->db_low_byte_first);
DBUG_RETURN(result);
}
virtual const uchar *unpack(uchar* to, const uchar *from, uint param_data);
virtual const uchar *unpack(uchar* to, const uchar *from)
virtual const uchar *unpack(uchar* to, const uchar *from,
uint param_data, bool low_byte_first);
/**
@overload Field::unpack(uchar*, const uchar*, uint, bool)
*/
const uchar *unpack(uchar* to, const uchar *from)
{
uint length=pack_length();
memcpy(to,from,length);
return from+length;
DBUG_ENTER("Field::unpack");
const uchar *result= unpack(to, from, 0U, table->s->db_low_byte_first);
DBUG_RETURN(result);
}
virtual uchar *pack_key(uchar* to, const uchar *from, uint max_length)
virtual uchar *pack_key(uchar* to, const uchar *from,
uint max_length, bool low_byte_first)
{
return pack(to,from,max_length);
return pack(to, from, max_length, low_byte_first);
}
virtual uchar *pack_key_from_key_image(uchar* to, const uchar *from,
uint max_length)
uint max_length, bool low_byte_first)
{
return pack(to,from,max_length);
return pack(to, from, max_length, low_byte_first);
}
virtual const uchar *unpack_key(uchar* to, const uchar *from,
uint max_length)
uint max_length, bool low_byte_first)
{
return unpack(to,from);
return unpack(to, from, max_length, low_byte_first);
}
virtual uint packed_col_length(const uchar *to, uint length)
{ return length;}
@ -567,6 +591,7 @@ public:
{}
int store_decimal(const my_decimal *d);
uint32 max_data_length() const;
};
/* base class for float and double and decimal (old one) */
@ -587,6 +612,10 @@ public:
int truncate(double *nr, double max_length);
uint32 max_display_length() { return field_length; }
uint size_of() const { return sizeof(*this); }
virtual const uchar *unpack(uchar* to, const uchar *from,
uint param_data, bool low_byte_first);
virtual uchar *pack(uchar* to, const uchar *from,
uint max_length, bool low_byte_first);
};
@ -615,6 +644,16 @@ public:
void overflow(bool negative);
bool zero_pack() const { return 0; }
void sql_type(String &str) const;
virtual const uchar *unpack(uchar* to, const uchar *from,
uint param_data, bool low_byte_first)
{
return Field::unpack(to, from, param_data, low_byte_first);
}
virtual uchar *pack(uchar* to, const uchar *from,
uint max_length, bool low_byte_first)
{
return Field::pack(to, from, max_length, low_byte_first);
}
};
@ -665,7 +704,8 @@ public:
uint row_pack_length() { return pack_length(); }
int compatible_field_size(uint field_metadata);
uint is_equal(Create_field *new_field);
virtual const uchar *unpack(uchar* to, const uchar *from, uint param_data);
virtual const uchar *unpack(uchar* to, const uchar *from,
uint param_data, bool low_byte_first);
};
@ -696,6 +736,20 @@ public:
uint32 pack_length() const { return 1; }
void sql_type(String &str) const;
uint32 max_display_length() { return 4; }
virtual uchar *pack(uchar* to, const uchar *from,
uint max_length, bool low_byte_first)
{
*to= *from;
return to + 1;
}
virtual const uchar *unpack(uchar* to, const uchar *from,
uint param_data, bool low_byte_first)
{
*to= *from;
return from + 1;
}
};
@ -731,8 +785,47 @@ public:
uint32 pack_length() const { return 2; }
void sql_type(String &str) const;
uint32 max_display_length() { return 6; }
};
virtual uchar *pack(uchar* to, const uchar *from,
uint max_length, bool low_byte_first)
{
int16 val;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
val = sint2korr(from);
else
#endif
shortget(val, from);
#ifdef WORDS_BIGENDIAN
if (low_byte_first)
int2store(to, val);
else
#endif
shortstore(to, val);
return to + sizeof(val);
}
virtual const uchar *unpack(uchar* to, const uchar *from,
uint param_data, bool low_byte_first)
{
int16 val;
#ifdef WORDS_BIGENDIAN
if (low_byte_first)
val = sint2korr(from);
else
#endif
shortget(val, from);
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
int2store(to, val);
else
#endif
shortstore(to, val);
return from + sizeof(val);
}
};
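All of the integer pack()/unpack() bodies added in this header follow the same two-step pattern: read the value in the byte order of the record (table->s->db_low_byte_first) and write it in the byte order the caller asked for. A standalone sketch for a 16-bit value, with invented helper names standing in for sint2korr()/shortget() and int2store()/shortstore():

#include <cassert>
#include <cstdint>
#include <cstring>

// Repack a 16-bit value from one byte order to another.
static unsigned char *pack_int16(unsigned char *to, const unsigned char *from,
                                 bool from_low_byte_first,
                                 bool to_low_byte_first)
{
  int16_t val;
  if (from_low_byte_first)
    val = (int16_t) (from[0] | (from[1] << 8));      // like sint2korr()
  else
    memcpy(&val, from, sizeof(val));                 // like shortget()
  if (to_low_byte_first)
  {
    to[0] = (unsigned char) (val & 0xFF);            // like int2store()
    to[1] = (unsigned char) ((val >> 8) & 0xFF);
  }
  else
    memcpy(to, &val, sizeof(val));                   // like shortstore()
  return to + sizeof(val);
}

int main()
{
  int16_t v = 0x1234;
  unsigned char native[2], le[2];
  memcpy(native, &v, sizeof(v));                     // record in native order
  pack_int16(le, native, false, true);               // pack for the binlog
  assert(le[0] == 0x34 && le[1] == 0x12);            // little-endian on any host
  return 0;
}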
class Field_medium :public Field_num {
public:
@ -761,6 +854,18 @@ public:
uint32 pack_length() const { return 3; }
void sql_type(String &str) const;
uint32 max_display_length() { return 8; }
virtual uchar *pack(uchar* to, const uchar *from,
uint max_length, bool low_byte_first)
{
return Field::pack(to, from, max_length, low_byte_first);
}
virtual const uchar *unpack(uchar* to, const uchar *from,
uint param_data, bool low_byte_first)
{
return Field::unpack(to, from, param_data, low_byte_first);
}
};
@ -796,6 +901,45 @@ public:
uint32 pack_length() const { return 4; }
void sql_type(String &str) const;
uint32 max_display_length() { return MY_INT32_NUM_DECIMAL_DIGITS; }
virtual uchar *pack(uchar* to, const uchar *from,
uint max_length, bool low_byte_first)
{
int32 val;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
val = sint4korr(from);
else
#endif
longget(val, from);
#ifdef WORDS_BIGENDIAN
if (low_byte_first)
int4store(to, val);
else
#endif
longstore(to, val);
return to + sizeof(val);
}
virtual const uchar *unpack(uchar* to, const uchar *from,
uint param_data, bool low_byte_first)
{
int32 val;
#ifdef WORDS_BIGENDIAN
if (low_byte_first)
val = sint4korr(from);
else
#endif
longget(val, from);
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
int4store(to, val);
else
#endif
longstore(to, val);
return from + sizeof(val);
}
};
@ -838,6 +982,45 @@ public:
void sql_type(String &str) const;
bool can_be_compared_as_longlong() const { return TRUE; }
uint32 max_display_length() { return 20; }
virtual uchar *pack(uchar* to, const uchar *from,
uint max_length, bool low_byte_first)
{
int64 val;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
val = sint8korr(from);
else
#endif
longlongget(val, from);
#ifdef WORDS_BIGENDIAN
if (low_byte_first)
int8store(to, val);
else
#endif
longlongstore(to, val);
return to + sizeof(val);
}
virtual const uchar *unpack(uchar* to, const uchar *from,
uint param_data, bool low_byte_first)
{
int64 val;
#ifdef WORDS_BIGENDIAN
if (low_byte_first)
val = sint8korr(from);
else
#endif
longlongget(val, from);
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
int8store(to, val);
else
#endif
longlongstore(to, val);
return from + sizeof(val);
}
};
#endif
@ -1218,9 +1401,10 @@ public:
int cmp(const uchar *,const uchar *);
void sort_string(uchar *buff,uint length);
void sql_type(String &str) const;
uchar *pack(uchar *to, const uchar *from, uint max_length=~(uint) 0);
virtual const uchar *unpack(uchar* to, const uchar *from, uint param_data);
const uchar *unpack(uchar* to, const uchar *from);
virtual uchar *pack(uchar *to, const uchar *from,
uint max_length, bool low_byte_first);
virtual const uchar *unpack(uchar* to, const uchar *from,
uint param_data, bool low_byte_first);
uint pack_length_from_metadata(uint field_metadata)
{ return (field_metadata & 0x00ff); }
uint row_pack_length() { return (field_length + 1); }
@ -1298,13 +1482,15 @@ public:
uint get_key_image(uchar *buff,uint length, imagetype type);
void set_key_image(const uchar *buff,uint length);
void sql_type(String &str) const;
uchar *pack(uchar *to, const uchar *from, uint max_length=~(uint) 0);
uchar *pack_key(uchar *to, const uchar *from, uint max_length);
virtual uchar *pack(uchar *to, const uchar *from,
uint max_length, bool low_byte_first);
uchar *pack_key(uchar *to, const uchar *from, uint max_length, bool low_byte_first);
uchar *pack_key_from_key_image(uchar* to, const uchar *from,
uint max_length);
virtual const uchar *unpack(uchar* to, const uchar *from, uint param_data);
const uchar *unpack(uchar* to, const uchar *from);
const uchar *unpack_key(uchar* to, const uchar *from, uint max_length);
uint max_length, bool low_byte_first);
virtual const uchar *unpack(uchar* to, const uchar *from,
uint param_data, bool low_byte_first);
const uchar *unpack_key(uchar* to, const uchar *from,
uint max_length, bool low_byte_first);
int pack_cmp(const uchar *a, const uchar *b, uint key_length,
my_bool insert_or_update);
int pack_cmp(const uchar *b, uint key_length,my_bool insert_or_update);
@ -1397,7 +1583,7 @@ public:
{ return (uint32) (packlength); }
uint row_pack_length() { return pack_length_no_ptr(); }
uint32 sort_length() const;
inline uint32 max_data_length() const
virtual uint32 max_data_length() const
{
return (uint32) (((ulonglong) 1 << (packlength*8)) -1);
}
@ -1425,13 +1611,13 @@ public:
@returns The length in the row plus the size of the data.
*/
uint32 get_packed_size(const uchar *ptr_arg, bool low_byte_first)
{return packlength + get_length(ptr_arg, low_byte_first);}
{return packlength + get_length(ptr_arg, packlength, low_byte_first);}
inline uint32 get_length(uint row_offset= 0)
{ return get_length(ptr+row_offset, table->s->db_low_byte_first); }
uint32 get_length(const uchar *ptr, bool low_byte_first);
{ return get_length(ptr+row_offset, this->packlength, table->s->db_low_byte_first); }
uint32 get_length(const uchar *ptr, uint packlength, bool low_byte_first);
uint32 get_length(const uchar *ptr_arg)
{ return get_length(ptr_arg, table->s->db_low_byte_first); }
{ return get_length(ptr_arg, this->packlength, table->s->db_low_byte_first); }
void put_length(uchar *pos, uint32 length);
inline void get_ptr(uchar **str)
{
@ -1472,13 +1658,16 @@ public:
memcpy_fixed(ptr+packlength,&tmp,sizeof(char*));
return 0;
}
uchar *pack(uchar *to, const uchar *from, uint max_length= ~(uint) 0);
uchar *pack_key(uchar *to, const uchar *from, uint max_length);
virtual uchar *pack(uchar *to, const uchar *from,
uint max_length, bool low_byte_first);
uchar *pack_key(uchar *to, const uchar *from,
uint max_length, bool low_byte_first);
uchar *pack_key_from_key_image(uchar* to, const uchar *from,
uint max_length);
virtual const uchar *unpack(uchar *to, const uchar *from, uint param_data);
const uchar *unpack(uchar *to, const uchar *from);
const uchar *unpack_key(uchar* to, const uchar *from, uint max_length);
uint max_length, bool low_byte_first);
virtual const uchar *unpack(uchar *to, const uchar *from,
uint param_data, bool low_byte_first);
const uchar *unpack_key(uchar* to, const uchar *from,
uint max_length, bool low_byte_first);
int pack_cmp(const uchar *a, const uchar *b, uint key_length,
my_bool insert_or_update);
int pack_cmp(const uchar *b, uint key_length,my_bool insert_or_update);
@ -1631,6 +1820,7 @@ public:
enum_field_types type() const { return MYSQL_TYPE_BIT; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; }
uint32 key_length() const { return (uint32) (field_length + 7) / 8; }
uint32 max_data_length() const { return (field_length + 7) / 8; }
uint32 max_display_length() { return field_length; }
uint size_of() const { return sizeof(*this); }
Item_result result_type () const { return INT_RESULT; }
@ -1672,9 +1862,10 @@ public:
{ return (bytes_in_rec + ((bit_len > 0) ? 1 : 0)); }
int compatible_field_size(uint field_metadata);
void sql_type(String &str) const;
uchar *pack(uchar *to, const uchar *from, uint max_length=~(uint) 0);
virtual const uchar *unpack(uchar *to, const uchar *from, uint param_data);
const uchar *unpack(uchar* to, const uchar *from);
virtual uchar *pack(uchar *to, const uchar *from,
uint max_length, bool low_byte_first);
virtual const uchar *unpack(uchar *to, const uchar *from,
uint param_data, bool low_byte_first);
virtual void set_default();
Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,

View File

@ -3577,9 +3577,6 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
(!binlog_filter->db_ok(local_db)))
{
VOID(pthread_mutex_unlock(&LOCK_log));
DBUG_PRINT("info",("OPTION_BIN_LOG is %s, db_ok('%s') == %d",
(thd->options & OPTION_BIN_LOG) ? "set" : "clear",
local_db, binlog_filter->db_ok(local_db)));
DBUG_RETURN(0);
}
#endif /* HAVE_REPLICATION */

View File

@ -114,6 +114,9 @@ private:
flag_set m_flags;
};
#ifndef DBUG_OFF
uint debug_not_change_ts_if_art_event= 1; // bug#29309 simulation
#endif
/*
pretty_print_str()
@ -555,8 +558,32 @@ int Log_event::do_update_pos(Relay_log_info *rli)
Matz: I don't think we will need this check with this refactoring.
*/
if (rli)
rli->stmt_done(log_pos, when);
{
/*
bug#29309 simulation: reset the flag so that the artificial event
is allowed to (wrongly) update rli->last_master_timestamp exactly
once - for the first FLUSH LOGS in the test.
*/
DBUG_EXECUTE_IF("let_first_flush_log_change_timestamp",
if (debug_not_change_ts_if_art_event == 1
&& is_artificial_event())
{
debug_not_change_ts_if_art_event= 0;
});
#ifndef DBUG_OFF
rli->stmt_done(log_pos,
is_artificial_event() &&
debug_not_change_ts_if_art_event > 0 ? 0 : when);
#else
rli->stmt_done(log_pos, is_artificial_event()? 0 : when);
#endif
DBUG_EXECUTE_IF("let_first_flush_log_change_timestamp",
if (debug_not_change_ts_if_art_event == 0)
{
debug_not_change_ts_if_art_event= 2;
});
}
return 0; // Cannot fail currently
}

View File

@ -65,6 +65,8 @@ pack_row(TABLE *table, MY_BITMAP const* cols,
my_ptrdiff_t const rec_offset= record - table->record[0];
my_ptrdiff_t const def_offset= table->s->default_values - table->record[0];
DBUG_ENTER("pack_row");
/*
We write the null bits and the packed records using one pass
through all the fields. The null bytes are written little-endian,
@ -96,26 +98,14 @@ pack_row(TABLE *table, MY_BITMAP const* cols,
For big-endian machines, we have to make sure that the
length is stored in little-endian format, since this is the
format used for the binlog.
We do this by setting the db_low_byte_first, which is used
inside some store_length() to decide what order to write the
bytes in.
In reality, db_low_byte_first is only set for the legacy table
type ISAM, but in the event of a bug, we need to guarantee
the endianness when writing to the binlog.
This is currently broken for NDB due to BUG#29549, so we
will fix it when NDB has fixed their way of handling BLOBs.
*/
#if 0
bool save= table->s->db_low_byte_first;
table->s->db_low_byte_first= TRUE;
#endif
pack_ptr= field->pack(pack_ptr, field->ptr + offset);
#if 0
table->s->db_low_byte_first= save;
#endif
const uchar *old_pack_ptr= pack_ptr;
pack_ptr= field->pack(pack_ptr, field->ptr + offset,
field->max_data_length(), TRUE);
DBUG_PRINT("debug", ("field: %s; pack_ptr: 0x%lx;"
" pack_ptr':0x%lx; bytes: %d",
field->field_name, (ulong) old_pack_ptr,
(ulong) pack_ptr, pack_ptr - old_pack_ptr));
}
null_mask <<= 1;
@ -143,8 +133,8 @@ pack_row(TABLE *table, MY_BITMAP const* cols,
packed data. If it doesn't, something is very wrong.
*/
DBUG_ASSERT(null_ptr == row_data + null_byte_count);
return static_cast<size_t>(pack_ptr - row_data);
DBUG_DUMP("row_data", row_data, pack_ptr - row_data);
DBUG_RETURN(static_cast<size_t>(pack_ptr - row_data));
}
#endif
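A toy model of the row image pack_row() writes (a sketch under simplifying assumptions: column filtering and field metadata are ignored, and the types below are invented): a null bitmap with one bit per included column, least-significant bit first, followed by the packed bytes of every column that is not NULL.

#include <cassert>
#include <cstdint>
#include <vector>

struct Col { bool is_null; std::vector<uint8_t> packed; };

// Build the row image: null bitmap first, then the non-NULL column values.
static std::vector<uint8_t> pack_row_model(const std::vector<Col> &cols)
{
  std::vector<uint8_t> out((cols.size() + 7) / 8, 0);
  for (size_t i = 0; i < cols.size(); i++)
    if (cols[i].is_null)
      out[i / 8] |= (uint8_t) (1u << (i % 8));       // LSB-first null bits
  for (const Col &c : cols)
    if (!c.is_null)
      out.insert(out.end(), c.packed.begin(), c.packed.end());
  return out;
}

int main()
{
  // Two columns: an INT holding 7 (packed little-endian) and a NULL.
  std::vector<Col> cols = { { false, { 7, 0, 0, 0 } }, { true, {} } };
  std::vector<uint8_t> row = pack_row_model(cols);
  assert(row[0] == 0x02);                            // only column 2 is NULL
  assert(row.size() == 1 + 4);                       // bitmap + packed INT
  return 0;
}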
@ -242,18 +232,14 @@ unpack_row(Relay_log_info const *rli,
Use the master's size information if available else call
normal unpack operation.
*/
#if 0
bool save= table->s->db_low_byte_first;
table->s->db_low_byte_first= TRUE;
#endif
uint16 const metadata= tabledef->field_metadata(i);
if (tabledef && metadata)
pack_ptr= f->unpack(f->ptr, pack_ptr, metadata);
else
pack_ptr= f->unpack(f->ptr, pack_ptr);
#if 0
table->s->db_low_byte_first= save;
#endif
uchar const *const old_pack_ptr= pack_ptr;
pack_ptr= f->unpack(f->ptr, pack_ptr, metadata, TRUE);
DBUG_PRINT("debug", ("field: %s; metadata: 0x%x;"
" pack_ptr: 0x%lx; pack_ptr': 0x%lx; bytes: %d",
f->field_name, metadata,
(ulong) old_pack_ptr, (ulong) pack_ptr,
pack_ptr - old_pack_ptr));
}
null_mask <<= 1;
@ -289,6 +275,8 @@ unpack_row(Relay_log_info const *rli,
*/
DBUG_ASSERT(null_ptr == row_data + master_null_byte_count);
DBUG_DUMP("row_data", row_data, pack_ptr - row_data);
*row_end = pack_ptr;
if (master_reclength)
{

View File

@ -1082,6 +1082,9 @@ bool Relay_log_info::cached_charset_compare(char *charset) const
void Relay_log_info::stmt_done(my_off_t event_master_log_pos,
time_t event_creation_time)
{
#ifndef DBUG_OFF
extern uint debug_not_change_ts_if_art_event;
#endif
clear_flag(IN_STMT);
/*
@ -1121,7 +1124,12 @@ void Relay_log_info::stmt_done(my_off_t event_master_log_pos,
is that value may take some time to display in
Seconds_Behind_Master - not critical).
*/
last_master_timestamp= event_creation_time;
#ifndef DBUG_OFF
if (!(event_creation_time == 0 && debug_not_change_ts_if_art_event > 0))
#else
if (event_creation_time != 0)
#endif
last_master_timestamp= event_creation_time;
}
}

View File

@ -3581,7 +3581,16 @@ static Log_event* next_event(Relay_log_info* rli)
a new event and is queuing it; the false "0" will exist until SQL
finishes executing the new event; it will look abnormal only if
the events have old timestamps (then you get "many", 0, "many").
Transient phases like this can't really be fixed.
Transient phases like this can be fixed by implementing a
Heartbeat event, which gives the slave the status of the
master at times when the master has no new updates to send.
Seconds_Behind_Master would then be zero only when the master
has no more updates in the binlog for the slave. The heartbeat
can be sent in a (small) fraction of slave_net_timeout. Until
that is done, rli->last_master_timestamp is temporarily reset
(for the time spent waiting for the following event) whenever
EOF is reached.
*/
time_t save_timestamp= rli->last_master_timestamp;
rli->last_master_timestamp= 0;
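The paragraph above is easier to follow with a toy model of the save/clear pattern: while the SQL thread sits at relay-log EOF waiting for the I/O thread, last_master_timestamp is cleared so Seconds_Behind_Master reads as zero, and the saved value can be put back once new events arrive. FakeRli and wait_for_more_events are stubs invented for this sketch; the real code blocks on a condition variable.

#include <ctime>

struct FakeRli { time_t last_master_timestamp; };   // invented stand-in

static void wait_for_more_events() { /* would block on a condition variable */ }

static void wait_at_relay_log_eof(FakeRli *rli)
{
  time_t save_timestamp = rli->last_master_timestamp;
  rli->last_master_timestamp = 0;   // "caught up": report 0 seconds behind
  wait_for_more_events();
  rli->last_master_timestamp = save_timestamp;   // resume normal accounting
}

int main()
{
  FakeRli rli = { 12345 };
  wait_at_relay_log_eof(&rli);
  return rli.last_master_timestamp == 12345 ? 0 : 1;
}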

View File

@ -2436,6 +2436,11 @@ class multi_delete :public select_result_interceptor
/* True if at least one table we delete from is not transactional */
bool normal_tables;
bool delete_while_scanning;
/*
Error handling (rollback and binlogging) can already happen in
send_eof(), so send_error() needs a way to find that out afterwards.
*/
bool error_handled;
public:
multi_delete(TABLE_LIST *dt, uint num_of_tables);
@ -2471,6 +2476,11 @@ class multi_update :public select_result_interceptor
/* True if the update operation has made a change in a transactional table */
bool transactional_tables;
bool ignore;
/*
Error handling (rollback and binlogging) can already happen in
send_eof(), so send_error() needs a way to find that out afterwards.
*/
bool error_handled;
public:
multi_update(TABLE_LIST *ut, TABLE_LIST *leaves_list,

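The comment added to both classes describes a small handshake that a standalone sketch can make concrete: when send_eof() has already rolled back and/or binlogged the failed statement, it raises error_handled so a later send_error() call does not repeat that work. The class below is an invented stand-in, not the server's select_result hierarchy.

#include <cstdio>

class multi_change_result {
  bool error_handled;
public:
  multi_change_result() : error_handled(false) {}

  bool send_eof(bool local_error)
  {
    if (local_error)
    {
      // ... rollback / binlog the partly executed statement here ...
      error_handled = true;     // tell send_error() the work is already done
    }
    return local_error;
  }

  void send_error(const char *msg)
  {
    std::fprintf(stderr, "error: %s\n", msg);   // report first
    if (error_handled)
      return;                   // send_eof() already rolled back / binlogged
    // ... otherwise do the rollback / binlog work here ...
  }
};

int main()
{
  multi_change_result r;
  r.send_eof(/*local_error=*/true);
  r.send_error("Duplicate entry");    // early return: nothing done twice
  return 0;
}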
View File

@ -541,7 +541,7 @@ bool mysql_multi_delete_prepare(THD *thd)
multi_delete::multi_delete(TABLE_LIST *dt, uint num_of_tables_arg)
: delete_tables(dt), deleted(0), found(0),
num_of_tables(num_of_tables_arg), error(0),
do_delete(0), transactional_tables(0), normal_tables(0)
do_delete(0), transactional_tables(0), normal_tables(0), error_handled(0)
{
tempfiles= (Unique **) sql_calloc(sizeof(Unique *) * num_of_tables);
}
@ -720,12 +720,14 @@ void multi_delete::send_error(uint errcode,const char *err)
/* First send error what ever it is ... */
my_message(errcode, err, MYF(0));
/* If nothing deleted return */
if (!deleted)
/* The error was already handled, or nothing was deleted and there are no side effects: return */
if (error_handled ||
!thd->transaction.stmt.modified_non_trans_table && !deleted)
DBUG_VOID_RETURN;
/* Something already deleted so we have to invalidate cache */
query_cache_invalidate3(thd, delete_tables, 1);
if (deleted)
query_cache_invalidate3(thd, delete_tables, 1);
/*
If rows from the first table only has been deleted and it is
@ -745,12 +747,29 @@ void multi_delete::send_error(uint errcode,const char *err)
*/
error= 1;
send_eof();
DBUG_ASSERT(error_handled);
DBUG_VOID_RETURN;
}
if (thd->transaction.stmt.modified_non_trans_table)
{
/*
There are only side effects; binlog the statement together with the error.
*/
if (mysql_bin_log.is_open())
{
thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query, thd->query_length,
transactional_tables, FALSE);
}
thd->transaction.all.modified_non_trans_table= true;
}
DBUG_ASSERT(!normal_tables || !deleted || thd->transaction.stmt.modified_non_trans_table);
DBUG_VOID_RETURN;
}
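Assuming the usual replication rule that changes to non-transactional tables cannot be rolled back, the branch above can be summarised by a small decision helper: a failed multi-table DELETE still has to be written to the binlog (together with its error) whenever such a table was already modified, so the slave replays the same partial work. The struct and function below are invented for illustration.

#include <cstdio>

struct stmt_state {                  // invented stand-in for the THD flags
  bool error_handled;                // send_eof() already did the work
  bool modified_non_trans_table;     // e.g. rows in a MyISAM table changed
};

// Decide whether a failed statement must still be binlogged from send_error().
static bool must_binlog_failed_stmt(const stmt_state &s)
{
  if (s.error_handled)
    return false;                    // nothing left to do here
  return s.modified_non_trans_table; // irreversible side effects exist
}

int main()
{
  stmt_state s = { false, true };
  std::printf("binlog failed statement: %s\n",
              must_binlog_failed_stmt(s) ? "yes" : "no");
  return 0;
}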
/*
Do delete from other tables.
Returns values:
@ -880,6 +899,8 @@ bool multi_delete::send_eof()
if (thd->transaction.stmt.modified_non_trans_table)
thd->transaction.all.modified_non_trans_table= TRUE;
}
if (local_error != 0)
error_handled= TRUE; // so that ::send_error() returns early
/* Commit or rollback the current SQL statement */
if (transactional_tables)

View File

@ -2919,6 +2919,13 @@ end_with_restore_list:
SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
OPTION_SETUP_TABLES_DONE,
del_result, unit, select_lex);
res|= thd->net.report_error;
if (unlikely(res))
{
/* If another error was reported earlier then this one will be ignored */
del_result->send_error(ER_UNKNOWN_ERROR, "Execution of the query failed");
del_result->abort();
}
delete del_result;
}
else

View File

@ -29,6 +29,8 @@
#include "event_data_objects.h"
#include <my_dir.h>
#define STR_OR_NIL(S) ((S) ? (S) : "<nil>")
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
#endif
@ -3115,8 +3117,8 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
schema_table_idx= get_schema_table_idx(schema_table);
get_lookup_field_values(thd, cond, tables, &lookup_field_vals);
DBUG_PRINT("INDEX VALUES",("db_name='%s', table_name='%s'",
lookup_field_vals.db_value.str,
lookup_field_vals.table_value.str));
STR_OR_NIL(lookup_field_vals.db_value.str),
STR_OR_NIL(lookup_field_vals.table_value.str)));
if (!lookup_field_vals.wild_db_value && !lookup_field_vals.wild_table_value)
{
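A tiny usage sketch of the STR_OR_NIL guard added above: the values go into a printf-style %s conversion, so a missing (NULL) lookup value must be replaced by a visible placeholder first. The main() below is illustrative only.

#include <cstdio>

#define STR_OR_NIL(S) ((S) ? (S) : "<nil>")

int main()
{
  const char *db_value = 0;            // no database filter supplied
  const char *table_value = "t1";
  std::printf("db_name='%s', table_name='%s'\n",
              STR_OR_NIL(db_value), STR_OR_NIL(table_value));
  return 0;
}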

View File

@ -1215,8 +1215,8 @@ multi_update::multi_update(TABLE_LIST *table_list,
:all_tables(table_list), leaves(leaves_list), update_tables(0),
tmp_tables(0), updated(0), found(0), fields(field_list),
values(value_list), table_count(0), copy_field(0),
handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(0),
transactional_tables(1), ignore(ignore_arg)
handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
transactional_tables(1), ignore(ignore_arg), error_handled(0)
{}
@ -1418,7 +1418,6 @@ multi_update::initialize_tables(JOIN *join)
if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
DBUG_RETURN(1);
main_table=join->join_tab->table;
trans_safe= transactional_tables= main_table->file->has_transactions();
table_to_update= 0;
/* Any update has at least one pair (field, value) */
@ -1713,12 +1712,14 @@ void multi_update::send_error(uint errcode,const char *err)
/* First send error what ever it is ... */
my_error(errcode, MYF(0), err);
/* If nothing updated return */
if (updated == 0) /* the counter might be reset in send_eof */
return; /* and then the query has been binlogged */
/* The error was already handled, or nothing was updated and there are no side effects: return */
if (error_handled ||
!thd->transaction.stmt.modified_non_trans_table && !updated)
return;
/* Something already updated so we have to invalidate cache */
query_cache_invalidate3(thd, update_tables, 1);
if (updated)
query_cache_invalidate3(thd, update_tables, 1);
/*
If all tables that has been updated are trans safe then just do rollback.
If not attempt to do remaining updates.
@ -1754,8 +1755,7 @@ void multi_update::send_error(uint errcode,const char *err)
thd->query, thd->query_length,
transactional_tables, FALSE);
}
if (!trans_safe)
thd->transaction.all.modified_non_trans_table= TRUE;
thd->transaction.all.modified_non_trans_table= TRUE;
}
DBUG_ASSERT(trans_safe || !updated || thd->transaction.stmt.modified_non_trans_table);
@ -1978,8 +1978,6 @@ bool multi_update::send_eof()
{
if (local_error == 0)
thd->clear_error();
else
updated= 0; /* if there's an error binlog it here not in ::send_error */
if (thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query, thd->query_length,
transactional_tables, FALSE) &&
@ -1991,6 +1989,8 @@ bool multi_update::send_eof()
if (thd->transaction.stmt.modified_non_trans_table)
thd->transaction.all.modified_non_trans_table= TRUE;
}
if (local_error != 0)
error_handled= TRUE; // so that ::send_error() returns early
if (transactional_tables)
{
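A hedged reading of the constructor change above (trans_safe now starts as 1): the statement is assumed transaction-safe until a non-transactional table is actually modified, and that flag later decides between a plain rollback and binlogging the statement together with its error. The types below are invented to sketch that bookkeeping, not the server's.

#include <vector>

struct target_table {
  bool transactional;   // true for InnoDB-like engines
  bool modified;        // rows were changed in this table
};

static bool statement_is_trans_safe(const std::vector<target_table> &tables)
{
  bool trans_safe = true;                    // optimistic initial value
  for (const target_table &t : tables)
    if (t.modified && !t.transactional)
      trans_safe = false;                    // irreversible change happened
  return trans_safe;
}

int main()
{
  std::vector<target_table> tables = { { true, true }, { false, false } };
  bool safe_before = statement_is_trans_safe(tables);    // still safe
  tables[1].modified = true;                              // MyISAM row changed
  bool safe_after = statement_is_trans_safe(tables);      // no longer safe
  return (safe_before && !safe_after) ? 0 : 1;
}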