Manual merge of mysql-5.1-bugteam to mysql-trunk-merge.

Conflicts:

   conflict      Makefile.am
   conflict      mysql-test/suite/rpl/r/rpl_stm_mixing_engines.result
   conflict      mysql-test/suite/rpl/t/rpl_tmp_table_and_DDL.test
   conflict      sql/opt_sum.cc
   conflict      sql/set_var.cc
   conflict      sql/sql_base.cc
   conflict      sql/sql_priv.h
   conflict      sql/sql_show.cc
Author: Alexey Kopytov, 2010-05-24 00:41:18 +04:00
Commit: b69a31fad5
38 changed files with 784 additions and 269 deletions

View File

@ -154,8 +154,8 @@ test-bt:
-if [ -e bin/ndbd -o -e storage/ndb/src/kernel/ndbd ] ; then \
cd mysql-test ; \
MTR_BUILD_THREAD=auto \
@PERL@ ./mysql-test-run.pl --comment=ndb+rpl_ndb+ps --force --timer \
--ps-protocol --mysqld=--binlog-format=row --suite=ndb,rpl_ndb $(EXP) ; \
@PERL@ ./mysql-test-run.pl --comment=ndb+ps --force --timer \
--ps-protocol --mysqld=--binlog-format=row --suite=ndb $(EXP) ; \
MTR_BUILD_THREAD=auto \
@PERL@ ./mysql-test-run.pl --comment=ndb --force --timer \
--with-ndbcluster-only $(EXP) ; \

View File

@ -1715,10 +1715,7 @@ void _db_end_()
while ((discard= cs->stack))
{
if (discard == &init_settings)
{
FreeState (cs, discard, 0);
break;
}
cs->stack= discard->next;
FreeState(cs, discard, 1);
}

View File

@ -214,7 +214,7 @@ CREATE TABLE t5 (a int, PRIMARY KEY (a)) ENGINE=InnoDB;
# execute
--error ER_DUP_ENTRY
UPDATE t3,t4 SET t3.a=t4.a + bug27417(1);
UPDATE t3,t4 SET t3.a = t4.a + bug27417(1) where t3.a = 1;
# check
select count(*) from t1 /* must be 1 */;

View File

@ -0,0 +1,68 @@
#
# BUG#52868: Wrong handling of NULL value during update, replication out of sync
#
-- echo ## case #1 - last_null_bit_pos==0 in record_compare without X bit
-- source include/master-slave-reset.inc
-- connection master
-- eval CREATE TABLE t1 (c1 bigint(20) DEFAULT 0, c2 bigint(20) DEFAULT 0, c3 bigint(20) DEFAULT 0, c4 varchar(1) DEFAULT '', c5 bigint(20) DEFAULT 0, c6 bigint(20) DEFAULT 0, c7 bigint(20) DEFAULT 0, c8 bigint(20) DEFAULT 0) ENGINE=$engine DEFAULT CHARSET=latin1
INSERT INTO t1 ( c5, c6 ) VALUES ( 1 , 35 );
INSERT INTO t1 ( c5, c6 ) VALUES ( NULL, 35 );
-- disable_warnings
UPDATE t1 SET c5 = 'a';
-- enable_warnings
-- sync_slave_with_master
-- let $diff_table_1= master:test.t1
-- let $diff_table_2= slave:test.t1
-- source include/diff_tables.inc
--connection master
DROP TABLE t1;
-- sync_slave_with_master
-- echo ## case #1.1 - last_null_bit_pos==0 in record_compare with X bit
-- echo ## (1 column less and no varchar)
-- source include/master-slave-reset.inc
-- connection master
-- eval CREATE TABLE t1 (c1 bigint(20) DEFAULT 0, c2 bigint(20) DEFAULT 0, c3 bigint(20) DEFAULT 0, c4 bigint(20) DEFAULT 0, c5 bigint(20) DEFAULT 0, c6 bigint(20) DEFAULT 0, c7 bigint(20) DEFAULT 0) ENGINE=$engine DEFAULT CHARSET=latin1
INSERT INTO t1 ( c5, c6 ) VALUES ( 1 , 35 );
INSERT INTO t1 ( c5, c6 ) VALUES ( NULL, 35 );
-- disable_warnings
UPDATE t1 SET c5 = 'a';
-- enable_warnings
-- sync_slave_with_master
-- let $diff_table_1= master:test.t1
-- let $diff_table_2= slave:test.t1
-- source include/diff_tables.inc
--connection master
DROP TABLE t1;
-- sync_slave_with_master
-- echo ## case #2 - X bit is wrongly set.
-- source include/master-slave-reset.inc
-- connection master
-- eval CREATE TABLE t1 (c1 int, c2 varchar(1) default '') ENGINE=$engine DEFAULT CHARSET= latin1
INSERT INTO t1(c1) VALUES (10);
INSERT INTO t1(c1) VALUES (NULL);
UPDATE t1 SET c1= 0;
-- sync_slave_with_master
-- let $diff_table_1= master:test.t1
-- let $diff_table_2= slave:test.t1
-- source include/diff_tables.inc
-- connection master
DROP TABLE t1;
-- sync_slave_with_master

View File

@ -490,4 +490,13 @@ END |
DELETE IGNORE FROM t1;
ERROR HY000: Can't update table 't1' in stored function/trigger because it is already used by statement which invoked this stored function/trigger.
DROP TABLE t1;
#
# Bug #53450: Crash/assertion
# "virtual int ha_myisam::index_first(uchar*)") at assert.c:81
#
CREATE TABLE t1 (a INT, b INT, c INT,
INDEX(a), INDEX(b), INDEX(c));
INSERT INTO t1 VALUES (1,2,3), (4,5,6), (7,8,9);
DELETE FROM t1 WHERE a = 10 OR b = 20 ORDER BY c LIMIT 1;
DROP TABLE t1;
End of 5.1 tests

View File

@ -1791,4 +1791,24 @@ aa b COUNT( b)
1 10 1
DROP TABLE t1, t2;
#
# Bug#52051: Aggregate functions incorrectly returns NULL from outer
# join query
#
CREATE TABLE t1 (a INT PRIMARY KEY);
CREATE TABLE t2 (a INT PRIMARY KEY);
INSERT INTO t2 VALUES (1), (2);
EXPLAIN SELECT MIN(t2.a) FROM t2 LEFT JOIN t1 ON t2.a = t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
SELECT MIN(t2.a) FROM t2 LEFT JOIN t1 ON t2.a = t1.a;
MIN(t2.a)
1
EXPLAIN SELECT MAX(t2.a) FROM t2 LEFT JOIN t1 ON t2.a = t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
SELECT MAX(t2.a) FROM t2 LEFT JOIN t1 ON t2.a = t1.a;
MAX(t2.a)
2
DROP TABLE t1, t2;
#
# End of 5.1 tests

View File

@ -308,6 +308,37 @@ SET @@global.general_log = @old_general_log;
SET @@global.general_log_file = @old_general_log_file;
SET @@global.slow_query_log = @old_slow_query_log;
SET @@global.slow_query_log_file = @old_slow_query_log_file;
#
# Bug #49756 Rows_examined is always 0 in the slow query log
# for update statements
#
SET @old_log_output = @@global.log_output;
SET GLOBAL log_output = "TABLE";
SET GLOBAL slow_query_log = ON;
SET GLOBAL long_query_time = 0.001;
TRUNCATE TABLE mysql.slow_log;
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (b INT, PRIMARY KEY (b));
INSERT INTO t2 VALUES (3),(4);
INSERT INTO t1 VALUES (1+sleep(.01)),(2);
INSERT INTO t1 SELECT b+sleep(.01) from t2;
UPDATE t1 SET a=a+sleep(.01) WHERE a>2;
UPDATE t1 SET a=a+sleep(.01) ORDER BY a DESC;
UPDATE t2 set b=b+sleep(.01) limit 1;
UPDATE t1 SET a=a+sleep(.01) WHERE a in (SELECT b from t2);
DELETE FROM t1 WHERE a=a+sleep(.01) ORDER BY a LIMIT 2;
SELECT rows_examined,sql_text FROM mysql.slow_log;
rows_examined sql_text
0 INSERT INTO t1 VALUES (1+sleep(.01)),(2)
2 INSERT INTO t1 SELECT b+sleep(.01) from t2
4 UPDATE t1 SET a=a+sleep(.01) WHERE a>2
8 UPDATE t1 SET a=a+sleep(.01) ORDER BY a DESC
2 UPDATE t2 set b=b+sleep(.01) limit 1
4 UPDATE t1 SET a=a+sleep(.01) WHERE a in (SELECT b from t2)
6 DELETE FROM t1 WHERE a=a+sleep(.01) ORDER BY a LIMIT 2
DROP TABLE t1,t2;
TRUNCATE TABLE mysql.slow_log;
# end of bug#49756
End of 5.1 tests
# --
@ -334,6 +365,8 @@ SELECT @@general_log_file = @my_glf;
1
SET GLOBAL general_log_file = @old_general_log_file;
# Close connection con1
SET GLOBAL long_query_time = DEFAULT;
SET GLOBAL log_output = @old_log_output;
SET global general_log = @old_general_log;
SET global general_log_file = @old_general_log_file;
SET global slow_query_log = @old_slow_query_log;

View File

@ -7,6 +7,6 @@ ERROR HY000: Incorrect usage of ALTER DATABASE UPGRADE DATA DIRECTORY NAME and n
ALTER DATABASE `#mysql51#not-yet` UPGRADE DATA DIRECTORY NAME;
ERROR HY000: Incorrect usage of ALTER DATABASE UPGRADE DATA DIRECTORY NAME and name
ALTER DATABASE `#mysql50#` UPGRADE DATA DIRECTORY NAME;
ERROR HY000: Incorrect usage of ALTER DATABASE UPGRADE DATA DIRECTORY NAME and name
ERROR 42000: Incorrect database name '#mysql50#'
ALTER DATABASE `#mysql50#upgrade-me` UPGRADE DATA DIRECTORY NAME;
ERROR 42000: Unknown database '#mysql50#upgrade-me'

View File

@ -112,3 +112,31 @@ select * from `a-b-c`.v1;
f1
drop database `a-b-c`;
use test;
# End of 5.0 tests
#
# Bug #53804: serious flaws in the alter database .. upgrade data
# directory name command
#
ALTER DATABASE `#mysql50#:` UPGRADE DATA DIRECTORY NAME;
ERROR 42000: Unknown database '#mysql50#:'
ALTER DATABASE `#mysql50#.` UPGRADE DATA DIRECTORY NAME;
ERROR 42000: Incorrect database name '#mysql50#.'
ALTER DATABASE `#mysql50#../` UPGRADE DATA DIRECTORY NAME;
ERROR 42000: Incorrect database name '#mysql50#../'
ALTER DATABASE `#mysql50#../..` UPGRADE DATA DIRECTORY NAME;
ERROR 42000: Incorrect database name '#mysql50#../..'
ALTER DATABASE `#mysql50#../../` UPGRADE DATA DIRECTORY NAME;
ERROR 42000: Incorrect database name '#mysql50#../../'
ALTER DATABASE `#mysql50#./blablabla` UPGRADE DATA DIRECTORY NAME;
ERROR 42000: Incorrect database name '#mysql50#./blablabla'
ALTER DATABASE `#mysql50#../blablabla` UPGRADE DATA DIRECTORY NAME;
ERROR 42000: Incorrect database name '#mysql50#../blablabla'
ALTER DATABASE `#mysql50#/` UPGRADE DATA DIRECTORY NAME;
ERROR 42000: Incorrect database name '#mysql50#/'
ALTER DATABASE `#mysql50#/.` UPGRADE DATA DIRECTORY NAME;
ERROR 42000: Incorrect database name '#mysql50#/.'
USE `#mysql50#.`;
ERROR 42000: Incorrect database name '#mysql50#.'
USE `#mysql50#../blablabla`;
ERROR 42000: Incorrect database name '#mysql50#../blablabla'
# End of 5.1 tests

View File

@ -1,3 +1,4 @@
SET @old_debug = @@GLOBAL.debug;
set debug= 'T';
select @@debug;
@@debug
@ -29,4 +30,5 @@ SET GLOBAL debug='';
SELECT @@global.debug;
@@global.debug
SET GLOBAL debug=@old_debug;
End of 5.1 tests

View File

@ -848,11 +848,11 @@ delete from t4;
insert into t3 values (1,1),(2,2);
insert into t4 values (1,1),(2,2);
reset master;
UPDATE t3,t4 SET t3.a=t4.a + bug27417(1);
UPDATE t3,t4 SET t3.a = t4.a + bug27417(1) where t3.a = 1;
ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
select count(*) from t1 /* must be 1 */;
count(*)
2
1
drop table t4;
delete from t1;
delete from t2;

View File

@ -886,7 +886,7 @@ delete from t4;
insert into t3 values (1,1),(2,2);
insert into t4 values (1,1),(2,2);
reset master;
UPDATE t3,t4 SET t3.a=t4.a + bug27417(1);
UPDATE t3,t4 SET t3.a = t4.a + bug27417(1) where t3.a = 1;
ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
select count(*) from t1 /* must be 1 */;
count(*)

View File

@ -0,0 +1,46 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
## case #1 - last_null_bit_pos==0 in record_compare without X bit
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE t1 (c1 bigint(20) DEFAULT 0, c2 bigint(20) DEFAULT 0, c3 bigint(20) DEFAULT 0, c4 varchar(1) DEFAULT '', c5 bigint(20) DEFAULT 0, c6 bigint(20) DEFAULT 0, c7 bigint(20) DEFAULT 0, c8 bigint(20) DEFAULT 0) ENGINE=InnoDB DEFAULT CHARSET=latin1;
INSERT INTO t1 ( c5, c6 ) VALUES ( 1 , 35 );
INSERT INTO t1 ( c5, c6 ) VALUES ( NULL, 35 );
UPDATE t1 SET c5 = 'a';
Comparing tables master:test.t1 and slave:test.t1
DROP TABLE t1;
## case #1.1 - last_null_bit_pos==0 in record_compare with X bit
## (1 column less and no varchar)
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE t1 (c1 bigint(20) DEFAULT 0, c2 bigint(20) DEFAULT 0, c3 bigint(20) DEFAULT 0, c4 bigint(20) DEFAULT 0, c5 bigint(20) DEFAULT 0, c6 bigint(20) DEFAULT 0, c7 bigint(20) DEFAULT 0) ENGINE=InnoDB DEFAULT CHARSET=latin1;
INSERT INTO t1 ( c5, c6 ) VALUES ( 1 , 35 );
INSERT INTO t1 ( c5, c6 ) VALUES ( NULL, 35 );
UPDATE t1 SET c5 = 'a';
Comparing tables master:test.t1 and slave:test.t1
DROP TABLE t1;
## case #2 - X bit is wrongly set.
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE t1 (c1 int, c2 varchar(1) default '') ENGINE=InnoDB DEFAULT CHARSET= latin1;
INSERT INTO t1(c1) VALUES (10);
INSERT INTO t1(c1) VALUES (NULL);
UPDATE t1 SET c1= 0;
Comparing tables master:test.t1 and slave:test.t1
DROP TABLE t1;

View File

@ -0,0 +1,60 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
## case #1 - last_null_bit_pos==0 in record_compare without X bit
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE t1 (c1 bigint(20) DEFAULT 0, c2 bigint(20) DEFAULT 0, c3 bigint(20) DEFAULT 0, c4 varchar(1) DEFAULT '', c5 bigint(20) DEFAULT 0, c6 bigint(20) DEFAULT 0, c7 bigint(20) DEFAULT 0, c8 bigint(20) DEFAULT 0) ENGINE=MyISAM DEFAULT CHARSET=latin1;
INSERT INTO t1 ( c5, c6 ) VALUES ( 1 , 35 );
INSERT INTO t1 ( c5, c6 ) VALUES ( NULL, 35 );
UPDATE t1 SET c5 = 'a';
Comparing tables master:test.t1 and slave:test.t1
DROP TABLE t1;
## case #1.1 - last_null_bit_pos==0 in record_compare with X bit
## (1 column less and no varchar)
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE t1 (c1 bigint(20) DEFAULT 0, c2 bigint(20) DEFAULT 0, c3 bigint(20) DEFAULT 0, c4 bigint(20) DEFAULT 0, c5 bigint(20) DEFAULT 0, c6 bigint(20) DEFAULT 0, c7 bigint(20) DEFAULT 0) ENGINE=MyISAM DEFAULT CHARSET=latin1;
INSERT INTO t1 ( c5, c6 ) VALUES ( 1 , 35 );
INSERT INTO t1 ( c5, c6 ) VALUES ( NULL, 35 );
UPDATE t1 SET c5 = 'a';
Comparing tables master:test.t1 and slave:test.t1
DROP TABLE t1;
## case #2 - X bit is wrongly set.
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE t1 (c1 int, c2 varchar(1) default '') ENGINE=MyISAM DEFAULT CHARSET= latin1;
INSERT INTO t1(c1) VALUES (10);
INSERT INTO t1(c1) VALUES (NULL);
UPDATE t1 SET c1= 0;
Comparing tables master:test.t1 and slave:test.t1
DROP TABLE t1;
## coverage purposes - Field_bits
## 1 X bit + 2 Null bits + 5 bits => last_null_bit_pos==0
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE t1 (c1 bigint(20) DEFAULT 0, c2 bit(5)) ENGINE=MyISAM DEFAULT CHARSET=latin1;
INSERT INTO t1(c1,c2) VALUES (10, b'1');
INSERT INTO t1(c1,c2) VALUES (NULL, b'1');
UPDATE t1 SET c1= 0;
Comparing tables master:test.t1 and slave:test.t1
DROP TABLE t1;

View File

@ -11314,3 +11314,35 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE IF EXISTS tt_xx_1
###################################################################################
# CLEAN
###################################################################################
DROP TABLE tt_1;
DROP TABLE tt_2;
DROP TABLE tt_3;
DROP TABLE tt_4;
DROP TABLE nt_1;
DROP TABLE nt_2;
DROP TABLE nt_3;
DROP TABLE nt_4;
DROP PROCEDURE pc_i_tt_3;
DROP FUNCTION f1;
DROP FUNCTION f2;
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE `t1` (
`c1` int(10) unsigned NOT NULL AUTO_INCREMENT,
`c2` tinyint(1) unsigned DEFAULT NULL,
`c3` varchar(300) DEFAULT NULL,
`c4` int(10) unsigned NOT NULL,
`c5` int(10) unsigned DEFAULT NULL,
PRIMARY KEY (`c1`)) ENGINE=MyISAM DEFAULT CHARSET=latin1;
ALTER TABLE `t1` Engine=InnoDB;
SET AUTOCOMMIT=0;
INSERT INTO t1 (c1,c2,c3,c4,c5) VALUES (1, 1, 'X', 1, NULL);
COMMIT;
ROLLBACK;
SET AUTOCOMMIT=1;
Comparing tables master:test.t1 and slave:test.t1
DROP TABLE `t1`;

View File

@ -0,0 +1,10 @@
-- source include/have_binlog_format_row.inc
-- source include/master-slave.inc
-- source include/have_innodb.inc
#
# BUG#52868 Wrong handling of NULL value during update, replication out of sync
#
-- let $engine= InnoDB
-- source extra/rpl_tests/rpl_record_compare.test

View File

@ -0,0 +1,31 @@
-- source include/have_binlog_format_row.inc
-- source include/master-slave.inc
#
# BUG#52868 Wrong handling of NULL value during update, replication out of sync
#
-- let $engine= MyISAM
-- source extra/rpl_tests/rpl_record_compare.test
-- echo ## coverage purposes - Field_bits
-- echo ## 1 X bit + 2 Null bits + 5 bits => last_null_bit_pos==0
## Added here because AFAIK it's only MyISAM and NDB that use Field_bits
-- source include/master-slave-reset.inc
-- connection master
-- eval CREATE TABLE t1 (c1 bigint(20) DEFAULT 0, c2 bit(5)) ENGINE=$engine DEFAULT CHARSET=latin1
INSERT INTO t1(c1,c2) VALUES (10, b'1');
INSERT INTO t1(c1,c2) VALUES (NULL, b'1');
UPDATE t1 SET c1= 0;
-- sync_slave_with_master
-- let $diff_table_1= master:test.t1
-- let $diff_table_2= slave:test.t1
-- source include/diff_tables.inc
-- connection master
DROP TABLE t1;
-- sync_slave_with_master

View File

@ -8,3 +8,41 @@
let $engine_type=Innodb;
--source extra/rpl_tests/rpl_mixing_engines.test
#
# BUG#49522: Replication problem with mixed MyISAM/InnoDB
#
-- source include/master-slave-reset.inc
-- connection master
CREATE TABLE `t1` (
`c1` int(10) unsigned NOT NULL AUTO_INCREMENT,
`c2` tinyint(1) unsigned DEFAULT NULL,
`c3` varchar(300) DEFAULT NULL,
`c4` int(10) unsigned NOT NULL,
`c5` int(10) unsigned DEFAULT NULL,
PRIMARY KEY (`c1`)) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-- sync_slave_with_master
ALTER TABLE `t1` Engine=InnoDB;
-- connection master
SET AUTOCOMMIT=0;
INSERT INTO t1 (c1,c2,c3,c4,c5) VALUES (1, 1, 'X', 1, NULL);
COMMIT;
ROLLBACK;
SET AUTOCOMMIT=1;
-- sync_slave_with_master
-- let $diff_table_1=master:test.t1
-- let $diff_table_2=slave:test.t1
-- source include/diff_tables.inc
-- connection master
DROP TABLE `t1`;
-- sync_slave_with_master
-- source include/master-slave-end.inc

View File

@ -19,6 +19,8 @@ mysqld=
ndbcluster
# Turn on bin logging
log-bin= master-bin
# Cluster only supports row format
binlog-format= row
[mysqld.1.1]
@ -32,6 +34,8 @@ log-bin= master-bin
log-bin= slave-bin
relay-log= slave-relay-bin
# Cluster only supports row format
binlog-format= row
init-rpl-role= slave
log-slave-updates

View File

@ -4,6 +4,7 @@ reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
SET binlog_format = STATEMENT;
*** Test 1 ***

View File

@ -27,9 +27,11 @@
--disable_query_log
--source include/have_ndb.inc
--source include/have_innodb.inc
--source include/have_binlog_format_statement.inc
--source include/ndb_master-slave.inc
--enable_query_log
# statement format is supported because master uses innodb
SET binlog_format = STATEMENT;
let $off_set = 6;
let $rpl_format = 'SBR';
disable_query_log;

View File

@ -527,5 +527,17 @@ DELETE IGNORE FROM t1;
DROP TABLE t1;
--echo #
--echo # Bug #53450: Crash/assertion
--echo # "virtual int ha_myisam::index_first(uchar*)") at assert.c:81
--echo #
CREATE TABLE t1 (a INT, b INT, c INT,
INDEX(a), INDEX(b), INDEX(c));
INSERT INTO t1 VALUES (1,2,3), (4,5,6), (7,8,9);
DELETE FROM t1 WHERE a = 10 OR b = 20 ORDER BY c LIMIT 1;
DROP TABLE t1;
--echo End of 5.1 tests

View File

@ -1206,6 +1206,21 @@ SELECT (SELECT t2.a FROM t2 WHERE t2.a = t1.a) aa, b, COUNT( b)
DROP TABLE t1, t2;
--echo #
--echo # Bug#52051: Aggregate functions incorrectly returns NULL from outer
--echo # join query
--echo #
CREATE TABLE t1 (a INT PRIMARY KEY);
CREATE TABLE t2 (a INT PRIMARY KEY);
INSERT INTO t2 VALUES (1), (2);
EXPLAIN SELECT MIN(t2.a) FROM t2 LEFT JOIN t1 ON t2.a = t1.a;
SELECT MIN(t2.a) FROM t2 LEFT JOIN t1 ON t2.a = t1.a;
EXPLAIN SELECT MAX(t2.a) FROM t2 LEFT JOIN t1 ON t2.a = t1.a;
SELECT MAX(t2.a) FROM t2 LEFT JOIN t1 ON t2.a = t1.a;
DROP TABLE t1, t2;
--echo #
--echo # End of 5.1 tests

View File

@ -313,6 +313,42 @@ SET @@global.general_log_file = @old_general_log_file;
SET @@global.slow_query_log = @old_slow_query_log;
SET @@global.slow_query_log_file = @old_slow_query_log_file;
###########################################################################
--echo #
--echo # Bug #49756 Rows_examined is always 0 in the slow query log
--echo # for update statements
--echo #
SET @old_log_output = @@global.log_output;
SET GLOBAL log_output = "TABLE";
SET GLOBAL slow_query_log = ON;
SET GLOBAL long_query_time = 0.001;
# clear slow_log of any residual slow queries
TRUNCATE TABLE mysql.slow_log;
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (b INT, PRIMARY KEY (b));
INSERT INTO t2 VALUES (3),(4);
connect (con2,localhost,root,,);
INSERT INTO t1 VALUES (1+sleep(.01)),(2);
INSERT INTO t1 SELECT b+sleep(.01) from t2;
UPDATE t1 SET a=a+sleep(.01) WHERE a>2;
UPDATE t1 SET a=a+sleep(.01) ORDER BY a DESC;
UPDATE t2 set b=b+sleep(.01) limit 1;
UPDATE t1 SET a=a+sleep(.01) WHERE a in (SELECT b from t2);
DELETE FROM t1 WHERE a=a+sleep(.01) ORDER BY a LIMIT 2;
SELECT rows_examined,sql_text FROM mysql.slow_log;
disconnect con2;
connection default;
DROP TABLE t1,t2;
TRUNCATE TABLE mysql.slow_log;
--echo # end of bug#49756
--echo End of 5.1 tests
@ -361,6 +397,8 @@ disconnect con1;
connection default;
# Reset global system variables to initial values if forgotten somewhere above.
SET GLOBAL long_query_time = DEFAULT;
SET GLOBAL log_output = @old_log_output;
SET global general_log = @old_general_log;
SET global general_log_file = @old_general_log_file;
SET global slow_query_log = @old_slow_query_log;

View File

@ -1988,6 +1988,7 @@ drop table if exists `load`;
create table `load` (a varchar(255));
--copy_file std_data/words.dat $MYSQLTEST_VARDIR/tmp/load.txt
--chmod 0644 $MYSQLTEST_VARDIR/tmp/load.txt
--exec $MYSQL_IMPORT --ignore test $MYSQLTEST_VARDIR/tmp/load.txt

View File

@ -44,7 +44,7 @@ ALTER DATABASE `#mysql41#not-supported` UPGRADE DATA DIRECTORY NAME;
--error ER_WRONG_USAGE
ALTER DATABASE `#mysql51#not-yet` UPGRADE DATA DIRECTORY NAME;
--error ER_WRONG_USAGE
--error ER_WRONG_DB_NAME
ALTER DATABASE `#mysql50#` UPGRADE DATA DIRECTORY NAME;
--error ER_BAD_DB_ERROR

View File

@ -137,3 +137,37 @@ select * from `a-b-c`.v1;
--enable_ps_protocol
drop database `a-b-c`;
use test;
--echo # End of 5.0 tests
--echo #
--echo # Bug #53804: serious flaws in the alter database .. upgrade data
--echo # directory name command
--echo #
--error ER_BAD_DB_ERROR
ALTER DATABASE `#mysql50#:` UPGRADE DATA DIRECTORY NAME;
--error ER_WRONG_DB_NAME
ALTER DATABASE `#mysql50#.` UPGRADE DATA DIRECTORY NAME;
--error ER_WRONG_DB_NAME
ALTER DATABASE `#mysql50#../` UPGRADE DATA DIRECTORY NAME;
--error ER_WRONG_DB_NAME
ALTER DATABASE `#mysql50#../..` UPGRADE DATA DIRECTORY NAME;
--error ER_WRONG_DB_NAME
ALTER DATABASE `#mysql50#../../` UPGRADE DATA DIRECTORY NAME;
--error ER_WRONG_DB_NAME
ALTER DATABASE `#mysql50#./blablabla` UPGRADE DATA DIRECTORY NAME;
--error ER_WRONG_DB_NAME
ALTER DATABASE `#mysql50#../blablabla` UPGRADE DATA DIRECTORY NAME;
--error ER_WRONG_DB_NAME
ALTER DATABASE `#mysql50#/` UPGRADE DATA DIRECTORY NAME;
--error ER_WRONG_DB_NAME
ALTER DATABASE `#mysql50#/.` UPGRADE DATA DIRECTORY NAME;
--error ER_WRONG_DB_NAME
USE `#mysql50#.`;
--error ER_WRONG_DB_NAME
USE `#mysql50#../blablabla`;
--echo # End of 5.1 tests

View File

@ -1,5 +1,7 @@
--source include/have_debug.inc
SET @old_debug = @@GLOBAL.debug;
#
# Bug#34678 @@debug variable's incremental mode
#
@ -31,5 +33,6 @@ SELECT @@global.debug;
SET GLOBAL debug='';
SELECT @@global.debug;
SET GLOBAL debug=@old_debug;
--echo End of 5.1 tests

View File

@ -8927,11 +8927,28 @@ static bool record_compare(TABLE *table)
{
for (int i = 0 ; i < 2 ; ++i)
{
saved_x[i]= table->record[i][0];
saved_filler[i]= table->record[i][table->s->null_bytes - 1];
table->record[i][0]|= 1U;
table->record[i][table->s->null_bytes - 1]|=
256U - (1U << table->s->last_null_bit_pos);
/*
If we have an X bit then we need to take care of it.
*/
if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD))
{
saved_x[i]= table->record[i][0];
table->record[i][0]|= 1U;
}
/*
If (last_null_bit_pos == 0 && null_bytes > 1), then:
X bit (if any) + N nullable fields + M Field_bit fields = 8 bits
Ie, the entire byte is used.
*/
if (table->s->last_null_bit_pos > 0)
{
saved_filler[i]= table->record[i][table->s->null_bytes - 1];
table->record[i][table->s->null_bytes - 1]|=
256U - (1U << table->s->last_null_bit_pos);
}
}
}
@ -8971,8 +8988,11 @@ record_compare_exit:
{
for (int i = 0 ; i < 2 ; ++i)
{
table->record[i][0]= saved_x[i];
table->record[i][table->s->null_bytes - 1]= saved_filler[i];
if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD))
table->record[i][0]= saved_x[i];
if (table->s->last_null_bit_pos)
table->record[i][table->s->null_bytes - 1]= saved_filler[i];
}
}
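
The guards added above revolve around one bit-manipulation detail. As a minimal standalone sketch (a hypothetical filler_mask() helper, not code from this patch), the following shows why the filler mask has to be skipped when last_null_bit_pos == 0: with all eight bits of the last null byte in use, the expression from the diff evaluates to 0xFF and would overwrite real null-flag bits, which appears to be the record_compare mismatch behind BUG#52868 referenced in the rpl_record_compare tests above.

// Standalone illustration of the mask used in record_compare() above.
#include <cstdio>

static unsigned filler_mask(unsigned last_null_bit_pos)
{
  // Same expression as in the diff: sets the unused high bits of the
  // last null byte so they do not affect the record comparison.
  return 256U - (1U << last_null_bit_pos);
}

int main()
{
  for (unsigned pos = 0; pos < 8; ++pos)
    std::printf("last_null_bit_pos=%u -> mask=0x%02X%s\n",
                pos, filler_mask(pos),
                pos == 0 ? "  (whole byte in use: mask must not be applied)" : "");
  return 0;
}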

View File

@ -314,12 +314,29 @@ static bool record_compare(TABLE *table)
if (table->s->null_bytes > 0)
{
for (int i = 0 ; i < 2 ; ++i)
{
saved_x[i]= table->record[i][0];
saved_filler[i]= table->record[i][table->s->null_bytes - 1];
table->record[i][0]|= 1U;
table->record[i][table->s->null_bytes - 1]|=
256U - (1U << table->s->last_null_bit_pos);
{
/*
If we have an X bit then we need to take care of it.
*/
if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD))
{
saved_x[i]= table->record[i][0];
table->record[i][0]|= 1U;
}
/*
If (last_null_bit_pos == 0 && null_bytes > 1), then:
X bit (if any) + N nullable fields + M Field_bit fields = 8 bits
Ie, the entire byte is used.
*/
if (table->s->last_null_bit_pos > 0)
{
saved_filler[i]= table->record[i][table->s->null_bytes - 1];
table->record[i][table->s->null_bytes - 1]|=
256U - (1U << table->s->last_null_bit_pos);
}
}
}
@ -359,8 +376,11 @@ record_compare_exit:
{
for (int i = 0 ; i < 2 ; ++i)
{
table->record[i][0]= saved_x[i];
table->record[i][table->s->null_bytes - 1]= saved_filler[i];
if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD))
table->record[i][0]= saved_x[i];
if (table->s->last_null_bit_pos > 0)
table->record[i][table->s->null_bytes - 1]= saved_filler[i];
}
}

View File

@ -89,6 +89,123 @@ static ulonglong get_exact_record_count(TABLE_LIST *tables)
}
/**
Use index to read MIN(field) value.
@param table Table object
@param ref Reference to the structure where we store the key value
@item_field Field used in MIN()
@range_fl Whether range endpoint is strict less than
@prefix_len Length of common key part for the range
@retval
0 No errors
HA_ERR_... Otherwise
*/
static int get_index_min_value(TABLE *table, TABLE_REF *ref,
Item_field *item_field, uint range_fl,
uint prefix_len)
{
int error;
if (!ref->key_length)
error= table->file->index_first(table->record[0]);
else
{
/*
Use index to replace MIN/MAX functions with their values
according to the following rules:
1) Insert the minimum non-null values where the WHERE clause still
matches, or
2) a NULL value if there are only NULL values for key_part_k.
3) Fail, producing a row of nulls
Implementation: Read the smallest value using the search key. If
the interval is open, read the next value after the search
key. If read fails, and we're looking for a MIN() value for a
nullable column, test if there is an exact match for the key.
*/
if (!(range_fl & NEAR_MIN))
/*
Closed interval: Either The MIN argument is non-nullable, or
we have a >= predicate for the MIN argument.
*/
error= table->file->index_read_map(table->record[0],
ref->key_buff,
make_prev_keypart_map(ref->key_parts),
HA_READ_KEY_OR_NEXT);
else
{
/*
Open interval: There are two cases:
1) We have only MIN() and the argument column is nullable, or
2) there is a > predicate on it, nullability is irrelevant.
We need to scan the next bigger record first.
*/
error= table->file->index_read_map(table->record[0],
ref->key_buff,
make_prev_keypart_map(ref->key_parts),
HA_READ_AFTER_KEY);
/*
If the found record is outside the group formed by the search
prefix, or there is no such record at all, check if all
records in that group have NULL in the MIN argument
column. If that is the case return that NULL.
Check if case 1 from above holds. If it does, we should read
the skipped tuple.
*/
if (item_field->field->real_maybe_null() &&
ref->key_buff[prefix_len] == 1 &&
/*
Last keypart (i.e. the argument to MIN) is set to NULL by
find_key_for_maxmin only if all other keyparts are bound
to constants in a conjunction of equalities. Hence, we
can detect this by checking only if the last keypart is
NULL.
*/
(error == HA_ERR_KEY_NOT_FOUND ||
key_cmp_if_same(table, ref->key_buff, ref->key, prefix_len)))
{
DBUG_ASSERT(item_field->field->real_maybe_null());
error= table->file->index_read_map(table->record[0],
ref->key_buff,
make_prev_keypart_map(ref->key_parts),
HA_READ_KEY_EXACT);
}
}
}
return error;
}
/**
Use index to read MAX(field) value.
@param table Table object
@param ref Reference to the structure where we store the key value
@range_fl Whether range endpoint is strict greater than
@retval
0 No errors
HA_ERR_... Otherwise
*/
static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl)
{
return (ref->key_length ?
table->file->index_read_map(table->record[0], ref->key_buff,
make_prev_keypart_map(ref->key_parts),
range_fl & NEAR_MAX ?
HA_READ_BEFORE_KEY :
HA_READ_PREFIX_LAST_OR_PREV) :
table->file->index_last(table->record[0]));
}
/**
Substitutes constants for some COUNT(), MIN() and MAX() functions.
@ -220,9 +337,11 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
case Item_sum::MIN_FUNC:
case Item_sum::MAX_FUNC:
{
int is_max= test(item_sum->sum_func() == Item_sum::MAX_FUNC);
/*
If MIN(expr) is the first part of a key or if all previous
If MIN/MAX(expr) is the first part of a key or if all previous
parts of the key is found in the COND, then we can use
indexes to find the key.
*/
@ -241,89 +360,26 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
Look for a partial key that can be used for optimization.
If we succeed, ref.key_length will contain the length of
this key, while prefix_len will contain the length of
the beginning of this key without field used in MIN().
the beginning of this key without field used in MIN/MAX().
Type of range for the key part for this field will be
returned in range_fl.
*/
if (table->file->inited || (outer_tables & table->map) ||
!find_key_for_maxmin(0, &ref, item_field->field, conds,
!find_key_for_maxmin(is_max, &ref, item_field->field, conds,
&range_fl, &prefix_len))
{
const_result= 0;
break;
}
error= table->file->ha_index_init((uint) ref.key, 1);
table->file->ha_index_init((uint) ref.key, 1);
if (!ref.key_length)
error= table->file->index_first(table->record[0]);
else
{
/*
Use index to replace MIN/MAX functions with their values
according to the following rules:
1) Insert the minimum non-null values where the WHERE clause still
matches, or
2) a NULL value if there are only NULL values for key_part_k.
3) Fail, producing a row of nulls
error= is_max ?
get_index_max_value(table, &ref, range_fl) :
get_index_min_value(table, &ref, item_field, range_fl,
prefix_len);
Implementation: Read the smallest value using the search key. If
the interval is open, read the next value after the search
key. If read fails, and we're looking for a MIN() value for a
nullable column, test if there is an exact match for the key.
*/
if (!(range_fl & NEAR_MIN))
/*
Closed interval: Either The MIN argument is non-nullable, or
we have a >= predicate for the MIN argument.
*/
error= table->file->index_read_map(table->record[0],
ref.key_buff,
make_prev_keypart_map(ref.key_parts),
HA_READ_KEY_OR_NEXT);
else
{
/*
Open interval: There are two cases:
1) We have only MIN() and the argument column is nullable, or
2) there is a > predicate on it, nullability is irrelevant.
We need to scan the next bigger record first.
*/
error= table->file->index_read_map(table->record[0],
ref.key_buff,
make_prev_keypart_map(ref.key_parts),
HA_READ_AFTER_KEY);
/*
If the found record is outside the group formed by the search
prefix, or there is no such record at all, check if all
records in that group have NULL in the MIN argument
column. If that is the case return that NULL.
Check if case 1 from above holds. If it does, we should read
the skipped tuple.
*/
if (item_field->field->real_maybe_null() &&
ref.key_buff[prefix_len] == 1 &&
/*
Last keypart (i.e. the argument to MIN) is set to NULL by
find_key_for_maxmin only if all other keyparts are bound
to constants in a conjunction of equalities. Hence, we
can detect this by checking only if the last keypart is
NULL.
*/
(error == HA_ERR_KEY_NOT_FOUND ||
key_cmp_if_same(table, ref.key_buff, ref.key, prefix_len)))
{
DBUG_ASSERT(item_field->field->real_maybe_null());
error= table->file->index_read_map(table->record[0],
ref.key_buff,
make_prev_keypart_map(ref.key_parts),
HA_READ_KEY_EXACT);
}
}
}
/* Verify that the read tuple indeed matches the search key */
if (!error && reckey_in_range(0, &ref, item_field->field,
if (!error && reckey_in_range(is_max, &ref, item_field->field,
conds, range_fl, prefix_len))
error= HA_ERR_KEY_NOT_FOUND;
table->set_keyread(FALSE);
@ -355,100 +411,18 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
item_sum->set_aggregator(item_sum->has_with_distinct() ?
Aggregator::DISTINCT_AGGREGATOR :
Aggregator::SIMPLE_AGGREGATOR);
if (!count)
{
/* If count == 0, then we know that is_exact_count == TRUE. */
((Item_sum_min*) item_sum)->aggregator_clear(); /* Set to NULL. */
}
else
((Item_sum_min*) item_sum)->reset(); /* Set to the constant value. */
((Item_sum_min*) item_sum)->make_const();
recalc_const_item= 1;
break;
}
case Item_sum::MAX_FUNC:
{
/*
If MAX(expr) is the first part of a key or if all previous
parts of the key is found in the COND, then we can use
indexes to find the key.
If count == 0 (so is_exact_count == TRUE) and
there're no outer joins, set to NULL,
otherwise set to the constant value.
*/
Item *expr=item_sum->get_arg(0);
if (expr->real_item()->type() == Item::FIELD_ITEM)
if (!count && !outer_tables)
{
uchar key_buff[MAX_KEY_LENGTH];
TABLE_REF ref;
uint range_fl, prefix_len;
ref.key_buff= key_buff;
Item_field *item_field= (Item_field*) (expr->real_item());
TABLE *table= item_field->field->table;
/*
Look for a partial key that can be used for optimization.
If we succeed, ref.key_length will contain the length of
this key, while prefix_len will contain the length of
the beginning of this key without field used in MAX().
Type of range for the key part for this field will be
returned in range_fl.
*/
if (table->file->inited || (outer_tables & table->map) ||
!find_key_for_maxmin(1, &ref, item_field->field, conds,
&range_fl, &prefix_len))
{
const_result= 0;
break;
}
error= table->file->ha_index_init((uint) ref.key, 1);
if (!ref.key_length)
error= table->file->index_last(table->record[0]);
else
error= table->file->index_read_map(table->record[0], key_buff,
make_prev_keypart_map(ref.key_parts),
range_fl & NEAR_MAX ?
HA_READ_BEFORE_KEY :
HA_READ_PREFIX_LAST_OR_PREV);
if (!error && reckey_in_range(1, &ref, item_field->field,
conds, range_fl, prefix_len))
error= HA_ERR_KEY_NOT_FOUND;
table->set_keyread(FALSE);
table->file->ha_index_end();
if (error)
{
if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE)
return HA_ERR_KEY_NOT_FOUND; // No rows matching WHERE
/* HA_ERR_LOCK_DEADLOCK or some other error */
table->file->print_error(error, MYF(ME_FATALERROR));
return(error);
}
removed_tables|= table->map;
}
else if (!expr->const_item() || !is_exact_count)
{
/*
The optimization is not applicable in both cases:
(a) 'expr' is a non-constant expression. Then we can't
replace 'expr' by a constant.
(b) 'expr' is a costant. According to ANSI, MIN/MAX must return
NULL if the query does not return any rows. Thus, if we are not
able to determine if the query returns any rows, we can't apply
the optimization and replace MIN/MAX with a constant.
*/
const_result= 0;
break;
}
item_sum->set_aggregator(item_sum->has_with_distinct() ?
Aggregator::DISTINCT_AGGREGATOR :
Aggregator::SIMPLE_AGGREGATOR);
if (!count)
{
/* If count != 1, then we know that is_exact_count == TRUE. */
((Item_sum_max*) item_sum)->aggregator_clear(); /* Set to NULL. */
item_sum->aggregator_clear();
}
else
((Item_sum_max*) item_sum)->reset(); /* Set to the constant value. */
((Item_sum_max*) item_sum)->make_const();
item_sum->reset();
item_sum->make_const();
recalc_const_item= 1;
break;
}

View File

@ -2051,8 +2051,15 @@ public:
*/
ha_rows sent_row_count;
/*
number of rows we read, sent or not, including in create_sort_index()
/**
Number of rows read and/or evaluated for a statement. Used for
slow log reporting.
An examined row is defined as a row that is read and/or evaluated
according to a statement condition, including in
create_sort_index(). Rows may be counted more than once, e.g., a
statement including ORDER BY could possibly evaluate the row in
filesort() before reading it for e.g. update.
*/
ha_rows examined_row_count;
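
The comment above defines an examined row loosely enough that one physical row may be counted several times. Below is a minimal standalone sketch (hypothetical StatementStats type, not MySQL code) of that double counting, assuming a 4-row table updated with ORDER BY: the filesort pass and the update pass each add 4, matching the value 8 reported for the UPDATE ... ORDER BY statement in the slow_log result earlier in this diff.

#include <cstdio>

struct StatementStats
{
  // Plays the role of THD::examined_row_count in this sketch.
  unsigned long long examined_row_count= 0;
};

int main()
{
  const unsigned long long table_rows= 4;
  StatementStats stats;

  stats.examined_row_count+= table_rows;  // filesort() pass over the table
  stats.examined_row_count+= table_rows;  // update pass re-reading the sorted rows

  std::printf("rows in table: %llu, rows examined: %llu\n",
              table_rows, stats.examined_row_count);
  return 0;
}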

View File

@ -262,6 +262,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
free_underlaid_joins(thd, &thd->lex->select_lex);
DBUG_RETURN(TRUE);
}
thd->examined_row_count+= examined_rows;
/*
Filesort has already found and selected the rows we want to delete,
so we don't need the where clause
@ -279,7 +280,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
free_underlaid_joins(thd, select_lex);
DBUG_RETURN(TRUE);
}
if (usable_index==MAX_KEY)
if (usable_index==MAX_KEY || (select && select->quick))
init_read_record(&info, thd, table, select, 1, 1, FALSE);
else
init_read_record_idx(&info, thd, table, 1, usable_index);
@ -318,6 +319,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
while (!(error=info.read_record(&info)) && !thd->killed &&
! thd->is_error())
{
thd->examined_row_count++;
// thd->is_error() is tested to disallow delete row on error
if (!(select && select->skip_record())&& ! thd->is_error() )
{

View File

@ -4348,6 +4348,15 @@ int fill_schema_coll_charset_app(THD *thd, TABLE_LIST *tables, COND *cond)
}
static inline void copy_field_as_string(Field *to_field, Field *from_field)
{
char buff[MAX_FIELD_WIDTH];
String tmp_str(buff, sizeof(buff), system_charset_info);
from_field->val_str(&tmp_str);
to_field->store(tmp_str.ptr(), tmp_str.length(), system_charset_info);
}
/**
@brief Store record into I_S.PARAMETERS table
@ -4514,18 +4523,26 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
const char *wild, bool full_access, const char *sp_user)
{
String tmp_string;
String sp_db, sp_name, definer;
MYSQL_TIME time;
LEX *lex= thd->lex;
CHARSET_INFO *cs= system_charset_info;
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_DB], &sp_db);
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_NAME], &sp_name);
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_DEFINER],&definer);
char sp_db_buff[NAME_LEN + 1], sp_name_buff[NAME_LEN + 1],
definer_buff[USERNAME_LENGTH + HOSTNAME_LENGTH + 2],
returns_buff[MAX_FIELD_WIDTH];
String sp_db(sp_db_buff, sizeof(sp_db_buff), cs);
String sp_name(sp_name_buff, sizeof(sp_name_buff), cs);
String definer(definer_buff, sizeof(definer_buff), cs);
String returns(returns_buff, sizeof(returns_buff), cs);
proc_table->field[MYSQL_PROC_FIELD_DB]->val_str(&sp_db);
proc_table->field[MYSQL_PROC_FIELD_NAME]->val_str(&sp_name);
proc_table->field[MYSQL_PROC_FIELD_DEFINER]->val_str(&definer);
if (!full_access)
full_access= !strcmp(sp_user, definer.ptr());
if (!full_access &&
check_some_routine_access(thd, sp_db.ptr(), sp_name.ptr(),
full_access= !strcmp(sp_user, definer.c_ptr_safe());
if (!full_access &&
check_some_routine_access(thd, sp_db.c_ptr_safe(), sp_name.c_ptr_safe(),
proc_table->field[MYSQL_PROC_MYSQL_TYPE]->
val_int() == TYPE_ENUM_PROCEDURE))
return 0;
@ -4539,32 +4556,30 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
(sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0)
{
restore_record(table, s->default_values);
if (!wild || !wild[0] || !wild_compare(sp_name.ptr(), wild, 0))
if (!wild || !wild[0] || !wild_compare(sp_name.c_ptr_safe(), wild, 0))
{
int enum_idx= (int) proc_table->field[MYSQL_PROC_FIELD_ACCESS]->val_int();
table->field[3]->store(sp_name.ptr(), sp_name.length(), cs);
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_SPECIFIC_NAME],
&tmp_string);
table->field[0]->store(tmp_string.ptr(), tmp_string.length(), cs);
copy_field_as_string(table->field[0],
proc_table->field[MYSQL_PROC_FIELD_SPECIFIC_NAME]);
table->field[1]->store(STRING_WITH_LEN("def"), cs);
table->field[2]->store(sp_db.ptr(), sp_db.length(), cs);
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_MYSQL_TYPE],
&tmp_string);
table->field[4]->store(tmp_string.ptr(), tmp_string.length(), cs);
copy_field_as_string(table->field[4],
proc_table->field[MYSQL_PROC_MYSQL_TYPE]);
if (proc_table->field[MYSQL_PROC_MYSQL_TYPE]->val_int() ==
TYPE_ENUM_FUNCTION)
{
sp_head *sp;
bool free_sp_head;
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_RETURNS],
&tmp_string);
proc_table->field[MYSQL_PROC_FIELD_RETURNS]->val_str(&returns);
sp= sp_load_for_information_schema(thd, proc_table, &sp_db, &sp_name,
(ulong) proc_table->
field[MYSQL_PROC_FIELD_SQL_MODE]->
val_int(),
TYPE_ENUM_FUNCTION,
tmp_string.c_ptr_safe(),
returns.c_ptr_safe(),
"", &free_sp_head);
if (sp)
@ -4595,24 +4610,19 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
if (full_access)
{
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_BODY_UTF8],
&tmp_string);
table->field[14]->store(tmp_string.ptr(), tmp_string.length(), cs);
copy_field_as_string(table->field[14],
proc_table->field[MYSQL_PROC_FIELD_BODY_UTF8]);
table->field[14]->set_notnull();
}
table->field[13]->store(STRING_WITH_LEN("SQL"), cs);
table->field[17]->store(STRING_WITH_LEN("SQL"), cs);
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_DETERMINISTIC],
&tmp_string);
table->field[18]->store(tmp_string.ptr(), tmp_string.length(), cs);
copy_field_as_string(table->field[18],
proc_table->field[MYSQL_PROC_FIELD_DETERMINISTIC]);
table->field[19]->store(sp_data_access_name[enum_idx].str,
sp_data_access_name[enum_idx].length , cs);
copy_field_as_string(table->field[21],
proc_table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]);
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_SECURITY_TYPE],
&tmp_string);
table->field[21]->store(tmp_string.ptr(), tmp_string.length(), cs);
bzero((char *)&time, sizeof(time));
((Field_timestamp *) proc_table->field[MYSQL_PROC_FIELD_CREATED])->
get_time(&time);
@ -4621,29 +4631,20 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
((Field_timestamp *) proc_table->field[MYSQL_PROC_FIELD_MODIFIED])->
get_time(&time);
table->field[23]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
copy_field_as_string(table->field[24],
proc_table->field[MYSQL_PROC_FIELD_SQL_MODE]);
copy_field_as_string(table->field[25],
proc_table->field[MYSQL_PROC_FIELD_COMMENT]);
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_SQL_MODE],
&tmp_string);
table->field[24]->store(tmp_string.ptr(), tmp_string.length(), cs);
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_COMMENT],
&tmp_string);
table->field[25]->store(tmp_string.ptr(), tmp_string.length(), cs);
table->field[26]->store(definer.ptr(), definer.length(), cs);
get_field(thd->mem_root,
proc_table->field[MYSQL_PROC_FIELD_CHARACTER_SET_CLIENT],
&tmp_string);
table->field[27]->store(tmp_string.ptr(), tmp_string.length(), cs);
get_field(thd->mem_root,
proc_table->field[ MYSQL_PROC_FIELD_COLLATION_CONNECTION],
&tmp_string);
table->field[28]->store(tmp_string.ptr(), tmp_string.length(), cs);
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_DB_COLLATION],
&tmp_string);
table->field[29]->store(tmp_string.ptr(), tmp_string.length(), cs);
copy_field_as_string(table->field[27],
proc_table->
field[MYSQL_PROC_FIELD_CHARACTER_SET_CLIENT]);
copy_field_as_string(table->field[28],
proc_table->
field[MYSQL_PROC_FIELD_COLLATION_CONNECTION]);
copy_field_as_string(table->field[29],
proc_table->field[MYSQL_PROC_FIELD_DB_COLLATION]);
return schema_table_store_record(thd, table);
}

View File

@ -425,6 +425,25 @@ uint filename_to_tablename(const char *from, char *to, uint to_length)
}
/**
Check if given string begins with "#mysql50#" prefix
@param name string to check cut
@retval
FALSE no prefix found
@retval
TRUE prefix found
*/
bool check_mysql50_prefix(const char *name)
{
return (name[0] == '#' &&
!strncmp(name, MYSQL50_TABLE_NAME_PREFIX,
MYSQL50_TABLE_NAME_PREFIX_LENGTH));
}
/**
Check if given string begins with "#mysql50#" prefix, cut it if so.
@ -440,9 +459,7 @@ uint filename_to_tablename(const char *from, char *to, uint to_length)
uint check_n_cut_mysql50_prefix(const char *from, char *to, uint to_length)
{
if (from[0] == '#' &&
!strncmp(from, MYSQL50_TABLE_NAME_PREFIX,
MYSQL50_TABLE_NAME_PREFIX_LENGTH))
if (check_mysql50_prefix(from))
return (uint) (strmake(to, from + MYSQL50_TABLE_NAME_PREFIX_LENGTH,
to_length - 1) - to);
return 0;

View File

@ -126,6 +126,7 @@ enum enum_explain_filename_mode
uint filename_to_tablename(const char *from, char *to, uint to_length);
uint tablename_to_filename(const char *from, char *to, uint to_length);
uint check_n_cut_mysql50_prefix(const char *from, char *to, uint to_length);
bool check_mysql50_prefix(const char *name);
uint build_table_filename(char *buff, size_t bufflen, const char *db,
const char *table, const char *ext, uint flags);
uint build_table_shadow_filename(char *buff, size_t bufflen,

View File

@ -436,6 +436,7 @@ int mysql_update(THD *thd,
{
goto err;
}
thd->examined_row_count+= examined_rows;
/*
Filesort has already found and selected the rows we want to update,
so we don't need the where clause
@ -482,6 +483,7 @@ int mysql_update(THD *thd,
while (!(error=info.read_record(&info)) && !thd->killed)
{
thd->examined_row_count++;
if (!(select && select->skip_record()))
{
if (table->file->was_semi_consistent_read())
@ -588,6 +590,7 @@ int mysql_update(THD *thd,
while (!(error=info.read_record(&info)) && !thd->killed)
{
thd->examined_row_count++;
if (!(select && select->skip_record()))
{
if (table->file->was_semi_consistent_read())

View File

@ -2747,44 +2747,30 @@ bool check_db_name(LEX_STRING *org_name)
{
char *name= org_name->str;
uint name_length= org_name->length;
bool check_for_path_chars;
if (!name_length || name_length > NAME_LEN)
return 1;
if ((check_for_path_chars= check_mysql50_prefix(name)))
{
name+= MYSQL50_TABLE_NAME_PREFIX_LENGTH;
name_length-= MYSQL50_TABLE_NAME_PREFIX_LENGTH;
}
if (lower_case_table_names && name != any_db)
my_casedn_str(files_charset_info, name);
#if defined(USE_MB) && defined(USE_MB_IDENT)
if (use_mb(system_charset_info))
{
name_length= 0;
bool last_char_is_space= TRUE;
char *end= name + org_name->length;
while (name < end)
{
int len;
last_char_is_space= my_isspace(system_charset_info, *name);
len= my_ismbchar(system_charset_info, name, end);
if (!len)
len= 1;
name+= len;
name_length++;
}
return (last_char_is_space || name_length > NAME_CHAR_LEN);
}
else
#endif
return ((org_name->str[org_name->length - 1] != ' ') ||
(name_length > NAME_CHAR_LEN)); /* purecov: inspected */
return check_table_name(name, name_length, check_for_path_chars);
}
/*
Allow anything as a table name, as long as it doesn't contain an
' ' at the end
returns 1 on error
*/
bool check_table_name(const char *name, uint length, bool check_for_path_chars)
{
uint name_length= 0; // name length in symbols
@ -2812,10 +2798,10 @@ bool check_table_name(const char *name, uint length, bool check_for_path_chars)
continue;
}
}
#endif
if (check_for_path_chars &&
(*name == '/' || *name == '\\' || *name == '~' || *name == FN_EXTCHAR))
return 1;
#endif
name++;
name_length++;
}