mysql 5.5.23 merge

Sergei Golubchik 2012-04-10 08:28:13 +02:00
commit 16c5c53fc2
118 changed files with 3926 additions and 747 deletions

View File

@@ -1,4 +1,4 @@
MYSQL_VERSION_MAJOR=5
MYSQL_VERSION_MINOR=5
-MYSQL_VERSION_PATCH=22
+MYSQL_VERSION_PATCH=23
MYSQL_VERSION_EXTRA=

View File

@@ -120,8 +120,13 @@ CONFIGURE_FILE(${CMAKE_BINARY_DIR}/sql/sql_yacc.cc
${PACKAGE_DIR}/sql/sql_yacc.cc COPYONLY)
# Copy spec files
-CONFIGURE_FILE(${CMAKE_BINARY_DIR}/support-files/mysql.${VERSION}.spec
-${PACKAGE_DIR}/support-files/mysql.${VERSION}.spec COPYONLY)
+SET(SPECFILENAME "mysql.${VERSION}.spec")
+IF("${VERSION}" MATCHES "-ndb-")
+STRING(REGEX REPLACE "^.*-ndb-" "" NDBVERSION "${VERSION}")
+SET(SPECFILENAME "mysql-cluster-${NDBVERSION}.spec")
+ENDIF()
+CONFIGURE_FILE(${CMAKE_BINARY_DIR}/support-files/${SPECFILENAME}
+${PACKAGE_DIR}/support-files/${SPECFILENAME} COPYONLY)
# Add documentation, if user has specified where to find them
IF(MYSQL_DOCS_LOCATION)

View File

@@ -57,7 +57,10 @@ MACRO(GET_MYSQL_VERSION)
MESSAGE("-- MariaDB ${VERSION}")
SET(MYSQL_BASE_VERSION "${MAJOR_VERSION}.${MINOR_VERSION}" CACHE INTERNAL "MySQL Base version")
SET(MYSQL_NO_DASH_VERSION "${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}")
-STRING(REPLACE "-" "_" MYSQL_RPM_VERSION "${VERSION}")
+# Use NDBVERSION irregardless of whether this is Cluster or not, if not
+# then the regex will be ignored anyway.
+STRING(REGEX REPLACE "^.*-ndb-" "" NDBVERSION "${VERSION}")
+STRING(REPLACE "-" "_" MYSQL_RPM_VERSION "${NDBVERSION}")
MATH(EXPR MYSQL_VERSION_ID "10000*${MAJOR_VERSION} + 100*${MINOR_VERSION} + ${PATCH_VERSION}")
MARK_AS_ADVANCED(VERSION MYSQL_VERSION_ID MYSQL_BASE_VERSION)
SET(CPACK_PACKAGE_VERSION_MAJOR ${MAJOR_VERSION})
@@ -93,6 +96,10 @@ ENDIF()
IF(NOT CPACK_SOURCE_PACKAGE_FILE_NAME)
SET(CPACK_SOURCE_PACKAGE_FILE_NAME "mariadb-${VERSION}")
+IF("${VERSION}" MATCHES "-ndb-")
+STRING(REGEX REPLACE "^.*-ndb-" "" NDBVERSION "${VERSION}")
+SET(CPACK_SOURCE_PACKAGE_FILE_NAME "mysql-cluster-gpl-${NDBVERSION}")
+ENDIF()
ENDIF()
SET(CPACK_PACKAGE_CONTACT "MariaDB team <info@montyprogram.com>")
SET(CPACK_PACKAGE_VENDOR "Monty Program AB")

View File

@@ -123,8 +123,15 @@ IF(NOT VERSION)
SET(PRODUCT_TAG)
ENDIF()
-SET(package_name "mariadb${PRODUCT_TAG}-${VERSION}-${SYSTEM_NAME_AND_PROCESSOR}")
+IF("${VERSION}" MATCHES "-ndb-")
+STRING(REGEX REPLACE "^.*-ndb-" "" NDBVERSION "${VERSION}")
+SET(package_name "mysql-cluster${PRODUCT_TAG}-${NDBVERSION}-${SYSTEM_NAME_AND_PROCESSOR}")
+ELSE()
+SET(package_name "mariadb${PRODUCT_TAG}-${VERSION}-${SYSTEM_NAME_AND_PROCESSOR}")
+ENDIF()
+MESSAGE("-- Packaging as: ${package_name}")
# Sometimes package suffix is added (something like "-icc-glibc23")
IF(PACKAGE_SUFFIX)
SET(package_name "${package_name}${PACKAGE_SUFFIX}")

View File

@@ -4,6 +4,9 @@
# $engine_type -- storage engine to be tested
# $test_foreign_keys -- 0, skip foreign key tests
# -- 1, do not skip foreign key tests
+# $partitions_a -- partition by column 'a'
+# $partitions_id -- partition by column 'id'
+# $partitions_s1 -- partition by column 's1'
# have to be set before sourcing this script.
#
# Last update:
@@ -19,47 +22,61 @@ eval SET SESSION STORAGE_ENGINE = $engine_type;
drop table if exists t1,t2,t3;
--enable_warnings
+set @save_query_cache_size = @@global.query_cache_size;
+set GLOBAL query_cache_size = 1355776;
#
# Without auto_commit.
#
flush status;
set autocommit=0;
-create table t1 (a int not null);
+eval create table t1 (a int not null)$partitions_a;
insert into t1 values (1),(2),(3);
+--sorted_result
select * from t1;
show status like "Qcache_queries_in_cache";
drop table t1;
commit;
set autocommit=1;
begin;
-create table t1 (a int not null);
+eval create table t1 (a int not null)$partitions_a;
insert into t1 values (1),(2),(3);
+--sorted_result
select * from t1;
show status like "Qcache_queries_in_cache";
drop table t1;
commit;
-create table t1 (a int not null);
-create table t2 (a int not null);
-create table t3 (a int not null);
+eval create table t1 (a int not null)$partitions_a;
+eval create table t2 (a int not null)$partitions_a;
+eval create table t3 (a int not null)$partitions_a;
insert into t1 values (1),(2);
insert into t2 values (1),(2);
insert into t3 values (1),(2);
+--sorted_result
select * from t1;
+--sorted_result
select * from t2;
+--sorted_result
select * from t3;
show status like "Qcache_queries_in_cache";
show status like "Qcache_hits";
begin;
+--sorted_result
select * from t1;
+--sorted_result
select * from t2;
+--sorted_result
select * from t3;
show status like "Qcache_queries_in_cache";
show status like "Qcache_hits";
insert into t1 values (3);
insert into t2 values (3);
insert into t1 values (4);
+--sorted_result
select * from t1;
+--sorted_result
select * from t2;
+--sorted_result
select * from t3;
show status like "Qcache_queries_in_cache";
show status like "Qcache_hits";
@@ -67,7 +84,7 @@ commit;
show status like "Qcache_queries_in_cache";
drop table t3,t2,t1;
-CREATE TABLE t1 (id int(11) NOT NULL auto_increment, PRIMARY KEY (id));
+eval CREATE TABLE t1 (id int(11) NOT NULL auto_increment, PRIMARY KEY (id))$partitions_id;
select count(*) from t1;
insert into t1 (id) values (0);
select count(*) from t1;
@@ -78,8 +95,6 @@ if ($test_foreign_keys)
#
# one statement roll back inside transation
#
-let $save_query_cache_size=`select @@global.query_cache_size`;
-set GLOBAL query_cache_size=1355776;
CREATE TABLE t1 ( id int(10) NOT NULL auto_increment, a varchar(25) default NULL, PRIMARY KEY (id), UNIQUE KEY a (a));
CREATE TABLE t2 ( id int(10) NOT NULL auto_increment, b varchar(25) default NULL, PRIMARY KEY (id), UNIQUE KEY b (b));
CREATE TABLE t3 ( id int(10) NOT NULL auto_increment, t1_id int(10) NOT NULL default '0', t2_id int(10) NOT NULL default '0', state int(11) default NULL, PRIMARY KEY (id), UNIQUE KEY t1_id (t1_id,t2_id), KEY t2_id (t2_id,t1_id), CONSTRAINT `t3_ibfk_1` FOREIGN KEY (`t1_id`) REFERENCES `t1` (`id`), CONSTRAINT `t3_ibfk_2` FOREIGN KEY (`t2_id`) REFERENCES `t2` (`id`));
@@ -95,9 +110,6 @@ insert into t3 VALUES ( NULL, 1, 1, 2 );
commit;
select t1.* from t1, t2, t3 where t3.state & 1 = 0 and t3.t1_id = t1.id and t3.t2_id = t2.id and t1.id = 1 order by t1.a asc;
drop table t3,t2,t1;
---disable_query_log
-eval set GLOBAL query_cache_size=$save_query_cache_size;
---enable_query_log
}
#
@@ -118,7 +130,7 @@ SET GLOBAL query_cache_size = 204800;
flush status;
SET @@autocommit=1;
eval SET SESSION STORAGE_ENGINE = $engine_type;
-CREATE TABLE t2 (s1 int, s2 varchar(1000), key(s1));
+eval CREATE TABLE t2 (s1 int, s2 varchar(1000), key(s1))$partitions_s1;
INSERT INTO t2 VALUES (1,repeat('a',10)),(2,repeat('a',10)),(3,repeat('a',10)),(4,repeat('a',10));
COMMIT;
START TRANSACTION;
@@ -176,8 +188,8 @@ show status like "Qcache_queries_in_cache";
show status like "Qcache_hits";
# Final cleanup
-eval set GLOBAL query_cache_size=$save_query_cache_size;
disconnect connection1;
--source include/wait_until_disconnected.inc
connection default;
+set @@global.query_cache_size = @save_query_cache_size;
drop table t2;
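The three $partitions_* parameters documented at the top of this include are plain SQL fragments appended to the CREATE TABLE statements via eval, so the same query-cache scenarios can be replayed against partitioned tables. A minimal caller sketch, assuming the include lives at include/query_cache.inc and using the same partition clauses that are echoed in the new partitioned result file further below; the wrapper file itself is illustrative and not part of this diff:

# Hypothetical wrapper test: pick an engine, skip the foreign-key part,
# and partition every test table before sourcing the shared include.
--source include/have_partition.inc
--source include/have_innodb.inc
let $engine_type= InnoDB;
let $test_foreign_keys= 0;
let $partitions_a= PARTITION BY KEY (a) PARTITIONS 3;
let $partitions_id= PARTITION BY HASH (id) PARTITIONS 3;
let $partitions_s1= PARTITION BY KEY (s1) PARTITIONS 3;
--source include/query_cache.inc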

View File

@@ -0,0 +1,3 @@
--replace_result $SLAVE_MYPORT SLAVE_PORT $DEFAULT_MASTER_PORT DEFAULT_PORT
--replace_regex /[0-9, a-f]{8}-[0-9, a-f]{4}-[0-9, a-f]{4}-[0-9, a-f]{4}-[0-9, a-f]{12}/SLAVE_UUID/
SHOW SLAVE HOSTS;
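This new three-line include only normalizes the run-to-run variation in SHOW SLAVE HOSTS output: port numbers become SLAVE_PORT/DEFAULT_PORT and anything shaped like a UUID becomes SLAVE_UUID. A hedged usage sketch; the include path and the connection name are assumptions, not shown in this diff:

# Hypothetical replication test snippet reusing the include on the master.
--source include/master-slave.inc
connection master;
--source include/show_slave_hosts.inc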

View File

@@ -1,5 +1,7 @@
SET SESSION STORAGE_ENGINE = InnoDB;
drop table if exists t1,t2,t3;
+set @save_query_cache_size = @@global.query_cache_size;
+set GLOBAL query_cache_size = 1355776;
flush status;
set autocommit=0;
create table t1 (a int not null);
@@ -100,7 +102,7 @@ show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 2
drop table t3,t2,t1;
CREATE TABLE t1 (id int(11) NOT NULL auto_increment, PRIMARY KEY (id));
select count(*) from t1;
count(*)
0
@@ -109,7 +111,6 @@ select count(*) from t1;
count(*)
1
drop table t1;
-set GLOBAL query_cache_size=1355776;
CREATE TABLE t1 ( id int(10) NOT NULL auto_increment, a varchar(25) default NULL, PRIMARY KEY (id), UNIQUE KEY a (a));
CREATE TABLE t2 ( id int(10) NOT NULL auto_increment, b varchar(25) default NULL, PRIMARY KEY (id), UNIQUE KEY b (b));
CREATE TABLE t3 ( id int(10) NOT NULL auto_increment, t1_id int(10) NOT NULL default '0', t2_id int(10) NOT NULL default '0', state int(11) default NULL, PRIMARY KEY (id), UNIQUE KEY t1_id (t1_id,t2_id), KEY t2_id (t2_id,t1_id), CONSTRAINT `t3_ibfk_1` FOREIGN KEY (`t1_id`) REFERENCES `t1` (`id`), CONSTRAINT `t3_ibfk_2` FOREIGN KEY (`t2_id`) REFERENCES `t2` (`id`));
@@ -218,7 +219,7 @@ Qcache_queries_in_cache 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 1
-set GLOBAL query_cache_size=1048576;
+set @@global.query_cache_size = @save_query_cache_size;
drop table t2;
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
BEGIN;

View File

@@ -4157,5 +4157,16 @@ DROP TABLE t1;
SET sql_mode=default;
SET NAMES latin1;
#
# Bug #13832953 MY_STRNXFRM_UNICODE: ASSERTION `SRC' FAILED
#
CREATE TABLE t1 (c1 SET('','') CHARACTER SET ucs2);
Warnings:
Note 1291 Column 'c1' has duplicated value '' in SET
INSERT INTO t1 VALUES ('');
SELECT COALESCE(c1) FROM t1 ORDER BY 1;
COALESCE(c1)
DROP TABLE t1;
#
# End of 5.5 tests
#

View File

@@ -31,3 +31,46 @@ SET DEBUG_SYNC='now SIGNAL filesort_killed';
# connection default
SET DEBUG_SYNC= "RESET";
DROP TABLE t1;
#
# Bug#13832772 ASSERTION `THD->IS_ERROR() || KILL_ERRNO'
# FAILED IN FILESORT/MYSQL_DELETE
#
CREATE TABLE t1 (
c1 BLOB,
c2 TEXT,
c3 TEXT,
c4 TEXT,
c5 TEXT,
c6 TEXT,
c7 TEXT,
c8 BLOB,
c9 TEXT,
c19 TEXT,
pk INT,
c20 TEXT,
c21 BLOB,
c22 TEXT,
c23 TEXT,
c24 TEXT,
c25 TEXT,
c26 BLOB,
c27 TEXT,
c28 TEXT,
primary key (pk)
);
CALL mtr.add_suppression("Out of sort memory");
DELETE IGNORE FROM t1 ORDER BY c26,c7,c23,c4,c25,c5,c20,
c19,c21,c8,c1,c27,c28,c3,c9,c22,c24,c6,c2,pk LIMIT 2;
ERROR HY001: Out of sort memory, consider increasing server sort buffer size
SHOW WARNINGS;
Level Code Message
Error 1038 Out of sort memory, consider increasing server sort buffer size
Error 1028 Sort aborted: Out of sort memory, consider increasing server sort buffer size
DELETE FROM t1 ORDER BY c26,c7,c23,c4,c25,c5,c20,
c19,c21,c8,c1,c27,c28,c3,c9,c22,c24,c6,c2,pk LIMIT 2;
ERROR HY001: Out of sort memory, consider increasing server sort buffer size
SHOW WARNINGS;
Level Code Message
Error 1038 Out of sort memory, consider increasing server sort buffer size
Error 1028 Sort aborted: Out of sort memory, consider increasing server sort buffer size
DROP TABLE t1;
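Both DELETE variants above are expected to fail cleanly with "Out of sort memory" plus the "Sort aborted" note instead of tripping the assertion named in the bug title. The table is empty, so the error must come from an artificially constrained sort buffer; a sketch of how the companion .test presumably provokes it (the exact value is an assumption and is not visible in this result diff):

# Assumed setup in the .test file: shrink the session sort buffer so that
# filesort cannot even hold one sort key built from the 20-odd BLOB/TEXT columns.
SET SESSION sort_buffer_size = 32768;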

View File

@@ -778,4 +778,14 @@ SELECT 1 IN (YEAR(FROM_UNIXTIME(NULL)) ,1);
1 IN (YEAR(FROM_UNIXTIME(NULL)) ,1)
1
#
#
# Bug#13012483: EXPLAIN EXTENDED, PREPARED STATEMENT, CRASH IN CHECK_SIMPLE_EQUALITY
#
CREATE TABLE t1 (a INT);
PREPARE s FROM "SELECT 1 FROM t1 WHERE 1 < ALL (SELECT @:= (1 IN (SELECT 1 FROM t1)) FROM t1)";
EXECUTE s;
1
DROP TABLE t1;
# End of test BUG#13012483
#
End of 5.1 tests

View File

@@ -1036,7 +1036,7 @@ replicate-events-marked-for-skip replicate
replicate-same-server-id FALSE
report-host (No default value)
report-password (No default value)
-report-port 3306
+report-port 0
report-user (No default value)
rowid-merge-buff-size 8388608
rpl-recovery-rank 0
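The only change in this options dump is the report-port default dropping from 3306 to 0. Reading 0 as "let the slave register whatever --port it actually listens on, rather than a hard-coded 3306" is an interpretation of this default change, not something stated in the diff; a quick way to inspect the effective values on a live setup:

# On the slave: the configured report_port (0 unless --report-port is given).
SHOW GLOBAL VARIABLES LIKE 'report_port';
# On the master: the host and port each slave actually registered with.
SHOW SLAVE HOSTS;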

View File

@@ -0,0 +1,205 @@
SET SESSION STORAGE_ENGINE = InnoDB;
drop table if exists t1,t2,t3;
set @save_query_cache_size = @@global.query_cache_size;
set GLOBAL query_cache_size = 1355776;
flush status;
set autocommit=0;
create table t1 (a int not null) PARTITION BY KEY (a) PARTITIONS 3;
insert into t1 values (1),(2),(3);
select * from t1;
a
1
2
3
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
drop table t1;
commit;
set autocommit=1;
begin;
create table t1 (a int not null) PARTITION BY KEY (a) PARTITIONS 3;
insert into t1 values (1),(2),(3);
select * from t1;
a
1
2
3
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
drop table t1;
commit;
create table t1 (a int not null) PARTITION BY KEY (a) PARTITIONS 3;
create table t2 (a int not null) PARTITION BY KEY (a) PARTITIONS 3;
create table t3 (a int not null) PARTITION BY KEY (a) PARTITIONS 3;
insert into t1 values (1),(2);
insert into t2 values (1),(2);
insert into t3 values (1),(2);
select * from t1;
a
1
2
select * from t2;
a
1
2
select * from t3;
a
1
2
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
begin;
select * from t1;
a
1
2
select * from t2;
a
1
2
select * from t3;
a
1
2
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
insert into t1 values (3);
insert into t2 values (3);
insert into t1 values (4);
select * from t1;
a
1
2
3
4
select * from t2;
a
1
2
3
select * from t3;
a
1
2
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
commit;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
drop table t3,t2,t1;
CREATE TABLE t1 (id int(11) NOT NULL auto_increment, PRIMARY KEY (id)) PARTITION BY HASH (id) PARTITIONS 3;
select count(*) from t1;
count(*)
0
insert into t1 (id) values (0);
select count(*) from t1;
count(*)
1
drop table t1;
SET SESSION STORAGE_ENGINE = InnoDB;
SET @@autocommit=1;
connection default
SHOW VARIABLES LIKE 'have_query_cache';
Variable_name Value
have_query_cache YES
SET GLOBAL query_cache_size = 204800;
flush status;
SET @@autocommit=1;
SET SESSION STORAGE_ENGINE = InnoDB;
CREATE TABLE t2 (s1 int, s2 varchar(1000), key(s1)) PARTITION BY KEY (s1) PARTITIONS 3;
INSERT INTO t2 VALUES (1,repeat('a',10)),(2,repeat('a',10)),(3,repeat('a',10)),(4,repeat('a',10));
COMMIT;
START TRANSACTION;
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
0
UPDATE t2 SET s2 = 'w' WHERE s1 = 3;
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
1
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
connection connection1
START TRANSACTION;
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
0
INSERT INTO t2 VALUES (5,'w');
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
1
COMMIT;
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
1
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
connection default
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
1
COMMIT;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
2
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
connection connection1
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
2
START TRANSACTION;
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
2
INSERT INTO t2 VALUES (6,'w');
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
3
connection default
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
2
START TRANSACTION;
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
2
DELETE from t2 WHERE s1=3;
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
1
COMMIT;
connection connection1
COMMIT;
SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w';
count(*)
2
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
set @@global.query_cache_size = @save_query_cache_size;
drop table t2;

View File

@@ -2,6 +2,35 @@ set global default_storage_engine='innodb';
set session default_storage_engine='innodb';
drop table if exists t1, t2;
#
# Bug#13694811: THE OPTIMIZER WRONGLY USES THE FIRST
# INNODB PARTITION STATISTICS
#
CREATE TABLE t1
(a INT,
b varchar(64),
PRIMARY KEY (a),
KEY (b))
ENGINE = InnoDB
PARTITION BY RANGE (a)
SUBPARTITION BY HASH (a) SUBPARTITIONS 10
(PARTITION pNeg VALUES LESS THAN (0),
PARTITION p0 VALUES LESS THAN (1000),
PARTITION pMAX VALUES LESS THAN MAXVALUE);
# Only one row in the first 10 subpartitions
INSERT INTO t1 VALUES (-1, 'Only negative pk value');
INSERT INTO t1 VALUES (0, 'Mod Zero'), (1, 'One'), (2, 'Two'), (3, 'Three'),
(10, 'Zero'), (11, 'Mod One'), (12, 'Mod Two'), (13, 'Mod Three'),
(20, '0'), (21, '1'), (22, '2'), (23, '3'),
(4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9');
INSERT INTO t1 SELECT a + 30, b FROM t1 WHERE a >= 0;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
EXPLAIN SELECT b FROM t1 WHERE b between 'L' and 'N' AND a > -100;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY,b b 67 NULL 18 Using where; Using index
DROP TABLE t1;
#
# Bug#56287: crash when using Partition datetime in sub in query
#
CREATE TABLE t1
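Bug#13694811 above concerns InnoDB feeding the optimizer statistics taken only from the first partition, which made row estimates collapse when that partition happened to be nearly empty; the new test expects a sane estimate (18 rows) in EXPLAIN after ANALYZE TABLE. A hedged way to eyeball the per-(sub)partition statistics behind such an estimate, using a standard INFORMATION_SCHEMA query that is not part of this commit:

# Approximate per-partition row counts as maintained by the storage engine.
SELECT PARTITION_NAME, SUBPARTITION_NAME, TABLE_ROWS
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 't1'
ORDER BY PARTITION_ORDINAL_POSITION, SUBPARTITION_ORDINAL_POSITION;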

View File

@@ -18,7 +18,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
# # # # # # # # # 3 #
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < 7;
id select_type table partitions type possible_keys key key_len ref rows Extra
-# # # # # # # # # 9 #
+# # # # # # # # # 10 #
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 1;
id select_type table partitions type possible_keys key key_len ref rows Extra
# # # # # # # # # 3 #
@@ -105,7 +105,7 @@ a
6
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < 7;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max range PRIMARY PRIMARY 4 NULL 9 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max index PRIMARY PRIMARY 4 NULL 10 Using where; Using index
SELECT * FROM t1 WHERE a <= 1;
a
-1
@@ -168,7 +168,7 @@ a
6
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 6;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max range PRIMARY PRIMARY 4 NULL 9 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max index PRIMARY PRIMARY 4 NULL 10 Using where; Using index
SELECT * FROM t1 WHERE a <= 7;
a
-1
@@ -182,7 +182,7 @@ a
7
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 7;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max range PRIMARY PRIMARY 4 NULL 9 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max index PRIMARY PRIMARY 4 NULL 10 Using where; Using index
SELECT * FROM t1 WHERE a = 1;
a
1
@@ -424,7 +424,7 @@ a
5
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < 6;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,max range PRIMARY PRIMARY 4 NULL 8 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,max index PRIMARY PRIMARY 4 NULL 9 Using where; Using index
SELECT * FROM t1 WHERE a <= 1;
a
-1
@@ -474,7 +474,7 @@ a
5
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 5;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,max range PRIMARY PRIMARY 4 NULL 8 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,max index PRIMARY PRIMARY 4 NULL 9 Using where; Using index
SELECT * FROM t1 WHERE a <= 6;
a
-1
@@ -487,7 +487,7 @@ a
6
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 6;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,max range PRIMARY PRIMARY 4 NULL 8 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,max index PRIMARY PRIMARY 4 NULL 9 Using where; Using index
SELECT * FROM t1 WHERE a = 1;
a
1
@@ -744,13 +744,13 @@ a
1001-01-01
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 3 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL,p2001-01-01 range a a 4 NULL 3 Using where; Using index
@@ -759,26 +759,26 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p1001-01-01 system a NULL NULL NULL 1
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 3 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 3 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL ref a a 4 const 1 Using index
# Disabling warnings for the invalid date
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 range a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 range a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL,p2001-01-01 index a a 4 NULL 4 Using where; Using index
@@ -790,16 +790,16 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL ref a a 4 const 1 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 range a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 index a a 4 NULL 6 Using where; Using index
# test without index
ALTER TABLE t1 DROP KEY a;
SELECT * FROM t1 WHERE a < '1001-01-01';
@@ -1076,7 +1076,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 range a a 4 NULL 3 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 3 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 range a a 4 NULL 4 Using where; Using index
@@ -1104,10 +1104,10 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
# Disabling warnings for the invalid date
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p2001-01-01,pNULL index a a 4 NULL 4 Using where; Using index
@@ -1119,10 +1119,10 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL ref a a 4 const 1 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 3 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL,p1001-01-01 index a a 4 NULL 4 Using where; Using index
@@ -1405,7 +1405,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 range a a 4 NULL 3 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 3 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 range a a 4 NULL 4 Using where; Using index
@@ -1433,10 +1433,10 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
# Disabling warnings for the invalid date
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p2001-01-01,pNULL index a a 4 NULL 4 Using where; Using index
@@ -1448,10 +1448,10 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL ref a a 4 const 1 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 3 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL,p1001-01-01 index a a 4 NULL 4 Using where; Using index
@@ -2867,18 +2867,18 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2 ALL NULL NULL NULL NULL 510 Using where
explain partitions select * from t2 where b = 4;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 p0,p1,p2,p3,p4 ref b b 5 const 76
+1 SIMPLE t2 p0,p1,p2,p3,p4 ref b b 5 const 96
explain extended select * from t2 where b = 6;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ref b b 5 const 76 100.00
+1 SIMPLE t2 ref b b 5 const 96 100.00
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where (`test`.`t2`.`b` = 6)
explain partitions select * from t2 where b = 6;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 p0,p1,p2,p3,p4 ref b b 5 const 76
+1 SIMPLE t2 p0,p1,p2,p3,p4 ref b b 5 const 96
explain extended select * from t2 where b in (1,3,5);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL b NULL NULL NULL 910 40.66 Using where
+1 SIMPLE t2 ALL b NULL NULL NULL 910 51.65 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where (`test`.`t2`.`b` in (1,3,5))
explain partitions select * from t2 where b in (1,3,5);
@@ -2886,7 +2886,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2,p3,p4 ALL b NULL NULL NULL 910 Using where
explain extended select * from t2 where b in (2,4,6);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL b NULL NULL NULL 910 25.05 Using where
+1 SIMPLE t2 ALL b NULL NULL NULL 910 31.65 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where (`test`.`t2`.`b` in (2,4,6))
explain partitions select * from t2 where b in (2,4,6);
@@ -2894,7 +2894,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2,p3,p4 ALL b NULL NULL NULL 910 Using where
explain extended select * from t2 where b in (7,8,9);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL b NULL NULL NULL 910 36.70 Using where
+1 SIMPLE t2 ALL b NULL NULL NULL 910 19.12 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where (`test`.`t2`.`b` in (7,8,9))
explain partitions select * from t2 where b in (7,8,9);
@@ -2902,7 +2902,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2,p3,p4 ALL b NULL NULL NULL 910 Using where
explain extended select * from t2 where b > 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL b NULL NULL NULL 910 44.84 Using where
+1 SIMPLE t2 ALL b NULL NULL NULL 910 29.23 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where (`test`.`t2`.`b` > 5)
explain partitions select * from t2 where b > 5;
@@ -2910,7 +2910,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2,p3,p4 ALL b NULL NULL NULL 910 Using where
explain extended select * from t2 where b > 5 and b < 8;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL b NULL NULL NULL 910 22.09 Using where
+1 SIMPLE t2 ALL b NULL NULL NULL 910 28.13 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where ((`test`.`t2`.`b` > 5) and (`test`.`t2`.`b` < 8))
explain partitions select * from t2 where b > 5 and b < 8;
@@ -2918,15 +2918,15 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2,p3,p4 ALL b NULL NULL NULL 910 Using where
explain extended select * from t2 where b > 5 and b < 7;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 range b b 5 NULL 76 100.00 Using where
+1 SIMPLE t2 range b b 5 NULL 96 100.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where ((`test`.`t2`.`b` > 5) and (`test`.`t2`.`b` < 7))
explain partitions select * from t2 where b > 5 and b < 7;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 p0,p1,p2,p3,p4 range b b 5 NULL 76 Using where
+1 SIMPLE t2 p0,p1,p2,p3,p4 range b b 5 NULL 96 Using where
explain extended select * from t2 where b > 0 and b < 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL b NULL NULL NULL 910 41.65 Using where
+1 SIMPLE t2 ALL b NULL NULL NULL 910 53.19 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where ((`test`.`t2`.`b` > 0) and (`test`.`t2`.`b` < 5))
explain partitions select * from t2 where b > 0 and b < 5;
@@ -2960,10 +2960,10 @@ flush status;
delete from t2 where b = 7;
show status like 'Handler_read_rnd_next';
Variable_name Value
-Handler_read_rnd_next 0
+Handler_read_rnd_next 915
show status like 'Handler_read_key';
Variable_name Value
-Handler_read_key 5
+Handler_read_key 0
flush status;
delete from t2 where b > 5;
show status like 'Handler_read_rnd_next';

View File

@@ -3755,6 +3755,109 @@ FROM (SELECT 1 UNION SELECT 2) t;
1
1
2
# Bug#13805127: Stored program cache produces wrong result in same THD
PREPARE s1 FROM
"
SELECT c1, t2.c2, count(c3)
FROM
(
SELECT 3 as c2 FROM dual WHERE @x = 1
UNION
SELECT 2 FROM dual WHERE @x = 1 OR @x = 2
) AS t1,
(
SELECT '2012-03-01 01:00:00' AS c1, 3 as c2, 1 as c3 FROM dual
UNION
SELECT '2012-03-01 02:00:00', 3, 2 FROM dual
UNION
SELECT '2012-03-01 01:00:00', 2, 1 FROM dual
) AS t2
WHERE t2.c2 = t1.c2
GROUP BY c1, c2
";
SET @x = 1;
SELECT c1, t2.c2, count(c3)
FROM
(
SELECT 3 as c2 FROM dual WHERE @x = 1
UNION
SELECT 2 FROM dual WHERE @x = 1 OR @x = 2
) AS t1,
(
SELECT '2012-03-01 01:00:00' AS c1, 3 as c2, 1 as c3 FROM dual
UNION
SELECT '2012-03-01 02:00:00', 3, 2 FROM dual
UNION
SELECT '2012-03-01 01:00:00', 2, 1 FROM dual
) AS t2
WHERE t2.c2 = t1.c2
GROUP BY c1, c2;
c1 c2 count(c3)
2012-03-01 01:00:00 2 1
2012-03-01 01:00:00 3 1
2012-03-01 02:00:00 3 1
EXECUTE s1;
c1 c2 count(c3)
2012-03-01 01:00:00 2 1
2012-03-01 01:00:00 3 1
2012-03-01 02:00:00 3 1
SET @x = 2;
SELECT c1, t2.c2, count(c3)
FROM
(
SELECT 3 as c2 FROM dual WHERE @x = 1
UNION
SELECT 2 FROM dual WHERE @x = 1 OR @x = 2
) AS t1,
(
SELECT '2012-03-01 01:00:00' AS c1, 3 as c2, 1 as c3 FROM dual
UNION
SELECT '2012-03-01 02:00:00', 3, 2 FROM dual
UNION
SELECT '2012-03-01 01:00:00', 2, 1 FROM dual
) AS t2
WHERE t2.c2 = t1.c2
GROUP BY c1, c2;
c1 c2 count(c3)
2012-03-01 01:00:00 2 1
EXECUTE s1;
c1 c2 count(c3)
2012-03-01 01:00:00 2 1
SET @x = 1;
SELECT c1, t2.c2, count(c3)
FROM
(
SELECT 3 as c2 FROM dual WHERE @x = 1
UNION
SELECT 2 FROM dual WHERE @x = 1 OR @x = 2
) AS t1,
(
SELECT '2012-03-01 01:00:00' AS c1, 3 as c2, 1 as c3 FROM dual
UNION
SELECT '2012-03-01 02:00:00', 3, 2 FROM dual
UNION
SELECT '2012-03-01 01:00:00', 2, 1 FROM dual
) AS t2
WHERE t2.c2 = t1.c2
GROUP BY c1, c2;
c1 c2 count(c3)
2012-03-01 01:00:00 2 1
2012-03-01 01:00:00 3 1
2012-03-01 02:00:00 3 1
EXECUTE s1;
c1 c2 count(c3)
2012-03-01 01:00:00 2 1
2012-03-01 01:00:00 3 1
2012-03-01 02:00:00 3 1
DEALLOCATE PREPARE s1;
#
# End of 5.5 tests.
prepare stmt from "select date('2010-10-10') between '2010-09-09' and ?";

View File

@@ -111,6 +111,7 @@ DROP FUNCTION db1.f1;
DROP TABLE db1.t1;
DROP DATABASE db1;
DROP DATABASE db2;
+USE test;
#
# Bug#13105873:valgrind warning:possible crash in foreign
# key handling on subsequent create table if not exists
@@ -130,4 +131,94 @@ CALL p1();
Warnings:
Note 1050 Table 't2' already exists
DROP DATABASE testdb;
+USE test;
End of 5.1 tests
#
# Bug#11763507 - 56224: FUNCTION NAME IS CASE-SENSITIVE
#
SET @@SQL_MODE = '';
CREATE FUNCTION testf_bug11763507() RETURNS INT
BEGIN
RETURN 0;
END
$
CREATE PROCEDURE testp_bug11763507()
BEGIN
SELECT "PROCEDURE testp_bug11763507";
END
$
SELECT testf_bug11763507();
testf_bug11763507()
0
SELECT TESTF_bug11763507();
TESTF_bug11763507()
0
SHOW FUNCTION STATUS LIKE 'testf_bug11763507';
Db Name Type Definer Modified Created Security_type Comment character_set_client collation_connection Database Collation
test testf_bug11763507 FUNCTION root@localhost # # DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
SHOW FUNCTION STATUS WHERE NAME='testf_bug11763507';
Db Name Type Definer Modified Created Security_type Comment character_set_client collation_connection Database Collation
test testf_bug11763507 FUNCTION root@localhost # # DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
SHOW FUNCTION STATUS LIKE 'TESTF_bug11763507';
Db Name Type Definer Modified Created Security_type Comment character_set_client collation_connection Database Collation
test testf_bug11763507 FUNCTION root@localhost # # DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
SHOW FUNCTION STATUS WHERE NAME='TESTF_bug11763507';
Db Name Type Definer Modified Created Security_type Comment character_set_client collation_connection Database Collation
test testf_bug11763507 FUNCTION root@localhost # # DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
SHOW CREATE FUNCTION testf_bug11763507;
Function sql_mode Create Function character_set_client collation_connection Database Collation
testf_bug11763507 CREATE DEFINER=`root`@`localhost` FUNCTION `testf_bug11763507`() RETURNS int(11)
BEGIN
RETURN 0;
END latin1 latin1_swedish_ci latin1_swedish_ci
SHOW CREATE FUNCTION TESTF_bug11763507;
Function sql_mode Create Function character_set_client collation_connection Database Collation
testf_bug11763507 CREATE DEFINER=`root`@`localhost` FUNCTION `testf_bug11763507`() RETURNS int(11)
BEGIN
RETURN 0;
END latin1 latin1_swedish_ci latin1_swedish_ci
CALL testp_bug11763507();
PROCEDURE testp_bug11763507
PROCEDURE testp_bug11763507
CALL TESTP_bug11763507();
PROCEDURE testp_bug11763507
PROCEDURE testp_bug11763507
SHOW PROCEDURE STATUS LIKE 'testp_bug11763507';
Db Name Type Definer Modified Created Security_type Comment character_set_client collation_connection Database Collation
test testp_bug11763507 PROCEDURE root@localhost # # DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
SHOW PROCEDURE STATUS WHERE NAME='testp_bug11763507';
Db Name Type Definer Modified Created Security_type Comment character_set_client collation_connection Database Collation
test testp_bug11763507 PROCEDURE root@localhost # # DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
SHOW PROCEDURE STATUS LIKE 'TESTP_bug11763507';
Db Name Type Definer Modified Created Security_type Comment character_set_client collation_connection Database Collation
test testp_bug11763507 PROCEDURE root@localhost # # DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
SHOW PROCEDURE STATUS WHERE NAME='TESTP_bug11763507';
Db Name Type Definer Modified Created Security_type Comment character_set_client collation_connection Database Collation
test testp_bug11763507 PROCEDURE root@localhost # # DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
SHOW CREATE PROCEDURE testp_bug11763507;
Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation
testp_bug11763507 CREATE DEFINER=`root`@`localhost` PROCEDURE `testp_bug11763507`()
BEGIN
SELECT "PROCEDURE testp_bug11763507";
END latin1 latin1_swedish_ci latin1_swedish_ci
SHOW CREATE PROCEDURE TESTP_bug11763507;
Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation
testp_bug11763507 CREATE DEFINER=`root`@`localhost` PROCEDURE `testp_bug11763507`()
BEGIN
SELECT "PROCEDURE testp_bug11763507";
END latin1 latin1_swedish_ci latin1_swedish_ci
SELECT specific_name FROM INFORMATION_SCHEMA.ROUTINES WHERE specific_name LIKE 'testf_bug11763507';
specific_name
testf_bug11763507
SELECT specific_name FROM INFORMATION_SCHEMA.ROUTINES WHERE specific_name LIKE 'TESTF_bug11763507';
specific_name
testf_bug11763507
SELECT specific_name FROM INFORMATION_SCHEMA.ROUTINES WHERE specific_name='testf_bug11763507';
specific_name
testf_bug11763507
SELECT specific_name FROM INFORMATION_SCHEMA.ROUTINES WHERE specific_name='TESTF_bug11763507';
specific_name
testf_bug11763507
DROP PROCEDURE testp_bug11763507;
DROP FUNCTION testf_bug11763507;
#END OF BUG#11763507 test.
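As an aside (not part of the recorded result): the behaviour verified above is that stored routine names are looked up case-insensitively, so any casing of the identifier resolves to the same object. A minimal illustration, assuming the routines created above still exist:
SELECT testf_bug11763507();   -- resolves to the same function
SELECT TESTF_BUG11763507();   -- regardless of the casing used in the call
CALL Testp_Bug11763507();     -- the same holds for procedure names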

View File

@ -945,3 +945,32 @@ Error 1329 No data - zero rows fetched, selected, or processed
SET SESSION debug_dbug="-d,bug23032_emit_warning"; SET SESSION debug_dbug="-d,bug23032_emit_warning";
DROP PROCEDURE p1; DROP PROCEDURE p1;
DROP TABLE t1; DROP TABLE t1;
#
# Bug#11763507 - 56224: FUNCTION NAME IS CASE-SENSITIVE
#
SET @@SQL_MODE = '';
CREATE FUNCTION testf_bug11763507() RETURNS INT
BEGIN
RETURN 0;
END
$
CREATE PROCEDURE testp_bug11763507()
BEGIN
SELECT "PROCEDURE testp_bug11763507";
END
$
SHOW FUNCTION CODE testf_bug11763507;
Pos Instruction
0 freturn 3 0
SHOW FUNCTION CODE TESTF_bug11763507;
Pos Instruction
0 freturn 3 0
SHOW PROCEDURE CODE testp_bug11763507;
Pos Instruction
0 stmt 0 "SELECT "PROCEDURE testp_bug11763507""
SHOW PROCEDURE CODE TESTP_bug11763507;
Pos Instruction
0 stmt 0 "SELECT "PROCEDURE testp_bug11763507""
DROP PROCEDURE testp_bug11763507;
DROP FUNCTION testf_bug11763507;
#END OF BUG#11763507 test.

View File

@ -7745,6 +7745,44 @@ CALL p2();
DROP PROCEDURE p1; DROP PROCEDURE p1;
DROP PROCEDURE p2; DROP PROCEDURE p2;
DROP TABLE t1; DROP TABLE t1;
# Bug#13805127: Stored program cache produces wrong result in same THD
CREATE PROCEDURE p1(x INT UNSIGNED)
BEGIN
SELECT c1, t2.c2, count(c3)
FROM
(
SELECT 3 as c2 FROM dual WHERE x = 1
UNION
SELECT 2 FROM dual WHERE x = 1 OR x = 2
) AS t1,
(
SELECT '2012-03-01 01:00:00' AS c1, 3 as c2, 1 as c3 FROM dual
UNION
SELECT '2012-03-01 02:00:00', 3, 2 FROM dual
UNION
SELECT '2012-03-01 01:00:00', 2, 1 FROM dual
) AS t2
WHERE t2.c2 = t1.c2
GROUP BY c1, c2
;
END|
CALL p1(1);
c1 c2 count(c3)
2012-03-01 01:00:00 2 1
2012-03-01 01:00:00 3 1
2012-03-01 02:00:00 3 1
CALL p1(2);
c1 c2 count(c3)
2012-03-01 01:00:00 2 1
CALL p1(1);
c1 c2 count(c3)
2012-03-01 01:00:00 2 1
2012-03-01 01:00:00 3 1
2012-03-01 02:00:00 3 1
DROP PROCEDURE p1;
# End of 5.5 test # End of 5.5 test
# #
# Bug#12663165 SP DEAD CODE REMOVAL DOESN'T UNDERSTAND CONTINUE HANDLERS # Bug#12663165 SP DEAD CODE REMOVAL DOESN'T UNDERSTAND CONTINUE HANDLERS

View File

@ -258,4 +258,25 @@ SELECT `my.db`.f1(2);
2 2
# Switching to default connection. # Switching to default connection.
DROP DATABASE `my.db`; DROP DATABASE `my.db`;
USE test;
set @@global.concurrent_insert= @old_concurrent_insert; set @@global.concurrent_insert= @old_concurrent_insert;
#
# Bug#11763507 - 56224: FUNCTION NAME IS CASE-SENSITIVE
#
SET @@SQL_MODE = '';
CREATE EVENT teste_bug11763507 ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR
DO SELECT 1 $
SHOW EVENTS LIKE 'teste_bug11763507';
Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation
test teste_bug11763507 root@localhost SYSTEM ONE TIME # # # # NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
SHOW EVENTS LIKE 'TESTE_bug11763507';
Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation
test teste_bug11763507 root@localhost SYSTEM ONE TIME # # # # NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
SHOW CREATE EVENT teste_bug11763507;
Event sql_mode time_zone Create Event character_set_client collation_connection Database Collation
teste_bug11763507 SYSTEM # latin1 latin1_swedish_ci latin1_swedish_ci
SHOW CREATE EVENT TESTE_bug11763507;
Event sql_mode time_zone Create Event character_set_client collation_connection Database Collation
teste_bug11763507 SYSTEM # latin1 latin1_swedish_ci latin1_swedish_ci
DROP EVENT teste_bug11763507;
#END OF BUG#11763507 test.

View File

@ -533,3 +533,207 @@ SELECT * FROM mysql.columns_priv WHERE Host = 'localhost' AND User LIKE 'user_%P
Host Db User Table_name Column_name Timestamp Column_priv Host Db User Table_name Column_name Timestamp Column_priv
DROP TABLE t1; DROP TABLE t1;
DROP TABLE t2; DROP TABLE t2;
#
# Test for Bug#12601974 - STORED PROCEDURE SQL_MODE=NO_BACKSLASH_ESCAPES
# IGNORED AND BREAKS REPLICATION
#
DROP TABLE IF EXISTS test_table;
DROP FUNCTION IF EXISTS test_function;
CREATE TABLE test_table (c1 CHAR(50));
SET @org_mode=@@sql_mode;
SET @@sql_mode='';
PREPARE insert_stmt FROM 'INSERT INTO test_table VALUES (?)';
PREPARE update_stmt FROM 'UPDATE test_table SET c1= ? WHERE c1= ?';
CREATE FUNCTION test_function(var CHAR(50)) RETURNS CHAR(50)
BEGIN
DECLARE char_val CHAR(50);
SELECT c1 INTO char_val FROM test_table WHERE c1=var;
RETURN char_val;
END
$
SET @var1='abcd\'ef';
SET @var2='abcd\"ef';
SET @var3='abcd\bef';
SET @var4='abcd\nef';
SET @var5='abcd\ref';
SET @var6='abcd\tef';
SET @var7='abcd\\ef';
SET @var8='abcd\%ef';
SET @var9='abcd\_ef';
SET @to_var1='wxyz\'ef';
SET @to_var2='wxyz\"ef';
SET @to_var3='wxyz\bef';
SET @to_var4='wxyz\nef';
SET @to_var5='wxyz\ref';
SET @to_var6='wxyz\tef';
SET @to_var7='wxyz\\ef';
SET @to_var8='wxyz\%ef';
SET @to_var9='wxyz\_ef';
# STRING LITERAL WITH BACKSLASH IN PREPARE STATEMENT
EXECUTE insert_stmt USING @var1;
EXECUTE insert_stmt USING @var2;
EXECUTE insert_stmt USING @var3;
EXECUTE insert_stmt USING @var4;
EXECUTE insert_stmt USING @var5;
EXECUTE insert_stmt USING @var6;
EXECUTE insert_stmt USING @var7;
EXECUTE insert_stmt USING @var8;
EXECUTE insert_stmt USING @var9;
SELECT * FROM test_table;
c1
abcd'ef
abcd"ef
abcdef
abcd
ef
abcd ef
abcd ef
abcd\ef
abcd\%ef
abcd\_ef
EXECUTE update_stmt USING @to_var1, @var1;
EXECUTE update_stmt USING @to_var2, @var2;
EXECUTE update_stmt USING @to_var3, @var3;
EXECUTE update_stmt USING @to_var4, @var4;
EXECUTE update_stmt USING @to_var5, @var5;
EXECUTE update_stmt USING @to_var6, @var6;
EXECUTE update_stmt USING @to_var7, @var7;
EXECUTE update_stmt USING @to_var8, @var8;
EXECUTE update_stmt USING @to_var9, @var9;
SELECT * FROM test_table;
c1
wxyz'ef
wxyz"ef
wxyzef
wxyz
ef
wxyz ef
wxyz ef
wxyz\ef
wxyz\%ef
wxyz\_ef
# END OF CASE - STRING LITERAL WITH BACKSLASH IN PREPARE STATEMENT
# STRING LITERAL WITH BACKSLASH IN FUNCTION RETURNING STRING
select test_function(@to_var1);
test_function(@to_var1)
wxyz'ef
SELECT test_function(@to_var2);
test_function(@to_var2)
wxyz"ef
SELECT test_function(@to_var3);
test_function(@to_var3)
wxyzef
SELECT test_function(@to_var4);
test_function(@to_var4)
wxyz
ef
SELECT test_function(@to_var5);
test_function(@to_var5)
wxyz ef
SELECT test_function(@to_var6);
test_function(@to_var6)
wxyz ef
SELECT test_function(@to_var7);
test_function(@to_var7)
wxyz\ef
SELECT test_function(@to_var8);
test_function(@to_var8)
wxyz\%ef
SELECT test_function(@to_var9);
test_function(@to_var9)
wxyz\_ef
# END OF CASE - STRING LITERAL WITH BACKSLASH IN FUNCTION RETURNING STRING
DELETE FROM test_table;
DROP FUNCTION test_function;
SET @@sql_mode='NO_BACKSLASH_ESCAPES';
CREATE FUNCTION test_function(var CHAR(50)) RETURNS CHAR(50)
BEGIN
DECLARE char_val CHAR(50);
SELECT c1 INTO char_val FROM test_table WHERE c1=var;
RETURN char_val;
END
$
# STRING LITERAL WITH BACKSLASH IN PREPARE STATEMENT
EXECUTE insert_stmt USING @var1;
EXECUTE insert_stmt USING @var2;
EXECUTE insert_stmt USING @var3;
EXECUTE insert_stmt USING @var4;
EXECUTE insert_stmt USING @var5;
EXECUTE insert_stmt USING @var6;
EXECUTE insert_stmt USING @var7;
EXECUTE insert_stmt USING @var8;
EXECUTE insert_stmt USING @var9;
SELECT * FROM test_table;
c1
abcd'ef
abcd"ef
abcdef
abcd
ef
abcd ef
abcd ef
abcd\ef
abcd\%ef
abcd\_ef
EXECUTE update_stmt USING @to_var1, @var1;
EXECUTE update_stmt USING @to_var2, @var2;
EXECUTE update_stmt USING @to_var3, @var3;
EXECUTE update_stmt USING @to_var4, @var4;
EXECUTE update_stmt USING @to_var5, @var5;
EXECUTE update_stmt USING @to_var6, @var6;
EXECUTE update_stmt USING @to_var7, @var7;
EXECUTE update_stmt USING @to_var8, @var8;
EXECUTE update_stmt USING @to_var9, @var9;
SELECT * FROM test_table;
c1
wxyz'ef
wxyz"ef
wxyzef
wxyz
ef
wxyz ef
wxyz ef
wxyz\ef
wxyz\%ef
wxyz\_ef
# END OF CASE - STRING LITERAL WITH BACKSLASH IN PREPARE STATEMENT
# STRING LITERAL WITH BACKSLASH IN FUNCTION RETURNING STRING
select test_function(@to_var1);
test_function(@to_var1)
wxyz'ef
SELECT test_function(@to_var2);
test_function(@to_var2)
wxyz"ef
SELECT test_function(@to_var3);
test_function(@to_var3)
wxyzef
SELECT test_function(@to_var4);
test_function(@to_var4)
wxyz
ef
SELECT test_function(@to_var5);
test_function(@to_var5)
wxyz ef
SELECT test_function(@to_var6);
test_function(@to_var6)
wxyz ef
SELECT test_function(@to_var7);
test_function(@to_var7)
wxyz\ef
SELECT test_function(@to_var8);
test_function(@to_var8)
wxyz\%ef
SELECT test_function(@to_var9);
test_function(@to_var9)
wxyz\_ef
# END OF CASE - STRING LITERAL WITH BACKSLASH IN FUNCTION RETURNING STRING
DROP TABLE test_table;
DROP FUNCTION test_function;
SET @@sql_mode= @org_mode;
#End of Test for Bug#12601974
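As an aside (not part of the recorded result): for readers unfamiliar with the mode under test, a minimal sketch of how NO_BACKSLASH_ESCAPES changes string-literal parsing, which is what the prepared statements and the stored function above exercise:
SET @@sql_mode='';
SELECT 'abcd\nef';   -- backslash-n is an escape: abcd, a newline, ef
SET @@sql_mode='NO_BACKSLASH_ESCAPES';
SELECT 'abcd\nef';   -- backslash is an ordinary character: the 8 characters abcd\nef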

View File

@ -4645,6 +4645,55 @@ ERROR 21000: Subquery returns more than 1 row
SET SESSION sql_mode=@old_sql_mode; SET SESSION sql_mode=@old_sql_mode;
DEALLOCATE PREPARE stmt; DEALLOCATE PREPARE stmt;
DROP TABLE t1; DROP TABLE t1;
#
# Bug#12763207 - ASSERT IN SUBSELECT::SINGLE_VALUE_TRANSFORMER
#
CREATE TABLE t1(a1 int);
INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2(a1 int);
INSERT INTO t2 VALUES (3);
SELECT @@session.sql_mode INTO @old_sql_mode;
SET SESSION sql_mode='ONLY_FULL_GROUP_BY';
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 2 FROM t2);
1
1
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 2.0 FROM t2);
1
1
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 'a' FROM t2);
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT a1 FROM t2);
1
1
1
SET SESSION sql_mode=@old_sql_mode;
DROP TABLE t1, t2;
#
# Bug#12763207 - ASSERT IN SUBSELECT::SINGLE_VALUE_TRANSFORMER
#
create table t2(i int);
insert into t2 values(0);
SELECT @@session.sql_mode INTO @old_sql_mode;
SET SESSION sql_mode='ONLY_FULL_GROUP_BY';
CREATE VIEW v1 AS
SELECT 'f' FROM t2 UNION SELECT 'x' FROM t2
;
CREATE TABLE t1 (
pk int NOT NULL,
col_varchar_key varchar(1) DEFAULT NULL,
PRIMARY KEY (pk),
KEY col_varchar_key (col_varchar_key)
);
SELECT t1.pk
FROM t1
WHERE t1.col_varchar_key < ALL ( SELECT * FROM v1 )
;
pk
SET SESSION sql_mode=@old_sql_mode;
drop table t2, t1;
drop view v1;
End of 5.0 tests. End of 5.0 tests.
create table t_out (subcase char(3), create table t_out (subcase char(3),
a1 char(2), b1 char(2), c1 char(2)); a1 char(2), b1 char(2), c1 char(2));

View File

@ -4647,6 +4647,55 @@ ERROR 21000: Subquery returns more than 1 row
SET SESSION sql_mode=@old_sql_mode; SET SESSION sql_mode=@old_sql_mode;
DEALLOCATE PREPARE stmt; DEALLOCATE PREPARE stmt;
DROP TABLE t1; DROP TABLE t1;
#
# Bug#12763207 - ASSERT IN SUBSELECT::SINGLE_VALUE_TRANSFORMER
#
CREATE TABLE t1(a1 int);
INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2(a1 int);
INSERT INTO t2 VALUES (3);
SELECT @@session.sql_mode INTO @old_sql_mode;
SET SESSION sql_mode='ONLY_FULL_GROUP_BY';
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 2 FROM t2);
1
1
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 2.0 FROM t2);
1
1
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 'a' FROM t2);
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT a1 FROM t2);
1
1
1
SET SESSION sql_mode=@old_sql_mode;
DROP TABLE t1, t2;
#
# Bug#12763207 - ASSERT IN SUBSELECT::SINGLE_VALUE_TRANSFORMER
#
create table t2(i int);
insert into t2 values(0);
SELECT @@session.sql_mode INTO @old_sql_mode;
SET SESSION sql_mode='ONLY_FULL_GROUP_BY';
CREATE VIEW v1 AS
SELECT 'f' FROM t2 UNION SELECT 'x' FROM t2
;
CREATE TABLE t1 (
pk int NOT NULL,
col_varchar_key varchar(1) DEFAULT NULL,
PRIMARY KEY (pk),
KEY col_varchar_key (col_varchar_key)
);
SELECT t1.pk
FROM t1
WHERE t1.col_varchar_key < ALL ( SELECT * FROM v1 )
;
pk
SET SESSION sql_mode=@old_sql_mode;
drop table t2, t1;
drop view v1;
End of 5.0 tests. End of 5.0 tests.
create table t_out (subcase char(3), create table t_out (subcase char(3),
a1 char(2), b1 char(2), c1 char(2)); a1 char(2), b1 char(2), c1 char(2));

View File

@ -4643,6 +4643,55 @@ ERROR 21000: Subquery returns more than 1 row
SET SESSION sql_mode=@old_sql_mode; SET SESSION sql_mode=@old_sql_mode;
DEALLOCATE PREPARE stmt; DEALLOCATE PREPARE stmt;
DROP TABLE t1; DROP TABLE t1;
#
# Bug#12763207 - ASSERT IN SUBSELECT::SINGLE_VALUE_TRANSFORMER
#
CREATE TABLE t1(a1 int);
INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2(a1 int);
INSERT INTO t2 VALUES (3);
SELECT @@session.sql_mode INTO @old_sql_mode;
SET SESSION sql_mode='ONLY_FULL_GROUP_BY';
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 2 FROM t2);
1
1
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 2.0 FROM t2);
1
1
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 'a' FROM t2);
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT a1 FROM t2);
1
1
1
SET SESSION sql_mode=@old_sql_mode;
DROP TABLE t1, t2;
#
# Bug#12763207 - ASSERT IN SUBSELECT::SINGLE_VALUE_TRANSFORMER
#
create table t2(i int);
insert into t2 values(0);
SELECT @@session.sql_mode INTO @old_sql_mode;
SET SESSION sql_mode='ONLY_FULL_GROUP_BY';
CREATE VIEW v1 AS
SELECT 'f' FROM t2 UNION SELECT 'x' FROM t2
;
CREATE TABLE t1 (
pk int NOT NULL,
col_varchar_key varchar(1) DEFAULT NULL,
PRIMARY KEY (pk),
KEY col_varchar_key (col_varchar_key)
);
SELECT t1.pk
FROM t1
WHERE t1.col_varchar_key < ALL ( SELECT * FROM v1 )
;
pk
SET SESSION sql_mode=@old_sql_mode;
drop table t2, t1;
drop view v1;
End of 5.0 tests. End of 5.0 tests.
create table t_out (subcase char(3), create table t_out (subcase char(3),
a1 char(2), b1 char(2), c1 char(2)); a1 char(2), b1 char(2), c1 char(2));

View File

@ -4651,6 +4651,55 @@ ERROR 21000: Subquery returns more than 1 row
SET SESSION sql_mode=@old_sql_mode; SET SESSION sql_mode=@old_sql_mode;
DEALLOCATE PREPARE stmt; DEALLOCATE PREPARE stmt;
DROP TABLE t1; DROP TABLE t1;
#
# Bug#12763207 - ASSERT IN SUBSELECT::SINGLE_VALUE_TRANSFORMER
#
CREATE TABLE t1(a1 int);
INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2(a1 int);
INSERT INTO t2 VALUES (3);
SELECT @@session.sql_mode INTO @old_sql_mode;
SET SESSION sql_mode='ONLY_FULL_GROUP_BY';
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 2 FROM t2);
1
1
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 2.0 FROM t2);
1
1
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 'a' FROM t2);
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT a1 FROM t2);
1
1
1
SET SESSION sql_mode=@old_sql_mode;
DROP TABLE t1, t2;
#
# Bug#12763207 - ASSERT IN SUBSELECT::SINGLE_VALUE_TRANSFORMER
#
create table t2(i int);
insert into t2 values(0);
SELECT @@session.sql_mode INTO @old_sql_mode;
SET SESSION sql_mode='ONLY_FULL_GROUP_BY';
CREATE VIEW v1 AS
SELECT 'f' FROM t2 UNION SELECT 'x' FROM t2
;
CREATE TABLE t1 (
pk int NOT NULL,
col_varchar_key varchar(1) DEFAULT NULL,
PRIMARY KEY (pk),
KEY col_varchar_key (col_varchar_key)
);
SELECT t1.pk
FROM t1
WHERE t1.col_varchar_key < ALL ( SELECT * FROM v1 )
;
pk
SET SESSION sql_mode=@old_sql_mode;
drop table t2, t1;
drop view v1;
End of 5.0 tests. End of 5.0 tests.
create table t_out (subcase char(3), create table t_out (subcase char(3),
a1 char(2), b1 char(2), c1 char(2)); a1 char(2), b1 char(2), c1 char(2));

View File

@ -4643,6 +4643,55 @@ ERROR 21000: Subquery returns more than 1 row
SET SESSION sql_mode=@old_sql_mode; SET SESSION sql_mode=@old_sql_mode;
DEALLOCATE PREPARE stmt; DEALLOCATE PREPARE stmt;
DROP TABLE t1; DROP TABLE t1;
#
# Bug#12763207 - ASSERT IN SUBSELECT::SINGLE_VALUE_TRANSFORMER
#
CREATE TABLE t1(a1 int);
INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2(a1 int);
INSERT INTO t2 VALUES (3);
SELECT @@session.sql_mode INTO @old_sql_mode;
SET SESSION sql_mode='ONLY_FULL_GROUP_BY';
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 2 FROM t2);
1
1
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 2.0 FROM t2);
1
1
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 'a' FROM t2);
1
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT a1 FROM t2);
1
1
1
SET SESSION sql_mode=@old_sql_mode;
DROP TABLE t1, t2;
#
# Bug#12763207 - ASSERT IN SUBSELECT::SINGLE_VALUE_TRANSFORMER
#
create table t2(i int);
insert into t2 values(0);
SELECT @@session.sql_mode INTO @old_sql_mode;
SET SESSION sql_mode='ONLY_FULL_GROUP_BY';
CREATE VIEW v1 AS
SELECT 'f' FROM t2 UNION SELECT 'x' FROM t2
;
CREATE TABLE t1 (
pk int NOT NULL,
col_varchar_key varchar(1) DEFAULT NULL,
PRIMARY KEY (pk),
KEY col_varchar_key (col_varchar_key)
);
SELECT t1.pk
FROM t1
WHERE t1.col_varchar_key < ALL ( SELECT * FROM v1 )
;
pk
SET SESSION sql_mode=@old_sql_mode;
drop table t2, t1;
drop view v1;
End of 5.0 tests. End of 5.0 tests.
create table t_out (subcase char(3), create table t_out (subcase char(3),
a1 char(2), b1 char(2), c1 char(2)); a1 char(2), b1 char(2), c1 char(2));

View File

@ -4609,6 +4609,13 @@ UNLOCK TABLES;
DROP PROCEDURE p1; DROP PROCEDURE p1;
DROP TABLE t1; DROP TABLE t1;
# #
# Bug#12626844: WRONG ERROR MESSAGE WHILE CREATING A VIEW ON A
# NON EXISTING DATABASE
#
DROP DATABASE IF EXISTS nodb;
CREATE VIEW nodb.a AS SELECT 1;
ERROR 42000: Unknown database 'nodb'
#
# lp:833600 Wrong result with view + outer join + uncorrelated subquery (non-semijoin) # lp:833600 Wrong result with view + outer join + uncorrelated subquery (non-semijoin)
# #
CREATE TABLE t1 ( a int, b int ); CREATE TABLE t1 ( a int, b int );

View File

@ -174,7 +174,6 @@ CREATE TABLE t1 (a INT) engine=InnoDB;
XA START 'a'; XA START 'a';
INSERT INTO t1 VALUES (1); INSERT INTO t1 VALUES (1);
SAVEPOINT savep; SAVEPOINT savep;
ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ACTIVE state
XA END 'a'; XA END 'a';
SELECT * FROM t1; SELECT * FROM t1;
ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the IDLE state ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the IDLE state

View File

@ -38,3 +38,114 @@ DROP VIEW testView;
DROP TABLE t1; DROP TABLE t1;
SET @@global.sql_mode= @old_sql_mode; SET @@global.sql_mode= @old_sql_mode;
SET @@session.binlog_format=@old_binlog_format; SET @@session.binlog_format=@old_binlog_format;
#
# Test for Bug#12601974 - STORED PROCEDURE SQL_MODE=NO_BACKSLASH_ESCAPES
# IGNORED AND BREAKS REPLICATION
#
DROP DATABASE IF EXISTS mysqltest_db;
DROP TABLE IF EXISTS test_table;
CREATE DATABASE mysqltest_db;
USE mysqltest_db;
CREATE TABLE test_table (c1 CHAR(50));
SET @org_mode=@@sql_mode;
SET @@sql_mode='';
CREATE PROCEDURE proc_without_sql_mode (IN param1 CHAR(50), IN param2 CHAR(50))
BEGIN
DECLARE var1 CHAR(50) DEFAULT param1;
DECLARE var2 CHAR(50) DEFAULT param2;
DECLARE var3 CHAR(50) DEFAULT 'abcd\bef';
DECLARE var4 CHAR(50) DEFAULT 'abcd\nef';
DECLARE var5 CHAR(50) DEFAULT 'abcd\ref';
DECLARE var6 CHAR(50) DEFAULT 'abcd\tef';
DECLARE var7 CHAR(50) DEFAULT 'abcd\\ef';
DECLARE var8 CHAR(50) DEFAULT 'abcd\%ef';
DECLARE var9 CHAR(50) DEFAULT 'abcd\_ef';
INSERT INTO test_table VALUES (var1);
INSERT INTO test_table VALUES (var2);
INSERT INTO test_table VALUES (var3);
INSERT INTO test_table VALUES (var4);
INSERT INTO test_table VALUES (var5);
INSERT INTO test_table VALUES (var6);
INSERT INTO test_table VALUES (var7);
INSERT INTO test_table VALUES (var8);
INSERT INTO test_table VALUES (var9);
END
$
SET @@sql_mode='NO_BACKSLASH_ESCAPES'$
CREATE PROCEDURE proc_with_sql_mode (IN param1 CHAR(50), IN param2 CHAR(50))
BEGIN
DECLARE var1 CHAR(50) DEFAULT param1;
DECLARE var2 CHAR(50) DEFAULT param2;
DECLARE var3 CHAR(50) DEFAULT 'wxyz\bef';
DECLARE var4 CHAR(50) DEFAULT 'wxyz\nef';
DECLARE var5 CHAR(50) DEFAULT 'wxyz\ref';
DECLARE var6 CHAR(50) DEFAULT 'wxyz\tef';
DECLARE var7 CHAR(50) DEFAULT 'wxyz\\ef';
DECLARE var8 CHAR(50) DEFAULT 'wxyz\%ef';
DECLARE var9 CHAR(50) DEFAULT 'wxyz\_ef';
INSERT INTO test_table VALUES (var1);
INSERT INTO test_table VALUES (var2);
INSERT INTO test_table VALUES (var3);
INSERT INTO test_table VALUES (var4);
INSERT INTO test_table VALUES (var5);
INSERT INTO test_table VALUES (var6);
INSERT INTO test_table VALUES (var7);
INSERT INTO test_table VALUES (var8);
INSERT INTO test_table VALUES (var9);
END
$
SET @@sql_mode='';
CALL proc_without_sql_mode('abcd\'ef', 'abcd\"ef');
CALL proc_with_sql_mode('wxyz\'ef', 'wxyz\"ef');
SELECT * FROM test_table;
c1
abcd'ef
abcd"ef
abcdef
abcd
ef
abcd ef
abcd ef
abcd\ef
abcd\%ef
abcd\_ef
wxyz'ef
wxyz"ef
wxyz\bef
wxyz\nef
wxyz\ref
wxyz\tef
wxyz\\ef
wxyz\%ef
wxyz\_ef
"Dropping table test_table"
DROP TABLE test_table;
#"test_table" content after replaying the binlog
SELECT * FROM test_table;
c1
abcd'ef
abcd"ef
abcdef
abcd
ef
abcd ef
abcd ef
abcd\ef
abcd\%ef
abcd\_ef
wxyz'ef
wxyz"ef
wxyz\bef
wxyz\nef
wxyz\ref
wxyz\tef
wxyz\\ef
wxyz\%ef
wxyz\_ef
#Clean up
DROP DATABASE mysqltest_db;
SET @@sql_mode= @org_mode;
use test;
#End of Test for Bug#12601974
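As an aside (a sketch, not part of the recorded result): each stored procedure keeps the sql_mode that was active when it was created, which is why proc_with_sql_mode preserves the backslashes when the binlog is replayed. While mysqltest_db still exists, this could be confirmed with something like:
SELECT ROUTINE_NAME, SQL_MODE
FROM INFORMATION_SCHEMA.ROUTINES
WHERE ROUTINE_SCHEMA = 'mysqltest_db';
-- proc_without_sql_mode  ->  (empty)
-- proc_with_sql_mode     ->  NO_BACKSLASH_ESCAPES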

View File

@ -73,3 +73,99 @@ DROP TABLE t1;
SET @@global.sql_mode= @old_sql_mode; SET @@global.sql_mode= @old_sql_mode;
SET @@session.binlog_format=@old_binlog_format; SET @@session.binlog_format=@old_binlog_format;
--echo
--echo #
--echo # Test for Bug#12601974 - STORED PROCEDURE SQL_MODE=NO_BACKSLASH_ESCAPES
--echo # IGNORED AND BREAKS REPLICATION
--echo #
--disable_warnings
DROP DATABASE IF EXISTS mysqltest_db;
DROP TABLE IF EXISTS test_table;
--enable_warnings
CREATE DATABASE mysqltest_db;
USE mysqltest_db;
CREATE TABLE test_table (c1 CHAR(50));
SET @org_mode=@@sql_mode;
SET @@sql_mode='';
DELIMITER $;
CREATE PROCEDURE proc_without_sql_mode (IN param1 CHAR(50), IN param2 CHAR(50))
BEGIN
DECLARE var1 CHAR(50) DEFAULT param1;
DECLARE var2 CHAR(50) DEFAULT param2;
DECLARE var3 CHAR(50) DEFAULT 'abcd\bef';
DECLARE var4 CHAR(50) DEFAULT 'abcd\nef';
DECLARE var5 CHAR(50) DEFAULT 'abcd\ref';
DECLARE var6 CHAR(50) DEFAULT 'abcd\tef';
DECLARE var7 CHAR(50) DEFAULT 'abcd\\ef';
DECLARE var8 CHAR(50) DEFAULT 'abcd\%ef';
DECLARE var9 CHAR(50) DEFAULT 'abcd\_ef';
INSERT INTO test_table VALUES (var1);
INSERT INTO test_table VALUES (var2);
INSERT INTO test_table VALUES (var3);
INSERT INTO test_table VALUES (var4);
INSERT INTO test_table VALUES (var5);
INSERT INTO test_table VALUES (var6);
INSERT INTO test_table VALUES (var7);
INSERT INTO test_table VALUES (var8);
INSERT INTO test_table VALUES (var9);
END
$
SET @@sql_mode='NO_BACKSLASH_ESCAPES'$
CREATE PROCEDURE proc_with_sql_mode (IN param1 CHAR(50), IN param2 CHAR(50))
BEGIN
DECLARE var1 CHAR(50) DEFAULT param1;
DECLARE var2 CHAR(50) DEFAULT param2;
DECLARE var3 CHAR(50) DEFAULT 'wxyz\bef';
DECLARE var4 CHAR(50) DEFAULT 'wxyz\nef';
DECLARE var5 CHAR(50) DEFAULT 'wxyz\ref';
DECLARE var6 CHAR(50) DEFAULT 'wxyz\tef';
DECLARE var7 CHAR(50) DEFAULT 'wxyz\\ef';
DECLARE var8 CHAR(50) DEFAULT 'wxyz\%ef';
DECLARE var9 CHAR(50) DEFAULT 'wxyz\_ef';
INSERT INTO test_table VALUES (var1);
INSERT INTO test_table VALUES (var2);
INSERT INTO test_table VALUES (var3);
INSERT INTO test_table VALUES (var4);
INSERT INTO test_table VALUES (var5);
INSERT INTO test_table VALUES (var6);
INSERT INTO test_table VALUES (var7);
INSERT INTO test_table VALUES (var8);
INSERT INTO test_table VALUES (var9);
END
$
DELIMITER ;$
SET @@sql_mode='';
CALL proc_without_sql_mode('abcd\'ef', 'abcd\"ef');
CALL proc_with_sql_mode('wxyz\'ef', 'wxyz\"ef');
SELECT * FROM test_table;
let $MYSQLD_DATADIR= `select @@datadir`;
--exec $MYSQL_BINLOG --force-if-open -d mysqltest_db $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_bug12601974.binlog
--echo "Dropping table test_table"
DROP TABLE test_table;
--exec $MYSQL -e "source $MYSQLTEST_VARDIR/tmp/mysqlbinlog_bug12601974.binlog"
--echo #"test_table" content after replaying the binlog
SELECT * FROM test_table;
--echo #Clean up
--remove_file $MYSQLTEST_VARDIR/tmp/mysqlbinlog_bug12601974.binlog
DROP DATABASE mysqltest_db;
SET @@sql_mode= @org_mode;
use test;
--echo
--echo #End of Test for Bug#12601974

View File

@ -1,26 +0,0 @@
--eval create table t1(a int not null, b int, c char(10), d varchar(20), primary key (a)) engine = innodb default charset=$charset
insert into t1 values (1,1,'ab','ab'),(2,2,'ac','ac'),(3,2,'ad','ad'),(4,4,'afe','afe');
commit;
--error ER_DUP_ENTRY
alter table t1 add unique index (b);
insert into t1 values(8,9,'fff','fff');
select * from t1;
show create table t1;
alter table t1 add index (b);
insert into t1 values(10,10,'kkk','iii');
select * from t1;
select * from t1 force index(b) order by b;
explain select * from t1 force index(b) order by b;
show create table t1;
alter table t1 add unique index (c), add index (d);
insert into t1 values(11,11,'aaa','mmm');
select * from t1;
select * from t1 force index(b) order by b;
select * from t1 force index(c) order by c;
select * from t1 force index(d) order by d;
explain select * from t1 force index(b) order by b;
explain select * from t1 force index(c) order by c;
explain select * from t1 force index(d) order by d;
show create table t1;
check table t1;
drop table t1;

View File

@ -366,7 +366,7 @@ c1
310 310
400 400
1000 1000
1110 1010
DROP TABLE t1; DROP TABLE t1;
SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=1;
SET @@INSERT_ID=1; SET @@INSERT_ID=1;
@ -648,7 +648,7 @@ t2 CREATE TABLE `t2` (
`n` int(10) unsigned NOT NULL, `n` int(10) unsigned NOT NULL,
`o` enum('FALSE','TRUE') DEFAULT NULL, `o` enum('FALSE','TRUE') DEFAULT NULL,
PRIMARY KEY (`m`) PRIMARY KEY (`m`)
) ENGINE=InnoDB AUTO_INCREMENT=11 DEFAULT CHARSET=latin1 ) ENGINE=InnoDB AUTO_INCREMENT=19 DEFAULT CHARSET=latin1
INSERT INTO t1 (b,c) SELECT n,o FROM t2 ; INSERT INTO t1 (b,c) SELECT n,o FROM t2 ;
SHOW CREATE TABLE t1; SHOW CREATE TABLE t1;
Table Create Table Table Create Table
@ -1251,3 +1251,21 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`c1`) PRIMARY KEY (`c1`)
) ENGINE=InnoDB AUTO_INCREMENT=18446744073709551615 DEFAULT CHARSET=latin1 ) ENGINE=InnoDB AUTO_INCREMENT=18446744073709551615 DEFAULT CHARSET=latin1
DROP TABLE t1; DROP TABLE t1;
SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=256;
SHOW VARIABLES LIKE "%auto_inc%";
Variable_name Value
auto_increment_increment 1
auto_increment_offset 256
CREATE TABLE t1 (c1 TINYINT PRIMARY KEY AUTO_INCREMENT, c2 VARCHAR(10)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1, NULL);
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`c1` tinyint(4) NOT NULL AUTO_INCREMENT,
`c2` varchar(10) DEFAULT NULL,
PRIMARY KEY (`c1`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1
SELECT * FROM t1;
c1 c2
1 NULL
DROP TABLE t1;

View File

@ -0,0 +1,5 @@
create table t1 (f1 char(255)) engine innodb;
drop table t1;
create table t1 (f1 char(255)) engine innodb;
1
drop table t1;

View File

@ -637,3 +637,14 @@ INSERT INTO t1 VALUES (18446744073709551615);
-- source include/restart_mysqld.inc -- source include/restart_mysqld.inc
SHOW CREATE TABLE t1; SHOW CREATE TABLE t1;
DROP TABLE t1; DROP TABLE t1;
# Check if we handle offset > column max value properly
SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=256;
SHOW VARIABLES LIKE "%auto_inc%";
# TINYINT
CREATE TABLE t1 (c1 TINYINT PRIMARY KEY AUTO_INCREMENT, c2 VARCHAR(10)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1, NULL);
SHOW CREATE TABLE t1;
SELECT * FROM t1;
DROP TABLE t1;

View File

@ -0,0 +1,59 @@
# Bug 11766634 59783: InnoDB data grows unexpectedly when inserting,
# truncating, inserting the same set of rows.
#
# Scenario:
# Create table t1. Insert $recs records. Check the size of ibdata1.
# Drop table t1. Create table t1 again. Insert the same set of $recs
# records. The size of ibdata1 must not increase.
#
-- source include/not_embedded.inc
-- source include/have_innodb.inc
create table t1 (f1 char(255)) engine innodb;
let $MYSQLD_DATADIR=`select @@datadir`;
let IBDATA1=$MYSQLD_DATADIR/ibdata1;
let $recs = 36262;
--disable_query_log
let $c = $recs;
start transaction;
while ($c)
{
insert into t1 values ('Hello World');
dec $c;
}
commit work;
--enable_query_log
perl;
my $filesize = -s $ENV{'IBDATA1'};
my $filename = $ENV{MYSQL_TMP_DIR} . '/innodb_bug11766634.txt';
open F, '>', $filename or die "open(>$filename): $!";
print F $filesize;
EOF
drop table t1;
create table t1 (f1 char(255)) engine innodb;
--disable_query_log
let $c = $recs;
start transaction;
while ($c)
{
insert into t1 values ('Hello World');
dec $c;
}
commit work;
--enable_query_log
perl;
my $filesize = -s $ENV{'IBDATA1'};
my $filename = $ENV{MYSQL_TMP_DIR} . '/innodb_bug11766634.txt';
$_=do $filename;
print $_ == $filesize, "\n";
EOF
drop table t1;

View File

@ -8,6 +8,7 @@
-- disable_result_log -- disable_result_log
call mtr.add_suppression("InnoDB: Warning: a long semaphore wait:"); call mtr.add_suppression("InnoDB: Warning: a long semaphore wait:");
call mtr.add_suppression("the age of the last checkpoint is");
# set packet size and reconnect # set packet size and reconnect
let $max_packet=`select @@global.max_allowed_packet`; let $max_packet=`select @@global.max_allowed_packet`;

View File

@ -0,0 +1,8 @@
include/master-slave.inc
[connection master]
include/assert.inc [The default value shown for the slave's port number is the actual port number of the slave.]
include/rpl_restart_server.inc [server_number=2 parameters: --report-port=9000]
include/start_slave.inc
[Slave restarted with the report-port set to some value]
include/assert.inc [The value shown for the slave's port number is 9000 which is the value set for report-port.]
include/rpl_end.inc

View File

@ -6,7 +6,7 @@ START SLAVE IO_THREAD;
include/wait_for_slave_io_to_start.inc include/wait_for_slave_io_to_start.inc
SHOW SLAVE HOSTS; SHOW SLAVE HOSTS;
Server_id Host Port Master_id Server_id Host Port Master_id
3 slave2 DEFAULT_PORT 1 3 slave2 SLAVE_PORT 1
2 SLAVE_PORT 1 2 SLAVE_PORT 1
include/stop_slave_io.inc include/stop_slave_io.inc
SHOW SLAVE HOSTS; SHOW SLAVE HOSTS;

View File

@ -0,0 +1,5 @@
include/master-slave.inc
[connection master]
include/assert.inc [Assert that relay log space is close to the limit]
include/diff_tables.inc [master:test.t1,slave:test.t1]
include/rpl_end.inc

View File

@ -0,0 +1 @@
--report-port=

View File

@ -0,0 +1,49 @@
#
#BUG#13333431 : INCORRECT DEFAULT PORT IN 'SHOW SLAVE HOSTS' OUTPUT
#
# ==== Purpose ====
#
# The test shows the default value printed for the slave's port number when
# --report-port=<some value> is not set on the slave. This is different from
# the earlier behaviour, which showed 3306 as the default value if report-port
# was not set on the slave.
#
#====Method====
#
# Start replication with report-port not set. This will give the actual port
# number of the slave (i.e. SLAVE_PORT) when doing SHOW SLAVE HOSTS on
# the master.
# Restart the slave server with report-port set to 9000 and start the slave.
# In this case, doing SHOW SLAVE HOSTS on the master reports the slave's
# port number as 9000.
source include/master-slave.inc;
connection master;
--let $report_port= query_get_value(SHOW SLAVE HOSTS, Port, 1)
--let assert_text= The default value shown for the slave's port number is the actual port number of the slave.
--let assert_cond= $report_port = "$SLAVE_MYPORT"
--source include/assert.inc
# Start the server with some value passed to the report_port option;
# this will be used in case we have to mask the value of the slave's port
# number in certain situations.
--let $rpl_server_number= 2
--let $rpl_server_parameters= --report-port=9000
--source include/rpl_restart_server.inc
connection slave;
--source include/start_slave.inc
--echo [Slave restarted with the report-port set to some value]
connection master;
# 9000 is the value of the port we should get.
--let $report_port= query_get_value(SHOW SLAVE HOSTS, Port, 1)
--let assert_text= The value shown for the slave's port number is 9000 which is the value set for report-port.
--let assert_cond= $report_port = "9000"
--source include/assert.inc
--source include/rpl_end.inc
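As an aside, to make the two assertions above concrete, hypothetical SHOW SLAVE HOSTS output on the master (port numbers made up for illustration):
SHOW SLAVE HOSTS;
-- Server_id  Host    Port   Master_id
-- 2          slave1  13001  1          <- the slave's real port while --report-port is unset
-- and after the restart with --report-port=9000:
-- 2          slave1  9000   1          <- the reported value, not the real port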

View File

@ -25,6 +25,7 @@ let $field= Server_id;
# 3 is server_id of slave2. # 3 is server_id of slave2.
let $condition= ='3'; let $condition= ='3';
source include/wait_show_condition.inc; source include/wait_show_condition.inc;
--replace_column 3 'SLAVE_PORT'
--replace_result $SLAVE_MYPORT SLAVE_PORT $DEFAULT_MASTER_PORT DEFAULT_PORT --replace_result $SLAVE_MYPORT SLAVE_PORT $DEFAULT_MASTER_PORT DEFAULT_PORT
SHOW SLAVE HOSTS; SHOW SLAVE HOSTS;

View File

@ -0,0 +1 @@
--relay-log-space-limit=8192 --relay-log-purge --max-relay-log-size=4096

View File

@ -0,0 +1,101 @@
#
# BUG#12400313 / BUG#64503 test case
#
#
# Description
# -----------
#
# This test case starts the slave server with:
# --relay-log-space-limit=8192 --relay-log-purge --max-relay-log-size=4096
#
# Then it issues some queries that will cause the slave to reach
# relay-log-space-limit. We lock the table so that the SQL thread is
# not able to purge the log and then we issue some more statements.
#
# The purpose is to show that the IO thread honors the limit while the
# SQL thread is unable to purge the relay logs, which was not the case
# before this patch. In addition we assert that when the limit has to be
# ignored (the SQL thread needs a rotate before it can purge), the IO
# thread does not exceed it in an uncontrolled manner.
--source include/have_binlog_format_statement.inc
--source include/master-slave.inc
--source include/have_innodb.inc
--disable_query_log
CREATE TABLE t1 (c1 TEXT) engine=InnoDB;
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
--sync_slave_with_master
# wait for the SQL thread to sleep
--let $show_statement= SHOW PROCESSLIST
--let $field= State
--let $condition= = 'Slave has read all relay log; waiting for the slave I/O thread to update it'
--source include/wait_show_condition.inc
# now the IO thread has set rli->ignore_space_limit
# let's lock the table so that once the SQL thread wakes up
# it blocks there and does not set rli->ignore_space_limit
# back to zero
LOCK TABLE t1 WRITE;
# now issue more statements that will overflow the
# rli->log_space_limit (in this case ~10K)
--connection master
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
--connection slave
# ASSERT that the IO thread waits for the SQL thread to release some
# space before continuing
--let $show_statement= SHOW PROCESSLIST
--let $field= State
--let $condition= LIKE 'Waiting for %'
# before the patch (IO would have transferred everything)
#--let $condition= = 'Waiting for master to send event'
# after the patch (now it waits for space to be freed)
#--let $condition= = 'Waiting for the slave SQL thread to free enough relay log space'
--source include/wait_show_condition.inc
# without the patch we can uncomment the following two lines and
# watch the IO thread synchronize with the master, thus writing
# relay logs way over the space limit
#--connection master
#--source include/sync_slave_io_with_master.inc
## ASSERT that the IO thread has honored the limit (plus the few bytes required to be able to purge)
--let $relay_log_space_while_sql_is_executing = query_get_value(SHOW SLAVE STATUS, Relay_Log_Space, 1)
--let $relay_log_space_limit = query_get_value(SHOW VARIABLES LIKE "relay_log_space_limit", Value, 1)
--let $assert_text= Assert that relay log space is close to the limit
--let $assert_cond= $relay_log_space_while_sql_is_executing <= $relay_log_space_limit * 1.15
--source include/assert.inc
# unlock the table and let SQL thread continue applying events
UNLOCK TABLES;
--connection master
--sync_slave_with_master
--let $diff_tables=master:test.t1,slave:test.t1
--source include/diff_tables.inc
--connection master
DROP TABLE t1;
--enable_query_log
--sync_slave_with_master
--source include/rpl_end.inc
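As an aside (a sketch, not part of the test): the quantities compared by the assert can also be inspected by hand while the table is still locked:
SHOW VARIABLES LIKE 'relay_log_space_limit';   -- 8192, from the .opt file above
SHOW SLAVE STATUS;                             -- the Relay_Log_Space column should stay close to,
                                               -- and only slightly above, that limit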

View File

@ -2,18 +2,19 @@
# #
# only global # only global
# #
--replace_result $DEFAULT_MASTER_PORT DEFAULT_MASTER_PORT --replace_regex s/[0-9]+/DEFAULT_MASTER_PORT/
select @@global.report_port; select @@global.report_port;
--error ER_INCORRECT_GLOBAL_LOCAL_VAR --error ER_INCORRECT_GLOBAL_LOCAL_VAR
select @@session.report_port; select @@session.report_port;
--replace_result $DEFAULT_MASTER_PORT DEFAULT_MASTER_PORT --replace_column 2 'DEFAULT_MASTER_PORT'
show global variables like 'report_port'; show global variables like 'report_port';
--replace_result $DEFAULT_MASTER_PORT DEFAULT_MASTER_PORT --replace_column 2 'DEFAULT_MASTER_PORT'
show session variables like 'report_port'; show session variables like 'report_port';
--replace_result $DEFAULT_MASTER_PORT DEFAULT_MASTER_PORT --replace_column 2 'DEFAULT_MASTER_PORT'
select * from information_schema.global_variables where variable_name='report_port'; select * from information_schema.global_variables where variable_name='report_port';
--replace_result $DEFAULT_MASTER_PORT DEFAULT_MASTER_PORT --replace_column 2 'DEFAULT_MASTER_PORT'
select * from information_schema.session_variables where variable_name='report_port'; select * from information_schema.session_variables where variable_name='report_port';
--replace_column 2 'DEFAULT_MASTER_PORT'
# #
# show that it's read-only # show that it's read-only

View File

@ -1 +0,0 @@
--query_cache_size=1M

View File

@ -795,6 +795,15 @@ SET collation_connection=ucs2_general_ci;
--source include/ctype_numconv.inc --source include/ctype_numconv.inc
SET NAMES latin1; SET NAMES latin1;
--echo #
--echo # Bug #13832953 MY_STRNXFRM_UNICODE: ASSERTION `SRC' FAILED
--echo #
CREATE TABLE t1 (c1 SET('','') CHARACTER SET ucs2);
INSERT INTO t1 VALUES ('');
SELECT COALESCE(c1) FROM t1 ORDER BY 1;
DROP TABLE t1;
--echo # --echo #
--echo # End of 5.5 tests --echo # End of 5.5 tests
--echo # --echo #

View File

@ -57,3 +57,46 @@ disconnect con2;
--source include/wait_until_count_sessions.inc --source include/wait_until_count_sessions.inc
SET DEBUG_SYNC= "RESET"; SET DEBUG_SYNC= "RESET";
DROP TABLE t1; DROP TABLE t1;
--echo #
--echo # Bug#13832772 ASSERTION `THD->IS_ERROR() || KILL_ERRNO'
--echo # FAILED IN FILESORT/MYSQL_DELETE
--echo #
CREATE TABLE t1 (
c1 BLOB,
c2 TEXT,
c3 TEXT,
c4 TEXT,
c5 TEXT,
c6 TEXT,
c7 TEXT,
c8 BLOB,
c9 TEXT,
c19 TEXT,
pk INT,
c20 TEXT,
c21 BLOB,
c22 TEXT,
c23 TEXT,
c24 TEXT,
c25 TEXT,
c26 BLOB,
c27 TEXT,
c28 TEXT,
primary key (pk)
);
CALL mtr.add_suppression("Out of sort memory");
--error ER_OUT_OF_SORTMEMORY
DELETE IGNORE FROM t1 ORDER BY c26,c7,c23,c4,c25,c5,c20,
c19,c21,c8,c1,c27,c28,c3,c9,c22,c24,c6,c2,pk LIMIT 2;
SHOW WARNINGS;
--error ER_OUT_OF_SORTMEMORY
DELETE FROM t1 ORDER BY c26,c7,c23,c4,c25,c5,c20,
c19,c21,c8,c1,c27,c28,c3,c9,c22,c24,c6,c2,pk LIMIT 2;
SHOW WARNINGS;
DROP TABLE t1;
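As an aside (an illustration, not part of the test): ER_OUT_OF_SORTMEMORY (error 1038) is raised whenever the sort buffer cannot hold even a handful of sort keys, so the same error can usually be provoked by shrinking the buffer instead of widening the rows, e.g.:
SET SESSION sort_buffer_size = 32 * 1024;   -- a deliberately small, illustrative value
-- a DELETE ... ORDER BY over rows with very wide sort keys (such as the many
-- TEXT/BLOB columns above) then fails with the same 'Out of sort memory' error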

View File

@ -562,5 +562,18 @@ SELECT 1 IN (YEAR(FROM_UNIXTIME(NULL)) ,1);
--echo # --echo #
--echo #
--echo # Bug#13012483: EXPLAIN EXTENDED, PREPARED STATEMENT, CRASH IN CHECK_SIMPLE_EQUALITY
--echo #
CREATE TABLE t1 (a INT);
PREPARE s FROM "SELECT 1 FROM t1 WHERE 1 < ALL (SELECT @:= (1 IN (SELECT 1 FROM t1)) FROM t1)";
EXECUTE s;
DROP TABLE t1;
--echo # End of test BUG#13012483
--echo #
--echo End of 5.1 tests --echo End of 5.1 tests

View File

@ -0,0 +1,21 @@
# t/cache_innodb.test
#
# Last update:
# 2006-07-26 ML test refactored (MySQL 5.1)
# main code t/innodb_cache.test --> include/query_cache.inc
# new wrapper t/cache_innodb.test
#
--source include/have_query_cache.inc
--source include/have_innodb.inc
--source include/have_partition.inc
let $engine_type= InnoDB;
# Using SELECT to get a space as first character.
let $partitions_a= `SELECT ' PARTITION BY KEY (a) PARTITIONS 3'`;
let $partitions_id= `SELECT ' PARTITION BY HASH (id) PARTITIONS 3'`;
let $partitions_s1= `SELECT ' PARTITION BY KEY (s1) PARTITIONS 3'`;
# partitioning does not support FOREIGN KEYs
let $test_foreign_keys= 0;
--source include/query_cache.inc

View File

@ -11,6 +11,35 @@ drop table if exists t1, t2;
let $MYSQLD_DATADIR= `SELECT @@datadir`; let $MYSQLD_DATADIR= `SELECT @@datadir`;
--echo #
--echo # Bug#13694811: THE OPTIMIZER WRONGLY USES THE FIRST
--echo # INNODB PARTITION STATISTICS
--echo #
CREATE TABLE t1
(a INT,
b varchar(64),
PRIMARY KEY (a),
KEY (b))
ENGINE = InnoDB
PARTITION BY RANGE (a)
SUBPARTITION BY HASH (a) SUBPARTITIONS 10
(PARTITION pNeg VALUES LESS THAN (0),
PARTITION p0 VALUES LESS THAN (1000),
PARTITION pMAX VALUES LESS THAN MAXVALUE);
--echo # Only one row in the first 10 subpartitions
INSERT INTO t1 VALUES (-1, 'Only negative pk value');
INSERT INTO t1 VALUES (0, 'Mod Zero'), (1, 'One'), (2, 'Two'), (3, 'Three'),
(10, 'Zero'), (11, 'Mod One'), (12, 'Mod Two'), (13, 'Mod Three'),
(20, '0'), (21, '1'), (22, '2'), (23, '3'),
(4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9');
INSERT INTO t1 SELECT a + 30, b FROM t1 WHERE a >= 0;
ANALYZE TABLE t1;
EXPLAIN SELECT b FROM t1 WHERE b between 'L' and 'N' AND a > -100;
DROP TABLE t1;
--echo # --echo #
--echo # Bug#56287: crash when using Partition datetime in sub in query --echo # Bug#56287: crash when using Partition datetime in sub in query
--echo # --echo #

View File

@ -3377,6 +3377,94 @@ disconnect con1;
SELECT * SELECT *
FROM (SELECT 1 UNION SELECT 2) t; FROM (SELECT 1 UNION SELECT 2) t;
--echo
--echo # Bug#13805127: Stored program cache produces wrong result in same THD
--echo
PREPARE s1 FROM
"
SELECT c1, t2.c2, count(c3)
FROM
(
SELECT 3 as c2 FROM dual WHERE @x = 1
UNION
SELECT 2 FROM dual WHERE @x = 1 OR @x = 2
) AS t1,
(
SELECT '2012-03-01 01:00:00' AS c1, 3 as c2, 1 as c3 FROM dual
UNION
SELECT '2012-03-01 02:00:00', 3, 2 FROM dual
UNION
SELECT '2012-03-01 01:00:00', 2, 1 FROM dual
) AS t2
WHERE t2.c2 = t1.c2
GROUP BY c1, c2
";
--echo
SET @x = 1;
SELECT c1, t2.c2, count(c3)
FROM
(
SELECT 3 as c2 FROM dual WHERE @x = 1
UNION
SELECT 2 FROM dual WHERE @x = 1 OR @x = 2
) AS t1,
(
SELECT '2012-03-01 01:00:00' AS c1, 3 as c2, 1 as c3 FROM dual
UNION
SELECT '2012-03-01 02:00:00', 3, 2 FROM dual
UNION
SELECT '2012-03-01 01:00:00', 2, 1 FROM dual
) AS t2
WHERE t2.c2 = t1.c2
GROUP BY c1, c2;
--echo
EXECUTE s1;
--echo
SET @x = 2;
SELECT c1, t2.c2, count(c3)
FROM
(
SELECT 3 as c2 FROM dual WHERE @x = 1
UNION
SELECT 2 FROM dual WHERE @x = 1 OR @x = 2
) AS t1,
(
SELECT '2012-03-01 01:00:00' AS c1, 3 as c2, 1 as c3 FROM dual
UNION
SELECT '2012-03-01 02:00:00', 3, 2 FROM dual
UNION
SELECT '2012-03-01 01:00:00', 2, 1 FROM dual
) AS t2
WHERE t2.c2 = t1.c2
GROUP BY c1, c2;
--echo
EXECUTE s1;
--echo
SET @x = 1;
SELECT c1, t2.c2, count(c3)
FROM
(
SELECT 3 as c2 FROM dual WHERE @x = 1
UNION
SELECT 2 FROM dual WHERE @x = 1 OR @x = 2
) AS t1,
(
SELECT '2012-03-01 01:00:00' AS c1, 3 as c2, 1 as c3 FROM dual
UNION
SELECT '2012-03-01 02:00:00', 3, 2 FROM dual
UNION
SELECT '2012-03-01 01:00:00', 2, 1 FROM dual
) AS t2
WHERE t2.c2 = t1.c2
GROUP BY c1, c2;
--echo
EXECUTE s1;
DEALLOCATE PREPARE s1;
--echo # --echo #
--echo # End of 5.5 tests. --echo # End of 5.5 tests.

View File

@ -138,6 +138,7 @@ DROP FUNCTION db1.f1;
DROP TABLE db1.t1; DROP TABLE db1.t1;
DROP DATABASE db1; DROP DATABASE db1;
DROP DATABASE db2; DROP DATABASE db2;
USE test;
--echo # --echo #
--echo # Bug#13105873:valgrind warning:possible crash in foreign --echo # Bug#13105873:valgrind warning:possible crash in foreign
@ -161,5 +162,69 @@ CALL p1();
--echo # below stmt should not return valgrind warnings --echo # below stmt should not return valgrind warnings
CALL p1(); CALL p1();
DROP DATABASE testdb; DROP DATABASE testdb;
USE test;
--echo End of 5.1 tests --echo End of 5.1 tests
--echo #
--echo # Bug#11763507 - 56224: FUNCTION NAME IS CASE-SENSITIVE
--echo #
SET @@SQL_MODE = '';
DELIMITER $;
CREATE FUNCTION testf_bug11763507() RETURNS INT
BEGIN
RETURN 0;
END
$
CREATE PROCEDURE testp_bug11763507()
BEGIN
SELECT "PROCEDURE testp_bug11763507";
END
$
DELIMITER ;$
# STORED FUNCTIONS
SELECT testf_bug11763507();
SELECT TESTF_bug11763507();
--replace_column 5 # 6 #
SHOW FUNCTION STATUS LIKE 'testf_bug11763507';
--replace_column 5 # 6 #
SHOW FUNCTION STATUS WHERE NAME='testf_bug11763507';
--replace_column 5 # 6 #
SHOW FUNCTION STATUS LIKE 'TESTF_bug11763507';
--replace_column 5 # 6 #
SHOW FUNCTION STATUS WHERE NAME='TESTF_bug11763507';
SHOW CREATE FUNCTION testf_bug11763507;
SHOW CREATE FUNCTION TESTF_bug11763507;
# STORED PROCEDURE
CALL testp_bug11763507();
CALL TESTP_bug11763507();
--replace_column 5 # 6 #
SHOW PROCEDURE STATUS LIKE 'testp_bug11763507';
--replace_column 5 # 6 #
SHOW PROCEDURE STATUS WHERE NAME='testp_bug11763507';
--replace_column 5 # 6 #
SHOW PROCEDURE STATUS LIKE 'TESTP_bug11763507';
--replace_column 5 # 6 #
SHOW PROCEDURE STATUS WHERE NAME='TESTP_bug11763507';
SHOW CREATE PROCEDURE testp_bug11763507;
SHOW CREATE PROCEDURE TESTP_bug11763507;
# INFORMATION SCHEMA
SELECT specific_name FROM INFORMATION_SCHEMA.ROUTINES WHERE specific_name LIKE 'testf_bug11763507';
SELECT specific_name FROM INFORMATION_SCHEMA.ROUTINES WHERE specific_name LIKE 'TESTF_bug11763507';
SELECT specific_name FROM INFORMATION_SCHEMA.ROUTINES WHERE specific_name='testf_bug11763507';
SELECT specific_name FROM INFORMATION_SCHEMA.ROUTINES WHERE specific_name='TESTF_bug11763507';
DROP PROCEDURE testp_bug11763507;
DROP FUNCTION testf_bug11763507;
--echo #END OF BUG#11763507 test.

View File

@ -702,3 +702,35 @@ SET SESSION debug_dbug="-d,bug23032_emit_warning";
DROP PROCEDURE p1; DROP PROCEDURE p1;
DROP TABLE t1; DROP TABLE t1;
--echo #
--echo # Bug#11763507 - 56224: FUNCTION NAME IS CASE-SENSITIVE
--echo #
SET @@SQL_MODE = '';
DELIMITER $;
CREATE FUNCTION testf_bug11763507() RETURNS INT
BEGIN
RETURN 0;
END
$
CREATE PROCEDURE testp_bug11763507()
BEGIN
SELECT "PROCEDURE testp_bug11763507";
END
$
DELIMITER ;$
# STORED FUNCTIONS
SHOW FUNCTION CODE testf_bug11763507;
SHOW FUNCTION CODE TESTF_bug11763507;
# STORED PROCEDURE
SHOW PROCEDURE CODE testp_bug11763507;
SHOW PROCEDURE CODE TESTP_bug11763507;
DROP PROCEDURE testp_bug11763507;
DROP FUNCTION testf_bug11763507;
--echo #END OF BUG#11763507 test.

View File

@ -9039,6 +9039,42 @@ DROP PROCEDURE p1;
DROP PROCEDURE p2; DROP PROCEDURE p2;
DROP TABLE t1; DROP TABLE t1;
--echo
--echo # Bug#13805127: Stored program cache produces wrong result in same THD
--echo
delimiter |;
CREATE PROCEDURE p1(x INT UNSIGNED)
BEGIN
SELECT c1, t2.c2, count(c3)
FROM
(
SELECT 3 as c2 FROM dual WHERE x = 1
UNION
SELECT 2 FROM dual WHERE x = 1 OR x = 2
) AS t1,
(
SELECT '2012-03-01 01:00:00' AS c1, 3 as c2, 1 as c3 FROM dual
UNION
SELECT '2012-03-01 02:00:00', 3, 2 FROM dual
UNION
SELECT '2012-03-01 01:00:00', 2, 1 FROM dual
) AS t2
WHERE t2.c2 = t1.c2
GROUP BY c1, c2
;
END|
delimiter ;|
--echo
CALL p1(1);
CALL p1(2);
CALL p1(1);
DROP PROCEDURE p1;
--echo # End of 5.5 test --echo # End of 5.5 test

View File

@ -427,6 +427,7 @@ SELECT `my.db`.f1(2);
connection default; connection default;
disconnect addcon; disconnect addcon;
DROP DATABASE `my.db`; DROP DATABASE `my.db`;
USE test;
# #
# Restore global concurrent_insert value. Keep in the end of the test file. # Restore global concurrent_insert value. Keep in the end of the test file.
@ -437,3 +438,26 @@ set @@global.concurrent_insert= @old_concurrent_insert;
# Wait till all disconnects are completed # Wait till all disconnects are completed
--source include/wait_until_count_sessions.inc --source include/wait_until_count_sessions.inc
--echo #
--echo # Bug#11763507 - 56224: FUNCTION NAME IS CASE-SENSITIVE
--echo #
SET @@SQL_MODE = '';
DELIMITER $;
CREATE EVENT teste_bug11763507 ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR
DO SELECT 1 $
DELIMITER ;$
# EVENTS
--replace_column 6 # 7 # 8 # 9 #
SHOW EVENTS LIKE 'teste_bug11763507';
--replace_column 6 # 7 # 8 # 9 #
SHOW EVENTS LIKE 'TESTE_bug11763507';
--replace_column 4 #
SHOW CREATE EVENT teste_bug11763507;
--replace_column 4 #
SHOW CREATE EVENT TESTE_bug11763507;
DROP EVENT teste_bug11763507;
--echo #END OF BUG#11763507 test.

View File

@ -359,3 +359,157 @@ SELECT * FROM mysql.columns_priv WHERE Host = 'localhost' AND User LIKE 'user_%P
# Cleanup # Cleanup
DROP TABLE t1; DROP TABLE t1;
DROP TABLE t2; DROP TABLE t2;
--echo
--echo #
--echo # Test for Bug#12601974 - STORED PROCEDURE SQL_MODE=NO_BACKSLASH_ESCAPES
--echo # IGNORED AND BREAKS REPLICATION
--echo #
--disable_warnings
DROP TABLE IF EXISTS test_table;
DROP FUNCTION IF EXISTS test_function;
--enable_warnings
CREATE TABLE test_table (c1 CHAR(50));
SET @org_mode=@@sql_mode;
SET @@sql_mode='';
PREPARE insert_stmt FROM 'INSERT INTO test_table VALUES (?)';
PREPARE update_stmt FROM 'UPDATE test_table SET c1= ? WHERE c1= ?';
DELIMITER $;
CREATE FUNCTION test_function(var CHAR(50)) RETURNS CHAR(50)
BEGIN
DECLARE char_val CHAR(50);
SELECT c1 INTO char_val FROM test_table WHERE c1=var;
RETURN char_val;
END
$
DELIMITER ;$
SET @var1='abcd\'ef';
SET @var2='abcd\"ef';
SET @var3='abcd\bef';
SET @var4='abcd\nef';
SET @var5='abcd\ref';
SET @var6='abcd\tef';
SET @var7='abcd\\ef';
SET @var8='abcd\%ef';
SET @var9='abcd\_ef';
SET @to_var1='wxyz\'ef';
SET @to_var2='wxyz\"ef';
SET @to_var3='wxyz\bef';
SET @to_var4='wxyz\nef';
SET @to_var5='wxyz\ref';
SET @to_var6='wxyz\tef';
SET @to_var7='wxyz\\ef';
SET @to_var8='wxyz\%ef';
SET @to_var9='wxyz\_ef';
--echo # STRING LITERAL WITH BACKSLASH IN PREPARE STATEMENT
EXECUTE insert_stmt USING @var1;
EXECUTE insert_stmt USING @var2;
EXECUTE insert_stmt USING @var3;
EXECUTE insert_stmt USING @var4;
EXECUTE insert_stmt USING @var5;
EXECUTE insert_stmt USING @var6;
EXECUTE insert_stmt USING @var7;
EXECUTE insert_stmt USING @var8;
EXECUTE insert_stmt USING @var9;
SELECT * FROM test_table;
EXECUTE update_stmt USING @to_var1, @var1;
EXECUTE update_stmt USING @to_var2, @var2;
EXECUTE update_stmt USING @to_var3, @var3;
EXECUTE update_stmt USING @to_var4, @var4;
EXECUTE update_stmt USING @to_var5, @var5;
EXECUTE update_stmt USING @to_var6, @var6;
EXECUTE update_stmt USING @to_var7, @var7;
EXECUTE update_stmt USING @to_var8, @var8;
EXECUTE update_stmt USING @to_var9, @var9;
SELECT * FROM test_table;
--echo
--echo # END OF CASE - STRING LITERAL WITH BACKSLASH IN PREPARE STATEMENT
--echo # STRING LITERAL WITH BACKSLASH IN FUNCTION RETURNING STRING
select test_function(@to_var1);
SELECT test_function(@to_var2);
SELECT test_function(@to_var3);
SELECT test_function(@to_var4);
SELECT test_function(@to_var5);
SELECT test_function(@to_var6);
SELECT test_function(@to_var7);
SELECT test_function(@to_var8);
SELECT test_function(@to_var9);
--echo
--echo # END OF CASE - STRING LITERAL WITH BACKSLASH IN FUNCTION RETURNING STRING
DELETE FROM test_table;
DROP FUNCTION test_function;
SET @@sql_mode='NO_BACKSLASH_ESCAPES';
DELIMITER $;
CREATE FUNCTION test_function(var CHAR(50)) RETURNS CHAR(50)
BEGIN
DECLARE char_val CHAR(50);
SELECT c1 INTO char_val FROM test_table WHERE c1=var;
RETURN char_val;
END
$
DELIMITER ;$
--echo # STRING LITERAL WITH BACKSLASH IN PREPARE STATEMENT
EXECUTE insert_stmt USING @var1;
EXECUTE insert_stmt USING @var2;
EXECUTE insert_stmt USING @var3;
EXECUTE insert_stmt USING @var4;
EXECUTE insert_stmt USING @var5;
EXECUTE insert_stmt USING @var6;
EXECUTE insert_stmt USING @var7;
EXECUTE insert_stmt USING @var8;
EXECUTE insert_stmt USING @var9;
SELECT * FROM test_table;
EXECUTE update_stmt USING @to_var1, @var1;
EXECUTE update_stmt USING @to_var2, @var2;
EXECUTE update_stmt USING @to_var3, @var3;
EXECUTE update_stmt USING @to_var4, @var4;
EXECUTE update_stmt USING @to_var5, @var5;
EXECUTE update_stmt USING @to_var6, @var6;
EXECUTE update_stmt USING @to_var7, @var7;
EXECUTE update_stmt USING @to_var8, @var8;
EXECUTE update_stmt USING @to_var9, @var9;
SELECT * FROM test_table;
--echo
--echo # END OF CASE - STRING LITERAL WITH BACKSLASH IN PREPARE STATEMENT
--echo # STRING LITERAL WITH BACKSLASH IN FUNCTION RETURNING STRING
select test_function(@to_var1);
SELECT test_function(@to_var2);
SELECT test_function(@to_var3);
SELECT test_function(@to_var4);
SELECT test_function(@to_var5);
SELECT test_function(@to_var6);
SELECT test_function(@to_var7);
SELECT test_function(@to_var8);
SELECT test_function(@to_var9);
--echo
--echo # END OF CASE - STRING LITERAL WITH BACKSLASH IN FUNCTION RETURNING STRING
DROP TABLE test_table;
DROP FUNCTION test_function;
SET @@sql_mode= @org_mode;
--echo
--echo #End of Test for Bug#12601974

View File

@ -3542,6 +3542,61 @@ SET SESSION sql_mode=@old_sql_mode;
DEALLOCATE PREPARE stmt; DEALLOCATE PREPARE stmt;
DROP TABLE t1; DROP TABLE t1;
--echo #
--echo # Bug#12763207 - ASSERT IN SUBSELECT::SINGLE_VALUE_TRANSFORMER
--echo #
CREATE TABLE t1(a1 int);
INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2(a1 int);
INSERT INTO t2 VALUES (3);
SELECT @@session.sql_mode INTO @old_sql_mode;
SET SESSION sql_mode='ONLY_FULL_GROUP_BY';
## All these are subject to the transformation
## '1 < some (...)' => '1 < max(...)'
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 2 FROM t2);
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 2.0 FROM t2);
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT 'a' FROM t2);
SELECT 1 FROM t1 WHERE 1 < SOME (SELECT a1 FROM t2);
SET SESSION sql_mode=@old_sql_mode;
DROP TABLE t1, t2;
--echo #
--echo # Bug#12763207 - ASSERT IN SUBSELECT::SINGLE_VALUE_TRANSFORMER
--echo #
create table t2(i int);
insert into t2 values(0);
SELECT @@session.sql_mode INTO @old_sql_mode;
SET SESSION sql_mode='ONLY_FULL_GROUP_BY';
CREATE VIEW v1 AS
SELECT 'f' FROM t2 UNION SELECT 'x' FROM t2
;
CREATE TABLE t1 (
pk int NOT NULL,
col_varchar_key varchar(1) DEFAULT NULL,
PRIMARY KEY (pk),
KEY col_varchar_key (col_varchar_key)
);
SELECT t1.pk
FROM t1
WHERE t1.col_varchar_key < ALL ( SELECT * FROM v1 )
;
SET SESSION sql_mode=@old_sql_mode;
drop table t2, t1;
drop view v1;
--echo End of 5.0 tests. --echo End of 5.0 tests.
# #

View File

@ -4521,6 +4521,18 @@ DROP TABLE t1;
disconnect con2; disconnect con2;
disconnect con3; disconnect con3;
--echo #
--echo # Bug#12626844: WRONG ERROR MESSAGE WHILE CREATING A VIEW ON A
--echo # NON EXISTING DATABASE
--echo #
--disable_warnings
DROP DATABASE IF EXISTS nodb;
--enable_warnings
--error ER_BAD_DB_ERROR
CREATE VIEW nodb.a AS SELECT 1;
# Check that all connections opened by test cases in this file are really # Check that all connections opened by test cases in this file are really
# gone so execution of other tests won't be affected by their presence. # gone so execution of other tests won't be affected by their presence.
--source include/wait_until_count_sessions.inc --source include/wait_until_count_sessions.inc

View File

@ -298,7 +298,6 @@ CREATE TABLE t1 (a INT) engine=InnoDB;
XA START 'a'; XA START 'a';
INSERT INTO t1 VALUES (1); INSERT INTO t1 VALUES (1);
--error ER_XAER_RMFAIL
SAVEPOINT savep; SAVEPOINT savep;
XA END 'a'; XA END 'a';

View File

@ -211,13 +211,16 @@ void _lf_pinbox_put_pins(LF_PINS *pins)
LF_PINBOX *pinbox= pins->pinbox; LF_PINBOX *pinbox= pins->pinbox;
uint32 top_ver, nr; uint32 top_ver, nr;
nr= pins->link; nr= pins->link;
#ifdef MY_LF_EXTRA_DEBUG
#ifndef DBUG_OFF
{ {
/* This thread should not hold any pin. */
int i; int i;
for (i= 0; i < LF_PINBOX_PINS; i++) for (i= 0; i < LF_PINBOX_PINS; i++)
DBUG_ASSERT(pins->pin[i] == 0); DBUG_ASSERT(pins->pin[i] == 0);
} }
#endif #endif /* DBUG_OFF */
/* /*
XXX this will deadlock if other threads will wait for XXX this will deadlock if other threads will wait for
the caller to do something after _lf_pinbox_put_pins(), the caller to do something after _lf_pinbox_put_pins(),

View File

@ -7860,8 +7860,7 @@ String *Field_set::val_str(String *val_buffer,
ulonglong tmp=(ulonglong) Field_enum::val_int(); ulonglong tmp=(ulonglong) Field_enum::val_int();
uint bitnr=0; uint bitnr=0;
val_buffer->length(0); val_buffer->set("", 0, field_charset);
val_buffer->set_charset(field_charset);
while (tmp && bitnr < (uint) typelib->count) while (tmp && bitnr < (uint) typelib->count)
{ {
if (tmp & 1) if (tmp & 1)

View File

@ -219,7 +219,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
param.keys= table_sort.keys; param.keys= table_sort.keys;
if (memory_available < min_sort_memory) if (memory_available < min_sort_memory)
{ {
my_error(ER_OUT_OF_SORTMEMORY,MYF(ME_ERROR+ME_WAITTANG)); my_error(ER_OUT_OF_SORTMEMORY,MYF(ME_ERROR + ME_FATALERROR));
goto err; goto err;
} }
if (open_cached_file(&buffpek_pointers,mysql_tmpdir,TEMP_PREFIX, if (open_cached_file(&buffpek_pointers,mysql_tmpdir,TEMP_PREFIX,

View File

@ -1,5 +1,5 @@
/* /*
Copyright (c) 2005, 2011, Oracle and/or its affiliates. Copyright (c) 2005, 2012, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by it under the terms of the GNU General Public License as published by
@ -289,6 +289,7 @@ void ha_partition::init_handler_variables()
m_is_sub_partitioned= 0; m_is_sub_partitioned= 0;
m_is_clone_of= NULL; m_is_clone_of= NULL;
m_clone_mem_root= NULL; m_clone_mem_root= NULL;
m_part_ids_sorted_by_num_of_records= NULL;
#ifdef DONT_HAVE_TO_BE_INITALIZED #ifdef DONT_HAVE_TO_BE_INITALIZED
m_start_key.flag= 0; m_start_key.flag= 0;
@ -325,6 +326,7 @@ ha_partition::~ha_partition()
} }
my_free(m_ordered_rec_buffer); my_free(m_ordered_rec_buffer);
m_ordered_rec_buffer= NULL; m_ordered_rec_buffer= NULL;
my_free(m_part_ids_sorted_by_num_of_records);
clear_handler_file(); clear_handler_file();
@ -2741,6 +2743,16 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
m_start_key.key= (const uchar*)ptr; m_start_key.key= (const uchar*)ptr;
} }
} }
if (!m_part_ids_sorted_by_num_of_records)
{
if (!(m_part_ids_sorted_by_num_of_records=
(uint32*) my_malloc(m_tot_parts * sizeof(uint32), MYF(MY_WME))))
DBUG_RETURN(error);
uint32 i;
/* Initialize it with all partition ids. */
for (i= 0; i < m_tot_parts; i++)
m_part_ids_sorted_by_num_of_records[i]= i;
}
/* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */ /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */
if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE)) if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE))
@ -5314,6 +5326,24 @@ int ha_partition::handle_ordered_prev(uchar *buf)
and read_time calls and read_time calls
*/ */
/**
Helper function for sorting according to number of rows in descending order.
*/
int ha_partition::compare_number_of_records(ha_partition *me,
const uint32 *a,
const uint32 *b)
{
handler **file= me->m_file;
/* Note: sorting in descending order! */
if (file[*a]->stats.records > file[*b]->stats.records)
return -1;
if (file[*a]->stats.records < file[*b]->stats.records)
return 1;
return 0;
}
/* /*
General method to gather info from handler General method to gather info from handler
@ -5558,6 +5588,15 @@ int ha_partition::info(uint flag)
} }
i++; i++;
} while (*(++file_array)); } while (*(++file_array));
/*
Sort the array of part_ids by number of records
in descending order.
*/
my_qsort2((void*) m_part_ids_sorted_by_num_of_records,
m_tot_parts,
sizeof(uint32),
(qsort2_cmp) compare_number_of_records,
this);
file= m_file[handler_instance]; file= m_file[handler_instance];
file->info(HA_STATUS_CONST | no_lock_flag); file->info(HA_STATUS_CONST | no_lock_flag);
@ -6313,21 +6352,73 @@ const key_map *ha_partition::keys_to_use_for_scanning()
DBUG_RETURN(m_file[0]->keys_to_use_for_scanning()); DBUG_RETURN(m_file[0]->keys_to_use_for_scanning());
} }
#define MAX_PARTS_FOR_OPTIMIZER_CALLS 10
/*
Prepare start variables for estimating optimizer costs.
@param[out] num_used_parts Number of partitions after pruning. /**
@param[out] check_min_num Number of partitions to call. Minimum number of rows to base optimizer estimate on.
@param[out] first first used partition.
*/ */
void ha_partition::partitions_optimizer_call_preparations(uint *first,
uint *num_used_parts, ha_rows ha_partition::min_rows_for_estimate()
uint *check_min_num)
{ {
*first= bitmap_get_first_set(&(m_part_info->used_partitions)); uint i, max_used_partitions, tot_used_partitions;
*num_used_parts= bitmap_bits_set(&(m_part_info->used_partitions)); DBUG_ENTER("ha_partition::min_rows_for_estimate");
*check_min_num= min(MAX_PARTS_FOR_OPTIMIZER_CALLS, *num_used_parts);
tot_used_partitions= bitmap_bits_set(&m_part_info->used_partitions);
DBUG_ASSERT(tot_used_partitions);
/*
Allow O(log2(tot_partitions)) increase in number of used partitions.
This gives O(tot_rows/log2(tot_partitions)) rows to base the estimate on.
I.e when the total number of partitions doubles, allow one more
partition to be checked.
*/
i= 2;
max_used_partitions= 1;
while (i < m_tot_parts)
{
max_used_partitions++;
i= i << 1;
}
if (max_used_partitions > tot_used_partitions)
max_used_partitions= tot_used_partitions;
/* stats.records is already updated by the info(HA_STATUS_VARIABLE) call. */
DBUG_PRINT("info", ("max_used_partitions: %u tot_rows: %lu",
max_used_partitions,
(ulong) stats.records));
DBUG_PRINT("info", ("tot_used_partitions: %u min_rows_to_check: %lu",
tot_used_partitions,
(ulong) stats.records * max_used_partitions
/ tot_used_partitions));
DBUG_RETURN(stats.records * max_used_partitions / tot_used_partitions);
}
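As an aside, the cap computed by the doubling loop above grows with log2 of the total number of partitions. A small standalone sketch of the same arithmetic, with made-up partition counts (an illustration only, not server code):
#include <cstdio>
/* Mirrors the loop above: allow one more sampled partition each time the
   total number of partitions doubles, capped by how many partitions are
   actually used after pruning. */
static unsigned max_partitions_to_check(unsigned tot_parts,
                                        unsigned tot_used_parts)
{
  unsigned i= 2, max_used= 1;
  while (i < tot_parts)
  {
    max_used++;
    i= i << 1;
  }
  return max_used > tot_used_parts ? tot_used_parts : max_used;
}
int main()
{
  /* 1000 partitions, all used: sample at most 10 of them. */
  printf("%u\n", max_partitions_to_check(1000, 1000));
  /* 30 subpartitions, 11 left after pruning: the cap is 5. */
  printf("%u\n", max_partitions_to_check(30, 11));
  return 0;
}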
/**
Get the biggest used partition.
Starting at the N:th biggest partition, skip all non-used
partitions and return the biggest used partition found.
@param[in,out] part_index Skip the *part_index biggest partitions
@return The biggest used partition with index not lower than *part_index.
@retval NO_CURRENT_PART_ID No more partition used.
@retval != NO_CURRENT_PART_ID partition id of biggest used partition with
index >= *part_index supplied. Note that
*part_index will be updated to the next
partition index to use.
*/
uint ha_partition::get_biggest_used_partition(uint *part_index)
{
uint part_id;
while ((*part_index) < m_tot_parts)
{
part_id= m_part_ids_sorted_by_num_of_records[(*part_index)++];
if (bitmap_is_set(&m_part_info->used_partitions, part_id))
return part_id;
}
return NO_CURRENT_PART_ID;
} }
@ -6343,115 +6434,107 @@ void ha_partition::partitions_optimizer_call_preparations(uint *first,
double ha_partition::scan_time() double ha_partition::scan_time()
{ {
double scan_time= 0.0; double scan_time= 0;
uint first, part_id, num_used_parts, check_min_num, partitions_called= 0; handler **file;
DBUG_ENTER("ha_partition::scan_time"); DBUG_ENTER("ha_partition::scan_time");
partitions_optimizer_call_preparations(&first, &num_used_parts, &check_min_num); for (file= m_file; *file; file++)
for (part_id= first; partitions_called < num_used_parts ; part_id++) if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
{ scan_time+= (*file)->scan_time();
if (!bitmap_is_set(&(m_part_info->used_partitions), part_id))
continue;
scan_time+= m_file[part_id]->scan_time();
partitions_called++;
if (partitions_called >= check_min_num && scan_time != 0.0)
{
DBUG_RETURN(scan_time *
(double) num_used_parts / (double) partitions_called);
}
}
DBUG_RETURN(scan_time); DBUG_RETURN(scan_time);
} }
/* /**
Estimate rows for records_in_range or estimate_rows_upper_bound. Find number of records in a range.
@param inx Index number
@param min_key Start of range
@param max_key End of range
@param is_records_in_range call records_in_range instead of @return Number of rows in range.
estimate_rows_upper_bound.
@param inx (only for records_in_range) index to use.
@param min_key (only for records_in_range) start of range.
@param max_key (only for records_in_range) end of range.
@return Number of rows or HA_POS_ERROR. Given a starting key, and an ending key estimate the number of rows that
*/ will exist between the two. max_key may be empty, in which case we determine
ha_rows ha_partition::estimate_rows(bool is_records_in_range, uint inx, if start_key matches any rows.
key_range *min_key, key_range *max_key)
{
ha_rows rows, estimated_rows= 0;
uint first, part_id, num_used_parts, check_min_num, partitions_called= 0;
DBUG_ENTER("ha_partition::records_in_range");
partitions_optimizer_call_preparations(&first, &num_used_parts, &check_min_num);
for (part_id= first; partitions_called < num_used_parts ; part_id++)
{
if (!bitmap_is_set(&(m_part_info->used_partitions), part_id))
continue;
if (is_records_in_range)
rows= m_file[part_id]->records_in_range(inx, min_key, max_key);
else
rows= m_file[part_id]->estimate_rows_upper_bound();
if (rows == HA_POS_ERROR)
DBUG_RETURN(HA_POS_ERROR);
estimated_rows+= rows;
partitions_called++;
if (partitions_called >= check_min_num && estimated_rows)
{
DBUG_RETURN(estimated_rows * num_used_parts / partitions_called);
}
}
DBUG_RETURN(estimated_rows);
}
/*
Find number of records in a range
SYNOPSIS
records_in_range()
inx Index number
min_key Start of range
max_key End of range
RETURN VALUE
Number of rows in range
DESCRIPTION
Given a starting key, and an ending key estimate the number of rows that
will exist between the two. end_key may be empty which in case determine
if start_key matches any rows.
Called from opt_range.cc by check_quick_keys().
monty: MUST be called for each range and added.
Note that MySQL will assume that if this returns 0 there is no
matching rows for the range!
*/ */
ha_rows ha_partition::records_in_range(uint inx, key_range *min_key, ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
key_range *max_key) key_range *max_key)
{ {
ha_rows min_rows_to_check, rows, estimated_rows=0, checked_rows= 0;
uint partition_index= 0, part_id;
DBUG_ENTER("ha_partition::records_in_range"); DBUG_ENTER("ha_partition::records_in_range");
DBUG_RETURN(estimate_rows(TRUE, inx, min_key, max_key)); min_rows_to_check= min_rows_for_estimate();
while ((part_id= get_biggest_used_partition(&partition_index))
!= NO_CURRENT_PART_ID)
{
rows= m_file[part_id]->records_in_range(inx, min_key, max_key);
DBUG_PRINT("info", ("part %u match %lu rows of %lu", part_id, (ulong) rows,
(ulong) m_file[part_id]->stats.records));
if (rows == HA_POS_ERROR)
DBUG_RETURN(HA_POS_ERROR);
estimated_rows+= rows;
checked_rows+= m_file[part_id]->stats.records;
/*
Returning 0 means no rows can be found, so we must continue
this loop as long as we have estimated_rows == 0.
Also, many engines return 1 to indicate that there may exist
a matching row; we do not normalize this by dividing by the number of
used partitions, but leave it to be returned as a sum, which will
reflect that we will need to scan each partition's index.
Note that these statistics may not always be correct, so we must
continue even if the current partition has 0 rows, since we might have
deleted rows from the current partition, or inserted to the next
partition.
*/
if (estimated_rows && checked_rows &&
checked_rows >= min_rows_to_check)
{
DBUG_PRINT("info",
("records_in_range(inx %u): %lu (%lu * %lu / %lu)",
inx,
(ulong) (estimated_rows * stats.records / checked_rows),
(ulong) estimated_rows,
(ulong) stats.records,
(ulong) checked_rows));
DBUG_RETURN(estimated_rows * stats.records / checked_rows);
}
}
DBUG_PRINT("info", ("records_in_range(inx %u): %lu",
inx,
(ulong) estimated_rows));
DBUG_RETURN(estimated_rows);
} }
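To make the extrapolation above concrete, here is a minimal standalone sketch with invented partition statistics: the biggest partitions are sampled first, and the matched count is then scaled by total rows over checked rows. It only illustrates the formula; it is not the handler code.
#include <cstdio>
#include <vector>
struct Part { unsigned long rows; unsigned long matches; };
int main()
{
  /* Hypothetical partitions, already sorted by row count in descending
     order, as m_part_ids_sorted_by_num_of_records would provide. */
  std::vector<Part> parts= {{900, 90}, {60, 3}, {40, 1}};
  unsigned long total_rows= 0;
  for (const Part &p : parts)
    total_rows+= p.rows;                  /* 1000 rows in the whole table */
  unsigned long min_rows_to_check= 900;   /* assumed estimate threshold */
  unsigned long estimated= 0, checked= 0;
  for (const Part &p : parts)
  {
    estimated+= p.matches;                /* per-partition range estimate */
    checked+= p.rows;
    if (estimated && checked >= min_rows_to_check)
    {
      /* 90 matches in 900 checked rows scale to ~100 in 1000 rows. */
      printf("estimate: %lu\n", estimated * total_rows / checked);
      return 0;
    }
  }
  printf("estimate: %lu\n", estimated);   /* every partition was sampled */
  return 0;
}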
/* /**
Estimate upper bound of number of rows Estimate upper bound of number of rows.
SYNOPSIS @return Number of rows.
estimate_rows_upper_bound()
RETURN VALUE
Number of rows
*/ */
ha_rows ha_partition::estimate_rows_upper_bound() ha_rows ha_partition::estimate_rows_upper_bound()
{ {
ha_rows rows, tot_rows= 0;
handler **file= m_file;
DBUG_ENTER("ha_partition::estimate_rows_upper_bound"); DBUG_ENTER("ha_partition::estimate_rows_upper_bound");
DBUG_RETURN(estimate_rows(FALSE, 0, NULL, NULL)); do
{
if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
{
rows= (*file)->estimate_rows_upper_bound();
if (rows == HA_POS_ERROR)
DBUG_RETURN(HA_POS_ERROR);
tot_rows+= rows;
}
} while (*(++file));
DBUG_RETURN(tot_rows);
} }

View File

@ -2,7 +2,7 @@
#define HA_PARTITION_INCLUDED #define HA_PARTITION_INCLUDED
/* /*
Copyright (c) 2005, 2011, Oracle and/or its affiliates. Copyright (c) 2005, 2012, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by it under the terms of the GNU General Public License as published by
@ -176,6 +176,12 @@ private:
ha_rows m_bulk_inserted_rows; ha_rows m_bulk_inserted_rows;
/** used for prediction of start_bulk_insert rows */ /** used for prediction of start_bulk_insert rows */
enum_monotonicity_info m_part_func_monotonicity_info; enum_monotonicity_info m_part_func_monotonicity_info;
/** Sorted array of partition ids in descending order of number of rows. */
uint32 *m_part_ids_sorted_by_num_of_records;
/* Compare function for my_qsort2, for reversed order. */
static int compare_number_of_records(ha_partition *me,
const uint32 *a,
const uint32 *b);
public: public:
handler *clone(const char *name, MEM_ROOT *mem_root); handler *clone(const char *name, MEM_ROOT *mem_root);
virtual void set_part_info(partition_info *part_info) virtual void set_part_info(partition_info *part_info)
@ -196,9 +202,9 @@ public:
*/ */
ha_partition(handlerton *hton, TABLE_SHARE * table); ha_partition(handlerton *hton, TABLE_SHARE * table);
ha_partition(handlerton *hton, partition_info * part_info); ha_partition(handlerton *hton, partition_info * part_info);
ha_partition(handlerton *hton, TABLE_SHARE *share, ha_partition(handlerton *hton, TABLE_SHARE *share,
partition_info *part_info_arg, partition_info *part_info_arg,
ha_partition *clone_arg, ha_partition *clone_arg,
MEM_ROOT *clone_mem_root_arg); MEM_ROOT *clone_mem_root_arg);
~ha_partition(); ~ha_partition();
/* /*
@ -538,6 +544,20 @@ public:
virtual int extra(enum ha_extra_function operation); virtual int extra(enum ha_extra_function operation);
virtual int extra_opt(enum ha_extra_function operation, ulong cachesize); virtual int extra_opt(enum ha_extra_function operation, ulong cachesize);
virtual int reset(void); virtual int reset(void);
/*
Do not allow caching of partitioned tables, since we cannot return
a callback or engine_data that would work for a generic engine.
*/
virtual my_bool register_query_cache_table(THD *thd, char *table_key,
uint key_length,
qc_engine_callback
*engine_callback,
ulonglong *engine_data)
{
*engine_callback= NULL;
*engine_data= 0;
return FALSE;
}
private: private:
static const uint NO_CURRENT_PART_ID; static const uint NO_CURRENT_PART_ID;
@ -567,15 +587,9 @@ public:
*/ */
private: private:
/* /* Helper functions for optimizer hints. */
Helper function to get the minimum number of partitions to use for ha_rows min_rows_for_estimate();
the optimizer hints/cost calls. uint get_biggest_used_partition(uint *part_index);
*/
void partitions_optimizer_call_preparations(uint *num_used_parts,
uint *check_min_num,
uint *first);
ha_rows estimate_rows(bool is_records_in_range, uint inx,
key_range *min_key, key_range *max_key);
public: public:
/* /*

View File

@ -3575,7 +3575,7 @@ String *Item_param::val_str(String* str)
that binary log contains wrong statement that binary log contains wrong statement
*/ */
const String *Item_param::query_val_str(String* str) const const String *Item_param::query_val_str(THD *thd, String* str) const
{ {
switch (state) { switch (state) {
case INT_VALUE: case INT_VALUE:
@ -3613,7 +3613,8 @@ const String *Item_param::query_val_str(String* str) const
case LONG_DATA_VALUE: case LONG_DATA_VALUE:
{ {
str->length(0); str->length(0);
append_query_string(value.cs_info.character_set_client, &str_value, str); append_query_string(thd, value.cs_info.character_set_client, &str_value,
str);
break; break;
} }
case NULL_VALUE: case NULL_VALUE:
@ -3746,7 +3747,7 @@ void Item_param::print(String *str, enum_query_type query_type)
char buffer[STRING_BUFFER_USUAL_SIZE]; char buffer[STRING_BUFFER_USUAL_SIZE];
String tmp(buffer, sizeof(buffer), &my_charset_bin); String tmp(buffer, sizeof(buffer), &my_charset_bin);
const String *res; const String *res;
res= query_val_str(&tmp); res= query_val_str(current_thd, &tmp);
str->append(*res); str->append(*res);
} }
} }

View File

@ -2260,7 +2260,7 @@ public:
*/ */
void (*set_param_func)(Item_param *param, uchar **pos, ulong len); void (*set_param_func)(Item_param *param, uchar **pos, ulong len);
const String *query_val_str(String *str) const; const String *query_val_str(THD *thd, String *str) const;
bool convert_str_value(THD *thd); bool convert_str_value(THD *thd);

View File

@ -249,7 +249,7 @@ public:
Item_in_optimizer(Item *a, Item_in_subselect *b): Item_in_optimizer(Item *a, Item_in_subselect *b):
Item_bool_func(a, reinterpret_cast<Item *>(b)), cache(0), expr_cache(0), Item_bool_func(a, reinterpret_cast<Item *>(b)), cache(0), expr_cache(0),
save_cache(0), result_for_null_param(UNKNOWN) save_cache(0), result_for_null_param(UNKNOWN)
{} { with_subselect= true; }
bool fix_fields(THD *, Item **); bool fix_fields(THD *, Item **);
bool fix_left(THD *thd, Item **ref); bool fix_left(THD *thd, Item **ref);
bool is_null(); bool is_null();

View File

@ -3709,8 +3709,6 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
mysql_mutex_lock(&rli->log_space_lock); mysql_mutex_lock(&rli->log_space_lock);
rli->relay_log.purge_logs(to_purge_if_included, included, rli->relay_log.purge_logs(to_purge_if_included, included,
0, 0, &rli->log_space_total); 0, 0, &rli->log_space_total);
// Tell the I/O thread to take the relay_log_space_limit into account
rli->ignore_log_space_limit= 0;
mysql_mutex_unlock(&rli->log_space_lock); mysql_mutex_unlock(&rli->log_space_lock);
/* /*

View File

@ -629,7 +629,7 @@ char *str_to_hex(char *to, const char *from, uint len)
*/ */
int int
append_query_string(CHARSET_INFO *csinfo, append_query_string(THD *thd, CHARSET_INFO *csinfo,
String const *from, String *to) String const *from, String *to)
{ {
char *beg, *ptr; char *beg, *ptr;
@ -644,9 +644,26 @@ append_query_string(CHARSET_INFO *csinfo,
else else
{ {
*ptr++= '\''; *ptr++= '\'';
ptr+= escape_string_for_mysql(csinfo, ptr, 0, if (!(thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES))
from->ptr(), from->length()); {
*ptr++='\''; ptr+= escape_string_for_mysql(csinfo, ptr, 0,
from->ptr(), from->length());
}
else
{
const char *frm_str= from->ptr();
for (; frm_str < (from->ptr() + from->length()); frm_str++)
{
/* Use '' to represent "'" */
if (*frm_str == '\'')
*ptr++= *frm_str;
*ptr++= *frm_str;
}
}
*ptr++= '\'';
} }
to->length(orig_len + ptr - beg); to->length(orig_len + ptr - beg);
return 0; return 0;
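A reduced, single-byte-charset sketch of the two quoting strategies above; it is not the server helper (escape_string_for_mysql handles far more cases than the plain backslash branch shown here), just the difference NO_BACKSLASH_ESCAPES makes for quotes:
#include <cstdio>
#include <string>
/* Quote a literal either by backslash-escaping or, when the
   NO_BACKSLASH_ESCAPES behaviour is wanted, by doubling the quote. */
static std::string quote_literal(const std::string &from,
                                 bool no_backslash_escapes)
{
  std::string to= "'";
  for (char c : from)
  {
    if (c == '\'')
      to+= no_backslash_escapes ? "''" : "\\'";
    else if (c == '\\' && !no_backslash_escapes)
      to+= "\\\\";
    else
      to+= c;
  }
  to+= '\'';
  return to;
}
int main()
{
  printf("%s\n", quote_literal("abcd'ef", false).c_str()); /* 'abcd\'ef' */
  printf("%s\n", quote_literal("abcd'ef", true).c_str());  /* 'abcd''ef' */
  return 0;
}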

View File

@ -4260,7 +4260,7 @@ private:
}; };
#endif #endif
int append_query_string(CHARSET_INFO *csinfo, int append_query_string(THD *thd, CHARSET_INFO *csinfo,
String const *from, String *to); String const *from, String *to);
bool rpl_get_position_info(const char **log_file_name, ulonglong *log_pos, bool rpl_get_position_info(const char **log_file_name, ulonglong *log_pos,

sql/mem_root_array.h (new file, 175 lines)
View File

@ -0,0 +1,175 @@
/* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifndef MEM_ROOT_ARRAY_INCLUDED
#define MEM_ROOT_ARRAY_INCLUDED
#include <my_alloc.h>
/**
A typesafe replacement for DYNAMIC_ARRAY.
We use MEM_ROOT for allocating storage, rather than the C++ heap.
The interface is chosen to be similar to std::vector.
@remark
Unlike DYNAMIC_ARRAY, elements are properly copied
(rather than memcpy()d) if the underlying array needs to be expanded.
@remark
Depending on has_trivial_destructor, we destroy objects which are
removed from the array (including when the array object itself is destroyed).
@remark
Note that MEM_ROOT has no facility for reusing free space,
so don't use this if multiple re-expansions are likely to happen.
@param Element_type The type of the elements of the container.
Elements must be copyable.
@param has_trivial_destructor If true, we don't destroy elements.
We could have used type traits to determine this.
__has_trivial_destructor is supported by some (but not all)
compilers we use.
*/
template<typename Element_type, bool has_trivial_destructor>
class Mem_root_array
{
public:
Mem_root_array(MEM_ROOT *root)
: m_root(root), m_array(NULL), m_size(0), m_capacity(0)
{
DBUG_ASSERT(m_root != NULL);
}
~Mem_root_array()
{
clear();
}
Element_type &at(size_t n)
{
DBUG_ASSERT(n < size());
return m_array[n];
}
const Element_type &at(size_t n) const
{
DBUG_ASSERT(n < size());
return m_array[n];
}
// Returns a pointer to the first element in the array.
Element_type *begin() { return &m_array[0]; }
// Returns a pointer to the past-the-end element in the array.
Element_type *end() { return &m_array[size()]; }
// Erases all of the elements.
void clear()
{
if (!empty())
chop(0);
}
/*
Chops the tail off the array, erasing all tail elements.
@param pos Index of first element to erase.
*/
void chop(const size_t pos)
{
DBUG_ASSERT(pos < m_size);
if (!has_trivial_destructor)
{
for (size_t ix= pos; ix < m_size; ++ix)
{
Element_type *p= &m_array[ix];
p->~Element_type(); // Destroy discarded element.
}
}
m_size= pos;
}
/*
Reserves space for array elements.
Copies over existing elements, in case we are re-expanding the array.
@param n number of elements.
@retval true if out-of-memory, false otherwise.
*/
bool reserve(size_t n)
{
if (n <= m_capacity)
return false;
void *mem= alloc_root(m_root, n * element_size());
if (!mem)
return true;
Element_type *array= static_cast<Element_type*>(mem);
// Copy all the existing elements into the new array.
for (size_t ix= 0; ix < m_size; ++ix)
{
Element_type *new_p= &array[ix];
Element_type *old_p= &m_array[ix];
new (new_p) Element_type(*old_p); // Copy into new location.
if (!has_trivial_destructor)
old_p->~Element_type(); // Destroy the old element.
}
// Forget the old array.
m_array= array;
m_capacity= n;
return false;
}
/*
Adds a new element at the end of the array, after its current last
element. The content of this new element is initialized to a copy of
the input argument.
@param element Object to copy.
@retval true if out-of-memory, false otherwise.
*/
bool push_back(const Element_type &element)
{
const size_t min_capacity= 20;
const size_t expansion_factor= 2;
if (0 == m_capacity && reserve(min_capacity))
return true;
if (m_size == m_capacity && reserve(m_capacity * expansion_factor))
return true;
Element_type *p= &m_array[m_size++];
new (p) Element_type(element);
return false;
}
size_t capacity() const { return m_capacity; }
size_t element_size() const { return sizeof(Element_type); }
bool empty() const { return size() == 0; }
size_t size() const { return m_size; }
private:
MEM_ROOT *const m_root;
Element_type *m_array;
size_t m_size;
size_t m_capacity;
// Not (yet) implemented.
Mem_root_array(const Mem_root_array&);
Mem_root_array &operator=(const Mem_root_array&);
};
#endif // MEM_ROOT_ARRAY_INCLUDED
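A brief usage sketch for the new container. It assumes it is compiled inside the server tree, where my_sys.h, init_alloc_root() and free_root() are available, so treat it as an outline rather than a drop-in test:
#include <my_global.h>
#include <my_sys.h>
#include "mem_root_array.h"
typedef Mem_root_array<int, true> Int_array;  /* ints need no destructor */
int mem_root_array_example()
{
  MEM_ROOT root;
  init_alloc_root(&root, 1024, 0);    /* all storage comes from this root */
  Int_array numbers(&root);
  for (int i= 0; i < 100; i++)
    (void) numbers.push_back(i);      /* push_back() returns true on OOM */
  int sum= 0;
  for (int *it= numbers.begin(); it != numbers.end(); ++it)
    sum+= *it;
  numbers.clear();                    /* destroys elements, keeps the root */
  free_root(&root, MYF(0));           /* releases all memory at once */
  return sum;
}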

View File

@ -676,7 +676,7 @@ int mysqld_server_started= 0;
File_parser_dummy_hook file_parser_dummy_hook; File_parser_dummy_hook file_parser_dummy_hook;
/* replication parameters, if master_host is not NULL, we are a slave */ /* replication parameters, if master_host is not NULL, we are a slave */
uint report_port= MYSQL_PORT; uint report_port= 0;
ulong master_retry_count=0; ulong master_retry_count=0;
char *master_info_file; char *master_info_file;
char *relay_log_info_file, *report_user, *report_password, *report_host; char *relay_log_info_file, *report_user, *report_password, *report_host;
@ -2229,6 +2229,11 @@ static void network_init(void)
set_ports(); set_ports();
if (report_port == 0)
{
report_port= mysqld_port;
}
DBUG_ASSERT(report_port != 0);
if (!opt_disable_networking && !opt_bootstrap) if (!opt_disable_networking && !opt_bootstrap)
{ {
if (mysqld_port) if (mysqld_port)
@ -2746,10 +2751,6 @@ static void check_data_home(const char *path)
#endif /* __WIN__ */ #endif /* __WIN__ */
#ifdef HAVE_LINUXTHREADS
#define UNSAFE_DEFAULT_LINUX_THREADS 200
#endif
#if BACKTRACE_DEMANGLE #if BACKTRACE_DEMANGLE
#include <cxxabi.h> #include <cxxabi.h>

View File

@ -202,6 +202,13 @@ public:
ulonglong log_space_limit,log_space_total; ulonglong log_space_limit,log_space_total;
bool ignore_log_space_limit; bool ignore_log_space_limit;
/*
Used by the SQL thread to instruct the IO thread to rotate
the logs when the SQL thread needs to purge to release some
disk space.
*/
bool sql_force_rotate_relay;
/* /*
When it commits, InnoDB internally stores the master log position it has When it commits, InnoDB internally stores the master log position it has
processed so far; the position to store is the one of the end of the processed so far; the position to store is the one of the end of the

View File

@ -6492,6 +6492,10 @@ ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT
ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC
eng "CREATE TABLE... SELECT... on a table with an auto-increment column is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are inserted. This order cannot be predicted and may differ on master and the slave." eng "CREATE TABLE... SELECT... on a table with an auto-increment column is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are inserted. This order cannot be predicted and may differ on master and the slave."
#
# End of 5.5 error messages.
#
# #
# MariaDB error messages section starts here # MariaDB error messages section starts here
# #
@ -6569,4 +6573,3 @@ ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
eng "Cannot modify @@session.skip_replication inside a stored function or trigger" eng "Cannot modify @@session.skip_replication inside a stored function or trigger"
ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT
eng "Query execution was interrupted. The query examined at least %llu rows, which exceeds LIMIT ROWS EXAMINED (%llu). The query result may be incomplete." eng "Query execution was interrupted. The query examined at least %llu rows, which exceeds LIMIT ROWS EXAMINED (%llu). The query result may be incomplete."

View File

@ -139,6 +139,7 @@ extern "C" sig_handler handle_fatal_signal(int sig)
"Hope that's ok; if not, decrease some variables in the equation.\n\n"); "Hope that's ok; if not, decrease some variables in the equation.\n\n");
#if defined(HAVE_LINUXTHREADS) #if defined(HAVE_LINUXTHREADS)
#define UNSAFE_DEFAULT_LINUX_THREADS 200
if (sizeof(char*) == 4 && thread_count > UNSAFE_DEFAULT_LINUX_THREADS) if (sizeof(char*) == 4 && thread_count > UNSAFE_DEFAULT_LINUX_THREADS)
{ {
my_safe_printf_stderr( my_safe_printf_stderr(

View File

@ -1788,6 +1788,54 @@ Waiting for the slave SQL thread to free enough relay log space");
!(slave_killed=io_slave_killed(thd,mi)) && !(slave_killed=io_slave_killed(thd,mi)) &&
!rli->ignore_log_space_limit) !rli->ignore_log_space_limit)
mysql_cond_wait(&rli->log_space_cond, &rli->log_space_lock); mysql_cond_wait(&rli->log_space_cond, &rli->log_space_lock);
/*
Makes the IO thread read only one event at a time
until the SQL thread is able to purge the relay
logs, freeing some space.
Therefore, once the SQL thread processes this next
event, it goes to sleep (no more events in the queue),
sets ignore_log_space_limit=true and wakes the IO thread.
However, this event may have been enough already for
the SQL thread to purge some log files, freeing
rli->log_space_total .
This guarantees that the SQL and IO thread move
forward only one event at a time (to avoid deadlocks),
when the relay space limit is reached. It also
guarantees that when the SQL thread is prepared to
rotate (to be able to purge some logs), the IO thread
will know about it and will rotate.
NOTE: The ignore_log_space_limit is only set when the SQL
thread sleeps waiting for events.
*/
if (rli->ignore_log_space_limit)
{
#ifndef DBUG_OFF
{
char llbuf1[22], llbuf2[22];
DBUG_PRINT("info", ("log_space_limit=%s "
"log_space_total=%s "
"ignore_log_space_limit=%d "
"sql_force_rotate_relay=%d",
llstr(rli->log_space_limit,llbuf1),
llstr(rli->log_space_total,llbuf2),
(int) rli->ignore_log_space_limit,
(int) rli->sql_force_rotate_relay));
}
#endif
if (rli->sql_force_rotate_relay)
{
rotate_relay_log(rli->mi);
rli->sql_force_rotate_relay= false;
}
rli->ignore_log_space_limit= false;
}
thd->exit_cond(save_proc_info); thd->exit_cond(save_proc_info);
DBUG_RETURN(slave_killed); DBUG_RETURN(slave_killed);
} }
@ -5058,19 +5106,45 @@ static Log_event* next_event(Relay_log_info* rli)
constraint, because we do not want the I/O thread to block because of constraint, because we do not want the I/O thread to block because of
space (it's ok if it blocks for any other reason (e.g. because the space (it's ok if it blocks for any other reason (e.g. because the
master does not send anything). Then the I/O thread stops waiting master does not send anything). Then the I/O thread stops waiting
and reads more events. and reads one more event and starts honoring log_space_limit again.
The SQL thread decides when the I/O thread should take log_space_limit
into account again : ignore_log_space_limit is reset to 0 If the SQL thread needs more events to be able to rotate the log (it
in purge_first_log (when the SQL thread purges the just-read relay might need to finish the current group first), then it can ask for one
log), and also when the SQL thread starts. We should also reset more at a time. Thus we don't outgrow the relay log indefinitely,
ignore_log_space_limit to 0 when the user does RESET SLAVE, but in but rather in a controlled manner, until the next rotate.
fact, no need as RESET SLAVE requires that the slave
When the SQL thread starts it sets ignore_log_space_limit to false.
We should also reset ignore_log_space_limit to 0 when the user does
RESET SLAVE, but in fact, no need as RESET SLAVE requires that the slave
be stopped, and the SQL thread sets ignore_log_space_limit to 0 when be stopped, and the SQL thread sets ignore_log_space_limit to 0 when
it stops. it stops.
*/ */
mysql_mutex_lock(&rli->log_space_lock); mysql_mutex_lock(&rli->log_space_lock);
// prevent the I/O thread from blocking next times
rli->ignore_log_space_limit= 1; /*
If we have reached the limit of the relay space and we
are going to sleep, waiting for more events:
1. If outside a group, SQL thread asks the IO thread
to force a rotation so that the SQL thread purges
logs next time it processes an event (thus space is
freed).
2. If in a group, SQL thread asks the IO thread to
ignore the limit and queues yet one more event
so that the SQL thread finishes the group and
is able to rotate and purge sometime soon.
*/
if (rli->log_space_limit &&
rli->log_space_limit < rli->log_space_total)
{
/* force rotation if not in an unfinished group */
rli->sql_force_rotate_relay= !rli->is_in_group();
/* ask for one more event */
rli->ignore_log_space_limit= true;
}
/* /*
If the I/O thread is blocked, unblock it. Ok to broadcast If the I/O thread is blocked, unblock it. Ok to broadcast
after unlock, because the mutex is only destroyed in after unlock, because the mutex is only destroyed in

View File

@ -158,7 +158,7 @@ sp_get_item_value(THD *thd, Item *item, String *str)
buf.append(result->charset()->csname); buf.append(result->charset()->csname);
if (cs->escape_with_backslash_is_dangerous) if (cs->escape_with_backslash_is_dangerous)
buf.append(' '); buf.append(' ');
append_query_string(cs, result, &buf); append_query_string(thd, cs, result, &buf);
buf.append(" COLLATE '"); buf.append(" COLLATE '");
buf.append(item->collation.collation->name); buf.append(item->collation.collation->name);
buf.append('\''); buf.append('\'');

View File

@ -471,6 +471,8 @@ void lex_start(THD *thd)
lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED; lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED;
lex->select_lex.init_order(); lex->select_lex.init_order();
lex->select_lex.group_list.empty(); lex->select_lex.group_list.empty();
if (lex->select_lex.group_list_ptrs)
lex->select_lex.group_list_ptrs->clear();
lex->describe= 0; lex->describe= 0;
lex->subqueries= FALSE; lex->subqueries= FALSE;
lex->context_analysis_only= 0; lex->context_analysis_only= 0;
@ -1881,6 +1883,8 @@ void st_select_lex::init_select()
sj_nests.empty(); sj_nests.empty();
sj_subselects.empty(); sj_subselects.empty();
group_list.empty(); group_list.empty();
if (group_list_ptrs)
group_list_ptrs->clear();
type= db= 0; type= db= 0;
having= 0; having= 0;
table_join_options= 0; table_join_options= 0;
@ -3288,6 +3292,8 @@ static void fix_prepare_info_in_table_list(THD *thd, TABLE_LIST *tbl)
The passed WHERE and HAVING are to be saved for the future executions. The passed WHERE and HAVING are to be saved for the future executions.
This function saves it, and returns a copy which can be thrashed during This function saves it, and returns a copy which can be thrashed during
this execution of the statement. By saving/thrashing here we mean only this execution of the statement. By saving/thrashing here we mean only
We also save the chain of ORDER::next in group_list, in case
the list is modified by remove_const().
AND/OR trees. AND/OR trees.
The function also calls fix_prepare_info_in_table_list that saves all The function also calls fix_prepare_info_in_table_list that saves all
ON expressions. ON expressions.
@ -3299,6 +3305,19 @@ void st_select_lex::fix_prepare_information(THD *thd, Item **conds,
if (!thd->stmt_arena->is_conventional() && first_execution) if (!thd->stmt_arena->is_conventional() && first_execution)
{ {
first_execution= 0; first_execution= 0;
if (group_list.first)
{
if (!group_list_ptrs)
{
void *mem= thd->stmt_arena->alloc(sizeof(Group_list_ptrs));
group_list_ptrs= new (mem) Group_list_ptrs(thd->stmt_arena->mem_root);
}
group_list_ptrs->reserve(group_list.elements);
for (ORDER *order= group_list.first; order; order= order->next)
{
group_list_ptrs->push_back(order);
}
}
if (*conds) if (*conds)
{ {
thd->check_and_register_item_tree(&prep_where, conds); thd->check_and_register_item_tree(&prep_where, conds);
@ -4187,3 +4206,7 @@ void binlog_unsafe_map_init()
BINLOG_DIRECT_OFF & TRX_CACHE_NOT_EMPTY); BINLOG_DIRECT_OFF & TRX_CACHE_NOT_EMPTY);
} }
#endif #endif
#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
template class Mem_root_array<ORDER*, true>;
#endif

View File

@ -24,6 +24,7 @@
#include "sql_trigger.h" #include "sql_trigger.h"
#include "item.h" /* From item_subselect.h: subselect_union_engine */ #include "item.h" /* From item_subselect.h: subselect_union_engine */
#include "thr_lock.h" /* thr_lock_type, TL_UNLOCK */ #include "thr_lock.h" /* thr_lock_type, TL_UNLOCK */
#include "mem_root_array.h"
/* YACC and LEX Definitions */ /* YACC and LEX Definitions */
@ -261,6 +262,7 @@ enum enum_drop_mode
#define TL_OPTION_ALIAS 8 #define TL_OPTION_ALIAS 8
typedef List<Item> List_item; typedef List<Item> List_item;
typedef Mem_root_array<ORDER*, true> Group_list_ptrs;
/* SERVERS CACHE CHANGES */ /* SERVERS CACHE CHANGES */
typedef struct st_lex_server_options typedef struct st_lex_server_options
@ -735,7 +737,16 @@ public:
enum olap_type olap; enum olap_type olap;
/* FROM clause - points to the beginning of the TABLE_LIST::next_local list. */ /* FROM clause - points to the beginning of the TABLE_LIST::next_local list. */
SQL_I_List<TABLE_LIST> table_list; SQL_I_List<TABLE_LIST> table_list;
SQL_I_List<ORDER> group_list; /* GROUP BY clause. */
/*
GROUP BY clause.
This list may be mutated during optimization (by remove_const()),
so for prepared statements, we keep a copy of the ORDER.next pointers in
group_list_ptrs, and re-establish the original list before each execution.
*/
SQL_I_List<ORDER> group_list;
Group_list_ptrs *group_list_ptrs;
List<Item> item_list; /* list of fields & expressions */ List<Item> item_list; /* list of fields & expressions */
List<String> interval_list; List<String> interval_list;
bool is_item_list_lookup; bool is_item_list_lookup;
@ -941,7 +952,8 @@ public:
bool test_limit(); bool test_limit();
friend void lex_start(THD *thd); friend void lex_start(THD *thd);
st_select_lex() : n_sum_items(0), n_child_sum_items(0) {} st_select_lex() : group_list_ptrs(NULL), n_sum_items(0), n_child_sum_items(0)
{}
void make_empty_select() void make_empty_select()
{ {
init_query(); init_query();

View File

@ -888,7 +888,7 @@ static bool insert_params_with_log(Prepared_statement *stmt, uchar *null_array,
*/ */
else if (! is_param_long_data_type(param)) else if (! is_param_long_data_type(param))
DBUG_RETURN(1); DBUG_RETURN(1);
res= param->query_val_str(&str); res= param->query_val_str(thd, &str);
if (param->convert_str_value(thd)) if (param->convert_str_value(thd))
DBUG_RETURN(1); /* out of memory */ DBUG_RETURN(1); /* out of memory */
@ -1062,7 +1062,7 @@ static bool emb_insert_params_with_log(Prepared_statement *stmt,
DBUG_RETURN(1); DBUG_RETURN(1);
} }
} }
res= param->query_val_str(&str); res= param->query_val_str(thd, &str);
if (param->convert_str_value(thd)) if (param->convert_str_value(thd))
DBUG_RETURN(1); /* out of memory */ DBUG_RETURN(1); /* out of memory */
@ -1208,7 +1208,7 @@ static bool insert_params_from_vars_with_log(Prepared_statement *stmt,
setup_one_conversion_function(thd, param, param->param_type); setup_one_conversion_function(thd, param, param->param_type);
if (param->set_from_user_var(thd, entry)) if (param->set_from_user_var(thd, entry))
DBUG_RETURN(1); DBUG_RETURN(1);
val= param->query_val_str(&buf); val= param->query_val_str(thd, &buf);
if (param->convert_str_value(thd)) if (param->convert_str_value(thd))
DBUG_RETURN(1); /* out of memory */ DBUG_RETURN(1); /* out of memory */
@ -2512,6 +2512,14 @@ void reinit_stmt_before_use(THD *thd, LEX *lex)
DBUG_ASSERT(sl->join == 0); DBUG_ASSERT(sl->join == 0);
ORDER *order; ORDER *order;
/* Fix GROUP list */ /* Fix GROUP list */
if (sl->group_list_ptrs && sl->group_list_ptrs->size() > 0)
{
for (uint ix= 0; ix < sl->group_list_ptrs->size() - 1; ++ix)
{
order= sl->group_list_ptrs->at(ix);
order->next= sl->group_list_ptrs->at(ix+1);
}
}
for (order= sl->group_list.first; order; order= order->next) for (order= sl->group_list.first; order; order= order->next)
order->item= &order->item_ptr; order->item= &order->item_ptr;
/* Fix ORDER list */ /* Fix ORDER list */
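The pointer restoration done here, reduced to a standalone toy: remember the original order of the nodes once at prepare time, then rebuild the next chain from the saved order before every re-execution. Types and names below are illustrative only, not the server's.
#include <cstddef>
#include <cstdio>
#include <vector>
struct Order { int item; Order *next; };
int main()
{
  Order c= {3, nullptr}, b= {2, &c}, a= {1, &b};
  /* "Prepare": remember the original chain a -> b -> c. */
  std::vector<Order*> saved= {&a, &b, &c};
  /* "Optimize": remove_const()-style surgery drops the middle element. */
  a.next= &c;
  /* "Re-execute": restore the next pointers from the saved order. */
  for (size_t i= 0; i + 1 < saved.size(); i++)
    saved[i]->next= saved[i + 1];
  for (Order *o= &a; o; o= o->next)
    printf("%d ", o->item);           /* prints: 1 2 3 */
  printf("\n");
  return 0;
}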

View File

@ -5254,7 +5254,8 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
(sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0) (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0)
{ {
restore_record(table, s->default_values); restore_record(table, s->default_values);
if (!wild || !wild[0] || !wild_compare(sp_name.c_ptr_safe(), wild, 0)) if (!wild || !wild[0] || !wild_case_compare(system_charset_info,
sp_name.c_ptr_safe(), wild))
{ {
int enum_idx= (int) proc_table->field[MYSQL_PROC_FIELD_ACCESS]->val_int(); int enum_idx= (int) proc_table->field[MYSQL_PROC_FIELD_ACCESS]->val_int();
table->field[3]->store(sp_name.ptr(), sp_name.length(), cs); table->field[3]->store(sp_name.ptr(), sp_name.length(), cs);
@ -6414,7 +6415,7 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table)
DBUG_RETURN(1); DBUG_RETURN(1);
} }
if (!(!wild || !wild[0] || !wild_compare(et.name.str, wild, 0))) if (!(!wild || !wild[0] || !wild_case_compare(scs, et.name.str, wild)))
DBUG_RETURN(0); DBUG_RETURN(0);
/* /*

View File

@ -445,6 +445,13 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
view= lex->unlink_first_table(&link_to_local); view= lex->unlink_first_table(&link_to_local);
if (check_db_dir_existence(view->db))
{
my_error(ER_BAD_DB_ERROR, MYF(0), view->db);
res= TRUE;
goto err;
}
if (mode == VIEW_ALTER && fill_defined_view_parts(thd, view)) if (mode == VIEW_ALTER && fill_defined_view_parts(thd, view))
{ {
res= TRUE; res= TRUE;

View File

@ -2954,7 +2954,7 @@ static Sys_var_uint Sys_repl_report_port(
"port or if you have a special tunnel from the master or other clients " "port or if you have a special tunnel from the master or other clients "
"to the slave. If not sure, leave this option unset", "to the slave. If not sure, leave this option unset",
READ_ONLY GLOBAL_VAR(report_port), CMD_LINE(REQUIRED_ARG), READ_ONLY GLOBAL_VAR(report_port), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, UINT_MAX), DEFAULT(MYSQL_PORT), BLOCK_SIZE(1)); VALID_RANGE(0, UINT_MAX), DEFAULT(0), BLOCK_SIZE(1));
#endif #endif
static Sys_var_mybool Sys_keep_files_on_create( static Sys_var_mybool Sys_keep_files_on_create(

View File

@ -390,7 +390,7 @@ bool trans_savepoint(THD *thd, LEX_STRING name)
DBUG_RETURN(FALSE); DBUG_RETURN(FALSE);
enum xa_states xa_state= thd->transaction.xid_state.xa_state; enum xa_states xa_state= thd->transaction.xid_state.xa_state;
if (xa_state != XA_NOTR) if (xa_state != XA_NOTR && xa_state != XA_ACTIVE)
{ {
my_error(ER_XAER_RMFAIL, MYF(0), xa_state_names[xa_state]); my_error(ER_XAER_RMFAIL, MYF(0), xa_state_names[xa_state]);
DBUG_RETURN(TRUE); DBUG_RETURN(TRUE);

View File

@@ -1012,45 +1012,49 @@ btr_page_alloc(
 /**************************************************************//**
 Gets the number of pages in a B-tree.
-@return number of pages */
+@return number of pages, or ULINT_UNDEFINED if the index is unavailable */
 UNIV_INTERN
 ulint
 btr_get_size(
 /*=========*/
 	dict_index_t*	index,	/*!< in: index */
-	ulint		flag)	/*!< in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */
+	ulint		flag,	/*!< in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */
+	mtr_t*		mtr)	/*!< in/out: mini-transaction where index
+				is s-latched */
 {
 	fseg_header_t*	seg_header;
 	page_t*		root;
 	ulint		n;
 	ulint		dummy;
-	mtr_t		mtr;
-	mtr_start(&mtr);
-	mtr_s_lock(dict_index_get_lock(index), &mtr);
-	root = btr_root_get(index, &mtr);
+	ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
+				MTR_MEMO_S_LOCK));
+	if (index->page == FIL_NULL
+	    || index->to_be_dropped
+	    || *index->name == TEMP_INDEX_PREFIX) {
+		return(ULINT_UNDEFINED);
+	}
+	root = btr_root_get(index, mtr);
 	if (flag == BTR_N_LEAF_PAGES) {
 		seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF;
-		fseg_n_reserved_pages(seg_header, &n, &mtr);
+		fseg_n_reserved_pages(seg_header, &n, mtr);
 	} else if (flag == BTR_TOTAL_SIZE) {
 		seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_TOP;
-		n = fseg_n_reserved_pages(seg_header, &dummy, &mtr);
+		n = fseg_n_reserved_pages(seg_header, &dummy, mtr);
 		seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF;
-		n += fseg_n_reserved_pages(seg_header, &dummy, &mtr);
+		n += fseg_n_reserved_pages(seg_header, &dummy, mtr);
 	} else {
 		ut_error;
 	}
-	mtr_commit(&mtr);
 	return(n);
 }
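The new signature pushes the mini-transaction out to the caller: the index must already be S-latched in an mtr, and ULINT_UNDEFINED is returned when the index has no root page or is being dropped. A minimal caller sketch, mirroring the dict_update_statistics() hunk further down in this diff (InnoDB-internal types and calls, so it only compiles inside the source tree):

	mtr_t	mtr;
	ulint	size;

	mtr_start(&mtr);
	mtr_s_lock(dict_index_get_lock(index), &mtr);	/* S-latch the index */

	size = btr_get_size(index, BTR_TOTAL_SIZE, &mtr);

	if (size == ULINT_UNDEFINED) {
		/* index is unavailable (being dropped, no root page):
		fall back to dummy statistics */
	}

	mtr_commit(&mtr);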

View File

@@ -1,6 +1,6 @@
 /*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -133,6 +133,8 @@ btr_pcur_store_position(
 		ut_a(btr_page_get_next(page, mtr) == FIL_NULL);
 		ut_a(btr_page_get_prev(page, mtr) == FIL_NULL);
+		ut_ad(page_is_leaf(page));
+		ut_ad(page_get_page_no(page) == index->page);
 		cursor->old_stored = BTR_PCUR_OLD_STORED;
@@ -325,13 +327,19 @@ btr_pcur_restore_position_func(
 	/* Save the old search mode of the cursor */
 	old_mode = cursor->search_mode;
-	if (UNIV_LIKELY(cursor->rel_pos == BTR_PCUR_ON)) {
+	switch (cursor->rel_pos) {
+	case BTR_PCUR_ON:
 		mode = PAGE_CUR_LE;
-	} else if (cursor->rel_pos == BTR_PCUR_AFTER) {
+		break;
+	case BTR_PCUR_AFTER:
 		mode = PAGE_CUR_G;
-	} else {
-		ut_ad(cursor->rel_pos == BTR_PCUR_BEFORE);
+		break;
+	case BTR_PCUR_BEFORE:
 		mode = PAGE_CUR_L;
+		break;
+	default:
+		ut_error;
+		mode = 0;
 	}
 	btr_pcur_open_with_no_init_func(index, tuple, mode, latch_mode,
@@ -340,25 +348,44 @@ btr_pcur_restore_position_func(
 	/* Restore the old search mode */
 	cursor->search_mode = old_mode;
-	if (cursor->rel_pos == BTR_PCUR_ON
-	    && btr_pcur_is_on_user_rec(cursor)
-	    && 0 == cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor),
-				   rec_get_offsets(
-					   btr_pcur_get_rec(cursor), index,
-					   NULL, ULINT_UNDEFINED, &heap))) {
-		/* We have to store the NEW value for the modify clock, since
-		the cursor can now be on a different page! But we can retain
-		the value of old_rec */
-		cursor->block_when_stored = btr_pcur_get_block(cursor);
-		cursor->modify_clock = buf_block_get_modify_clock(
-			cursor->block_when_stored);
-		cursor->old_stored = BTR_PCUR_OLD_STORED;
-		mem_heap_free(heap);
-		return(TRUE);
+	if (btr_pcur_is_on_user_rec(cursor)) {
+		switch (cursor->rel_pos) {
+		case BTR_PCUR_ON:
+			if (!cmp_dtuple_rec(
+				    tuple, btr_pcur_get_rec(cursor),
+				    rec_get_offsets(btr_pcur_get_rec(cursor),
						    index, NULL,
						    ULINT_UNDEFINED, &heap))) {
+				/* We have to store the NEW value for
+				the modify clock, since the cursor can
+				now be on a different page! But we can
+				retain the value of old_rec */
+				cursor->block_when_stored =
+					btr_pcur_get_block(cursor);
+				cursor->modify_clock =
+					buf_block_get_modify_clock(
+						cursor->block_when_stored);
+				cursor->old_stored = BTR_PCUR_OLD_STORED;
+				mem_heap_free(heap);
+				return(TRUE);
+			}
+			break;
+		case BTR_PCUR_BEFORE:
+			page_cur_move_to_next(btr_pcur_get_page_cur(cursor));
+			break;
+		case BTR_PCUR_AFTER:
+			page_cur_move_to_prev(btr_pcur_get_page_cur(cursor));
+			break;
+#ifdef UNIV_DEBUG
+		default:
+			ut_error;
+#endif /* UNIV_DEBUG */
+		}
 	}
 	mem_heap_free(heap);

View File

@@ -2461,7 +2461,7 @@ wait_until_unfixed:
 	block->page.buf_fix_count = 1;
 	buf_block_set_io_fix(block, BUF_IO_READ);
-	rw_lock_x_lock_func(&block->lock, 0, file, line);
+	rw_lock_x_lock_inline(&block->lock, 0, file, line);
 	UNIV_MEM_INVALID(bpage, sizeof *bpage);
@@ -2601,14 +2601,14 @@ wait_until_unfixed:
 		break;
 	case RW_S_LATCH:
-		rw_lock_s_lock_func(&(block->lock), 0, file, line);
+		rw_lock_s_lock_inline(&(block->lock), 0, file, line);
 		fix_type = MTR_MEMO_PAGE_S_FIX;
 		break;
 	default:
 		ut_ad(rw_latch == RW_X_LATCH);
-		rw_lock_x_lock_func(&(block->lock), 0, file, line);
+		rw_lock_x_lock_inline(&(block->lock), 0, file, line);
 		fix_type = MTR_MEMO_PAGE_X_FIX;
 		break;
@@ -2688,8 +2688,8 @@ buf_page_optimistic_get(
 					    file, line);
 		fix_type = MTR_MEMO_PAGE_S_FIX;
 	} else {
-		success = rw_lock_x_lock_func_nowait(&(block->lock),
+		success = rw_lock_x_lock_func_nowait_inline(&(block->lock),
						     file, line);
 		fix_type = MTR_MEMO_PAGE_X_FIX;
 	}
@@ -2818,8 +2818,8 @@ buf_page_get_known_nowait(
 					    file, line);
 		fix_type = MTR_MEMO_PAGE_S_FIX;
 	} else {
-		success = rw_lock_x_lock_func_nowait(&(block->lock),
+		success = rw_lock_x_lock_func_nowait_inline(&(block->lock),
						     file, line);
 		fix_type = MTR_MEMO_PAGE_X_FIX;
 	}
@@ -2906,8 +2906,8 @@ buf_page_try_get_func(
 		S-latch. */
 		fix_type = MTR_MEMO_PAGE_X_FIX;
-		success = rw_lock_x_lock_func_nowait(&block->lock,
+		success = rw_lock_x_lock_func_nowait_inline(&block->lock,
						     file, line);
 	}
 	if (!success) {
@@ -4750,34 +4750,32 @@ buf_all_freed(void)
 /*********************************************************************//**
 Checks that there currently are no pending i/o-operations for the buffer
 pool.
-@return TRUE if there is no pending i/o */
+@return number of pending i/o */
 UNIV_INTERN
-ibool
-buf_pool_check_no_pending_io(void)
-/*==============================*/
+ulint
+buf_pool_check_num_pending_io(void)
+/*===============================*/
 {
 	ulint	i;
-	ibool	ret = TRUE;
+	ulint	pending_io = 0;
 	buf_pool_mutex_enter_all();
-	for (i = 0; i < srv_buf_pool_instances && ret; i++) {
+	for (i = 0; i < srv_buf_pool_instances; i++) {
 		const buf_pool_t*	buf_pool;
 		buf_pool = buf_pool_from_array(i);
-		if (buf_pool->n_pend_reads
-		    + buf_pool->n_flush[BUF_FLUSH_LRU]
-		    + buf_pool->n_flush[BUF_FLUSH_LIST]
-		    + buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]) {
-			ret = FALSE;
-		}
+		pending_io += buf_pool->n_pend_reads
+			      + buf_pool->n_flush[BUF_FLUSH_LRU]
+			      + buf_pool->n_flush[BUF_FLUSH_LIST]
+			      + buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE];
 	}
 	buf_pool_mutex_exit_all();
-	return(ret);
+	return(pending_io);
 }
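Returning a count instead of a boolean lets callers report how much work is still outstanding while they wait. The caller is not part of this excerpt, so the following polling loop is only an assumed usage sketch (everything except buf_pool_check_num_pending_io() and os_thread_sleep() is illustrative):

	ulint	pending_io;

	while ((pending_io = buf_pool_check_num_pending_io()) > 0) {
		/* e.g. during shutdown: report progress instead of
		spinning silently */
		fprintf(stderr, "InnoDB: waiting for %lu pending"
			" page reads/writes\n", (ulong) pending_io);
		os_thread_sleep(100000);	/* 100 ms */
	}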
#if 0 #if 0

View File

@ -334,40 +334,276 @@ next_page:
ut_free(page_arr); ut_free(page_arr);
} }
/******************************************************************//**
While flushing (or removing dirty) pages from a tablespace we don't
want to hog the CPU and resources. Release the buffer pool and block
mutex and try to force a context switch. Then reacquire the same mutexes.
The current page is "fixed" before the release of the mutexes and then
"unfixed" again once we have reacquired the mutexes. */
static
void
buf_flush_yield(
/*============*/
buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
buf_page_t* bpage) /*!< in/out: current page */
{
mutex_t* block_mutex;
ut_ad(buf_pool_mutex_own(buf_pool));
ut_ad(buf_page_in_file(bpage));
block_mutex = buf_page_get_mutex(bpage);
mutex_enter(block_mutex);
/* "Fix" the block so that the position cannot be
changed after we release the buffer pool and
block mutexes. */
buf_page_set_sticky(bpage);
/* Now it is safe to release the buf_pool->mutex. */
buf_pool_mutex_exit(buf_pool);
mutex_exit(block_mutex);
/* Try and force a context switch. */
os_thread_yield();
buf_pool_mutex_enter(buf_pool);
mutex_enter(block_mutex);
/* "Unfix" the block now that we have both the
buffer pool and block mutex again. */
buf_page_unset_sticky(bpage);
mutex_exit(block_mutex);
}
/******************************************************************//**
If we have hogged the resources for too long then release the buffer
pool and flush list mutex and do a thread yield. Set the current page
to "sticky" so that it is not relocated during the yield.
@return TRUE if yielded */
static
ibool
buf_flush_try_yield(
/*================*/
buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
buf_page_t* bpage, /*!< in/out: bpage to remove */
ulint processed) /*!< in: number of pages processed */
{
/* Every BUF_LRU_DROP_SEARCH_SIZE iterations in the
loop we release buf_pool->mutex to let other threads
do their job but only if the block is not IO fixed. This
ensures that the block stays in its position in the
flush_list. */
if (bpage != NULL
&& processed >= BUF_LRU_DROP_SEARCH_SIZE
&& buf_page_get_io_fix(bpage) == BUF_IO_NONE) {
buf_flush_list_mutex_exit(buf_pool);
/* Release the buffer pool and block mutex
to give the other threads a go. */
buf_flush_yield(buf_pool, bpage);
buf_flush_list_mutex_enter(buf_pool);
/* Should not have been removed from the flush
list during the yield. However, this check is
not sufficient to catch a remove -> add. */
ut_ad(bpage->in_flush_list);
return(TRUE);
}
return(FALSE);
}
/******************************************************************//**
Removes a single page from a given tablespace inside a specific
buffer pool instance.
@return TRUE if page was removed. */
static
ibool
buf_flush_or_remove_page(
/*=====================*/
buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
buf_page_t* bpage) /*!< in/out: bpage to remove */
{
mutex_t* block_mutex;
ibool processed = FALSE;
ut_ad(buf_pool_mutex_own(buf_pool));
ut_ad(buf_flush_list_mutex_own(buf_pool));
block_mutex = buf_page_get_mutex(bpage);
/* bpage->space and bpage->io_fix are protected by
buf_pool->mutex and block_mutex. It is safe to check
them while holding buf_pool->mutex only. */
if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
/* We cannot remove this page during this scan
yet; maybe the system is currently reading it
in, or flushing the modifications to the file */
} else {
/* We have to release the flush_list_mutex to obey the
latching order. We are however guaranteed that the page
will stay in the flush_list because buf_flush_remove()
needs buf_pool->mutex as well (for the non-flush case). */
buf_flush_list_mutex_exit(buf_pool);
mutex_enter(block_mutex);
ut_ad(bpage->oldest_modification != 0);
if (bpage->buf_fix_count == 0) {
buf_flush_remove(bpage);
processed = TRUE;
}
mutex_exit(block_mutex);
buf_flush_list_mutex_enter(buf_pool);
}
ut_ad(!mutex_own(block_mutex));
return(processed);
}
/******************************************************************//** /******************************************************************//**
Remove all dirty pages belonging to a given tablespace inside a specific Remove all dirty pages belonging to a given tablespace inside a specific
buffer pool instance when we are deleting the data file(s) of that buffer pool instance when we are deleting the data file(s) of that
tablespace. The pages still remain a part of LRU and are evicted from tablespace. The pages still remain a part of LRU and are evicted from
the list as they age towards the tail of the LRU. */ the list as they age towards the tail of the LRU.
@return TRUE if all freed. */
static
ibool
buf_flush_or_remove_pages(
/*======================*/
buf_pool_t* buf_pool, /*!< buffer pool instance */
ulint id) /*!< in: target space id for which
to remove or flush pages */
{
buf_page_t* prev;
buf_page_t* bpage;
ulint processed = 0;
ibool all_freed = TRUE;
buf_flush_list_mutex_enter(buf_pool);
for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
bpage != NULL;
bpage = prev) {
ut_a(buf_page_in_file(bpage));
ut_ad(bpage->in_flush_list);
/* Save the previous link because once we free the
page we can't rely on the links. */
prev = UT_LIST_GET_PREV(list, bpage);
if (buf_page_get_space(bpage) != id) {
/* Skip this block, as it does not belong to
the target space. */
} else if (!buf_flush_or_remove_page(buf_pool, bpage)) {
/* Remove was unsuccessful, we have to try again
by scanning the entire list from the end. */
all_freed = FALSE;
}
++processed;
/* Yield if we have hogged the CPU and mutexes for too long. */
if (buf_flush_try_yield(buf_pool, prev, processed)) {
/* Reset the batch size counter if we had to yield. */
processed = 0;
}
}
buf_flush_list_mutex_exit(buf_pool);
return(all_freed);
}
/******************************************************************//**
Remove or flush all the dirty pages that belong to a given tablespace
inside a specific buffer pool instance. The pages will remain in the LRU
list and will be evicted from the LRU list as they age and move towards
the tail of the LRU list. */
static static
void void
buf_LRU_remove_dirty_pages_for_tablespace( buf_flush_dirty_pages(
/*======================================*/ /*==================*/
buf_pool_t* buf_pool, /*!< buffer pool instance */
ulint id) /*!< in: space id */
{
ibool all_freed;
do {
buf_pool_mutex_enter(buf_pool);
all_freed = buf_flush_or_remove_pages(buf_pool, id);
buf_pool_mutex_exit(buf_pool);
ut_ad(buf_flush_validate(buf_pool));
if (!all_freed) {
os_thread_sleep(20000);
}
} while (!all_freed);
}
/******************************************************************//**
Remove all pages that belong to a given tablespace inside a specific
buffer pool instance when we are DISCARDing the tablespace. */
static
void
buf_LRU_remove_all_pages(
/*=====================*/
buf_pool_t* buf_pool, /*!< buffer pool instance */ buf_pool_t* buf_pool, /*!< buffer pool instance */
ulint id) /*!< in: space id */ ulint id) /*!< in: space id */
{ {
buf_page_t* bpage; buf_page_t* bpage;
ibool all_freed; ibool all_freed;
ulint i;
scan_again: scan_again:
buf_pool_mutex_enter(buf_pool); buf_pool_mutex_enter(buf_pool);
buf_flush_list_mutex_enter(buf_pool);
all_freed = TRUE; all_freed = TRUE;
for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list), i = 0; for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
bpage != NULL; ++i) { bpage != NULL;
/* No op */) {
buf_page_t* prev_bpage; buf_page_t* prev_bpage;
mutex_t* block_mutex = NULL; mutex_t* block_mutex = NULL;
ut_a(buf_page_in_file(bpage)); ut_a(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
prev_bpage = UT_LIST_GET_PREV(list, bpage); prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
/* bpage->space and bpage->io_fix are protected by /* bpage->space and bpage->io_fix are protected by
buf_pool->mutex and block_mutex. It is safe to check buf_pool->mutex and the block_mutex. It is safe to check
them while holding buf_pool->mutex only. */ them while holding buf_pool->mutex only. */
if (buf_page_get_space(bpage) != id) { if (buf_page_get_space(bpage) != id) {
@ -381,83 +617,87 @@ scan_again:
all_freed = FALSE; all_freed = FALSE;
goto next_page; goto next_page;
} else {
block_mutex = buf_page_get_mutex(bpage);
mutex_enter(block_mutex);
if (bpage->buf_fix_count > 0) {
mutex_exit(block_mutex);
/* We cannot remove this page during
this scan yet; maybe the system is
currently reading it in, or flushing
the modifications to the file */
all_freed = FALSE;
goto next_page;
}
} }
/* We have to release the flush_list_mutex to obey the ut_ad(mutex_own(block_mutex));
latching order. We are however guaranteed that the page
will stay in the flush_list because buf_flush_remove() #ifdef UNIV_DEBUG
needs buf_pool->mutex as well. */ if (buf_debug_prints) {
buf_flush_list_mutex_exit(buf_pool); fprintf(stderr,
block_mutex = buf_page_get_mutex(bpage); "Dropping space %lu page %lu\n",
mutex_enter(block_mutex); (ulong) buf_page_get_space(bpage),
(ulong) buf_page_get_page_no(bpage));
}
#endif
if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
/* Do nothing, because the adaptive hash index
covers uncompressed pages only. */
} else if (((buf_block_t*) bpage)->index) {
ulint page_no;
ulint zip_size;
buf_pool_mutex_exit(buf_pool);
zip_size = buf_page_get_zip_size(bpage);
page_no = buf_page_get_page_no(bpage);
if (bpage->buf_fix_count > 0) {
mutex_exit(block_mutex); mutex_exit(block_mutex);
buf_flush_list_mutex_enter(buf_pool);
/* We cannot remove this page during /* Note that the following call will acquire
this scan yet; maybe the system is and release block->lock X-latch. */
currently reading it in, or flushing
the modifications to the file */
all_freed = FALSE; btr_search_drop_page_hash_when_freed(
goto next_page; id, zip_size, page_no);
goto scan_again;
} }
ut_ad(bpage->oldest_modification != 0); if (bpage->oldest_modification != 0) {
buf_flush_remove(bpage);
}
buf_flush_remove(bpage); ut_ad(!bpage->in_flush_list);
/* Remove from the LRU list. */
if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
!= BUF_BLOCK_ZIP_FREE) {
buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
mutex_exit(block_mutex);
} else {
/* The block_mutex should have been released
by buf_LRU_block_remove_hashed_page() when it
returns BUF_BLOCK_ZIP_FREE. */
ut_ad(block_mutex == &buf_pool->zip_mutex);
}
ut_ad(!mutex_own(block_mutex));
mutex_exit(block_mutex);
buf_flush_list_mutex_enter(buf_pool);
next_page: next_page:
bpage = prev_bpage; bpage = prev_bpage;
if (!bpage) {
break;
}
/* Every BUF_LRU_DROP_SEARCH_SIZE iterations in the
loop we release buf_pool->mutex to let other threads
do their job. */
if (i < BUF_LRU_DROP_SEARCH_SIZE) {
continue;
}
/* We IO-fix the block to make sure that the block
stays in its position in the flush_list. */
if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
/* Block is already IO-fixed. We don't
want to change the value. Lets leave
this block alone. */
continue;
}
buf_flush_list_mutex_exit(buf_pool);
block_mutex = buf_page_get_mutex(bpage);
mutex_enter(block_mutex);
buf_page_set_sticky(bpage);
mutex_exit(block_mutex);
/* Now it is safe to release the buf_pool->mutex. */
buf_pool_mutex_exit(buf_pool);
os_thread_yield();
buf_pool_mutex_enter(buf_pool);
mutex_enter(block_mutex);
buf_page_unset_sticky(bpage);
mutex_exit(block_mutex);
buf_flush_list_mutex_enter(buf_pool);
ut_ad(bpage->in_flush_list);
i = 0;
} }
buf_pool_mutex_exit(buf_pool); buf_pool_mutex_exit(buf_pool);
buf_flush_list_mutex_exit(buf_pool);
ut_ad(buf_flush_validate(buf_pool));
if (!all_freed) { if (!all_freed) {
os_thread_sleep(20000); os_thread_sleep(20000);
@ -467,28 +707,46 @@ next_page:
} }
/******************************************************************//** /******************************************************************//**
Invalidates all pages belonging to a given tablespace when we are deleting Removes all pages belonging to a given tablespace. */
the data file(s) of that tablespace. */
UNIV_INTERN UNIV_INTERN
void void
buf_LRU_invalidate_tablespace( buf_LRU_flush_or_remove_pages(
/*==========================*/ /*==========================*/
ulint id) /*!< in: space id */ ulint id, /*!< in: space id */
enum buf_remove_t buf_remove)/*!< in: remove or flush
strategy */
{ {
ulint i; ulint i;
/* Before we attempt to drop pages one by one we first
attempt to drop page hash index entries in batches to make
it more efficient. The batching attempt is a best effort
attempt and does not guarantee that all pages hash entries
will be dropped. We get rid of remaining page hash entries
one by one below. */
for (i = 0; i < srv_buf_pool_instances; i++) { for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool; buf_pool_t* buf_pool;
buf_pool = buf_pool_from_array(i); buf_pool = buf_pool_from_array(i);
buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
buf_LRU_remove_dirty_pages_for_tablespace(buf_pool, id); switch (buf_remove) {
case BUF_REMOVE_ALL_NO_WRITE:
/* A DISCARD tablespace case. Remove AHI entries
and evict all pages from LRU. */
/* Before we attempt to drop pages hash entries
one by one we first attempt to drop page hash
index entries in batches to make it more
efficient. The batching attempt is a best effort
attempt and does not guarantee that all pages
hash entries will be dropped. We get rid of
remaining page hash entries one by one below. */
buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
buf_LRU_remove_all_pages(buf_pool, id);
break;
case BUF_REMOVE_FLUSH_NO_WRITE:
/* A DROP table case. AHI entries are already
removed. No need to evict all pages from LRU
list. Just evict pages from flush list without
writing. */
buf_flush_dirty_pages(buf_pool, id);
break;
}
} }
} }
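The split into buf_LRU_remove_all_pages() and buf_flush_dirty_pages() is what the new buf_remove_t strategies select between. A short sketch of the intended mapping, taken from the fil_delete_tablespace() hunk later in this diff (space_id is a placeholder):

	/* DISCARD TABLESPACE: drop adaptive hash index entries and
	evict every page of the space from the LRU list. */
	buf_LRU_flush_or_remove_pages(space_id, BUF_REMOVE_ALL_NO_WRITE);

	/* DROP TABLE: only unhook dirty pages from the flush list,
	without writing them back; clean LRU pages simply age out. */
	buf_LRU_flush_or_remove_pages(space_id, BUF_REMOVE_FLUSH_NO_WRITE);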

View File

@@ -4337,16 +4337,27 @@ dict_update_statistics(
 		    (srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE
 		     || (srv_force_recovery < SRV_FORCE_NO_LOG_REDO
			 && dict_index_is_clust(index)))) {
+			mtr_t	mtr;
 			ulint	size;
-			size = btr_get_size(index, BTR_TOTAL_SIZE);
-			index->stat_index_size = size;
-			sum_of_index_sizes += size;
-			size = btr_get_size(index, BTR_N_LEAF_PAGES);
-			if (size == 0) {
+			mtr_start(&mtr);
+			mtr_s_lock(dict_index_get_lock(index), &mtr);
+			size = btr_get_size(index, BTR_TOTAL_SIZE, &mtr);
+			if (size != ULINT_UNDEFINED) {
+				sum_of_index_sizes += size;
+				index->stat_index_size = size;
+				size = btr_get_size(
+					index, BTR_N_LEAF_PAGES, &mtr);
+			}
+			mtr_commit(&mtr);
+			switch (size) {
+			case ULINT_UNDEFINED:
+				goto fake_statistics;
+			case 0:
				/* The root node of the tree is a leaf */
				size = 1;
			}
@@ -4363,6 +4374,7 @@ dict_update_statistics(
			various means, also via secondary indexes. */
			ulint	i;
+fake_statistics:
			sum_of_index_sizes++;
			index->stat_index_size = index->stat_n_leaf_pages = 1;

View File

@ -185,7 +185,7 @@ struct fil_space_struct {
.ibd file of tablespace and want to .ibd file of tablespace and want to
stop temporarily posting of new i/o stop temporarily posting of new i/o
requests on the file */ requests on the file */
ibool stop_ibuf_merges; ibool stop_new_ops;
/*!< we set this TRUE when we start /*!< we set this TRUE when we start
deleting a single-table tablespace */ deleting a single-table tablespace */
ibool is_being_deleted; ibool is_being_deleted;
@ -210,12 +210,13 @@ struct fil_space_struct {
ulint n_pending_flushes; /*!< this is positive when flushing ulint n_pending_flushes; /*!< this is positive when flushing
the tablespace to disk; dropping of the the tablespace to disk; dropping of the
tablespace is forbidden if this is positive */ tablespace is forbidden if this is positive */
ulint n_pending_ibuf_merges;/*!< this is positive ulint n_pending_ops;/*!< this is positive when we
when merging insert buffer entries to have pending operations against this
a page so that we may need to access tablespace. The pending operations can
the ibuf bitmap page in the be ibuf merges or lock validation code
tablespade: dropping of the tablespace trying to read a block.
is forbidden if this is positive */ Dropping of the tablespace is forbidden
if this is positive */
hash_node_t hash; /*!< hash chain node */ hash_node_t hash; /*!< hash chain node */
hash_node_t name_hash;/*!< hash chain the name_hash table */ hash_node_t name_hash;/*!< hash chain the name_hash table */
#ifndef UNIV_HOTBACKUP #ifndef UNIV_HOTBACKUP
@ -1282,7 +1283,7 @@ try_again:
} }
space->stop_ios = FALSE; space->stop_ios = FALSE;
space->stop_ibuf_merges = FALSE; space->stop_new_ops = FALSE;
space->is_being_deleted = FALSE; space->is_being_deleted = FALSE;
space->purpose = purpose; space->purpose = purpose;
space->size = 0; space->size = 0;
@ -1291,7 +1292,7 @@ try_again:
space->n_reserved_extents = 0; space->n_reserved_extents = 0;
space->n_pending_flushes = 0; space->n_pending_flushes = 0;
space->n_pending_ibuf_merges = 0; space->n_pending_ops = 0;
UT_LIST_INIT(space->chain); UT_LIST_INIT(space->chain);
space->magic_n = FIL_SPACE_MAGIC_N; space->magic_n = FIL_SPACE_MAGIC_N;
@ -1891,13 +1892,12 @@ fil_read_first_page(
#ifndef UNIV_HOTBACKUP #ifndef UNIV_HOTBACKUP
/*******************************************************************//** /*******************************************************************//**
Increments the count of pending insert buffer page merges, if space is not Increments the count of pending operation, if space is not being deleted.
being deleted. @return TRUE if being deleted, and operation should be skipped */
@return TRUE if being deleted, and ibuf merges should be skipped */
UNIV_INTERN UNIV_INTERN
ibool ibool
fil_inc_pending_ibuf_merges( fil_inc_pending_ops(
/*========================*/ /*================*/
ulint id) /*!< in: space id */ ulint id) /*!< in: space id */
{ {
fil_space_t* space; fil_space_t* space;
@ -1913,13 +1913,13 @@ fil_inc_pending_ibuf_merges(
(ulong) id); (ulong) id);
} }
if (space == NULL || space->stop_ibuf_merges) { if (space == NULL || space->stop_new_ops) {
mutex_exit(&fil_system->mutex); mutex_exit(&fil_system->mutex);
return(TRUE); return(TRUE);
} }
space->n_pending_ibuf_merges++; space->n_pending_ops++;
mutex_exit(&fil_system->mutex); mutex_exit(&fil_system->mutex);
@ -1927,11 +1927,11 @@ fil_inc_pending_ibuf_merges(
} }
/*******************************************************************//** /*******************************************************************//**
Decrements the count of pending insert buffer page merges. */ Decrements the count of pending operations. */
UNIV_INTERN UNIV_INTERN
void void
fil_decr_pending_ibuf_merges( fil_decr_pending_ops(
/*=========================*/ /*=================*/
ulint id) /*!< in: space id */ ulint id) /*!< in: space id */
{ {
fil_space_t* space; fil_space_t* space;
@ -1942,13 +1942,13 @@ fil_decr_pending_ibuf_merges(
if (space == NULL) { if (space == NULL) {
fprintf(stderr, fprintf(stderr,
"InnoDB: Error: decrementing ibuf merge of a" "InnoDB: Error: decrementing pending operation"
" dropped tablespace %lu\n", " of a dropped tablespace %lu\n",
(ulong) id); (ulong) id);
} }
if (space != NULL) { if (space != NULL) {
space->n_pending_ibuf_merges--; space->n_pending_ops--;
} }
mutex_exit(&fil_system->mutex); mutex_exit(&fil_system->mutex);
@ -2159,7 +2159,7 @@ fil_op_log_parse_or_replay(
switch (type) { switch (type) {
case MLOG_FILE_DELETE: case MLOG_FILE_DELETE:
if (fil_tablespace_exists_in_mem(space_id)) { if (fil_tablespace_exists_in_mem(space_id)) {
ut_a(fil_delete_tablespace(space_id)); ut_a(fil_delete_tablespace(space_id, TRUE));
} }
break; break;
@ -2229,7 +2229,9 @@ UNIV_INTERN
ibool ibool
fil_delete_tablespace( fil_delete_tablespace(
/*==================*/ /*==================*/
ulint id) /*!< in: space id */ ulint id, /*!< in: space id */
ibool evict_all) /*!< in: TRUE if we want all pages
evicted from LRU. */
{ {
ibool success; ibool success;
fil_space_t* space; fil_space_t* space;
@ -2238,15 +2240,15 @@ fil_delete_tablespace(
char* path; char* path;
ut_a(id != 0); ut_a(id != 0);
stop_ibuf_merges: stop_new_ops:
mutex_enter(&fil_system->mutex); mutex_enter(&fil_system->mutex);
space = fil_space_get_by_id(id); space = fil_space_get_by_id(id);
if (space != NULL) { if (space != NULL) {
space->stop_ibuf_merges = TRUE; space->stop_new_ops = TRUE;
if (space->n_pending_ibuf_merges == 0) { if (space->n_pending_ops == 0) {
mutex_exit(&fil_system->mutex); mutex_exit(&fil_system->mutex);
count = 0; count = 0;
@ -2260,9 +2262,10 @@ stop_ibuf_merges:
ut_print_filename(stderr, space->name); ut_print_filename(stderr, space->name);
fprintf(stderr, ",\n" fprintf(stderr, ",\n"
"InnoDB: but there are %lu pending" "InnoDB: but there are %lu pending"
" ibuf merges on it.\n" " operations (most likely ibuf merges)"
" on it.\n"
"InnoDB: Loop %lu.\n", "InnoDB: Loop %lu.\n",
(ulong) space->n_pending_ibuf_merges, (ulong) space->n_pending_ops,
(ulong) count); (ulong) count);
} }
@ -2271,7 +2274,7 @@ stop_ibuf_merges:
os_thread_sleep(20000); os_thread_sleep(20000);
count++; count++;
goto stop_ibuf_merges; goto stop_new_ops;
} }
} }
@ -2297,7 +2300,7 @@ try_again:
} }
ut_a(space); ut_a(space);
ut_a(space->n_pending_ibuf_merges == 0); ut_a(space->n_pending_ops == 0);
space->is_being_deleted = TRUE; space->is_being_deleted = TRUE;
@ -2350,7 +2353,10 @@ try_again:
completely and permanently. The flag is_being_deleted also prevents completely and permanently. The flag is_being_deleted also prevents
fil_flush() from being applied to this tablespace. */ fil_flush() from being applied to this tablespace. */
buf_LRU_invalidate_tablespace(id); buf_LRU_flush_or_remove_pages(
id, evict_all
? BUF_REMOVE_ALL_NO_WRITE
: BUF_REMOVE_FLUSH_NO_WRITE);
#endif #endif
/* printf("Deleting tablespace %s id %lu\n", space->name, id); */ /* printf("Deleting tablespace %s id %lu\n", space->name, id); */
@ -2438,7 +2444,7 @@ fil_discard_tablespace(
{ {
ibool success; ibool success;
success = fil_delete_tablespace(id); success = fil_delete_tablespace(id, TRUE);
if (!success) { if (!success) {
fprintf(stderr, fprintf(stderr,
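The rename from *_pending_ibuf_merges to *_pending_ops reflects that the counter now guards any short operation against a tablespace that is about to be dropped, not just insert buffer merges. The calling pattern stays the same; a sketch (space is a placeholder id, and the ibuf0ibuf.c hunk below follows exactly this shape):

	if (fil_inc_pending_ops(space)) {
		/* the tablespace is being deleted: skip the operation */
		return;
	}

	/* ... safe to read pages / merge buffered entries here,
	the tablespace cannot be dropped underneath us ... */

	fil_decr_pending_ops(space);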

View File

@ -211,15 +211,13 @@ fseg_n_reserved_pages_low(
/********************************************************************//** /********************************************************************//**
Marks a page used. The page must reside within the extents of the given Marks a page used. The page must reside within the extents of the given
segment. */ segment. */
static static __attribute__((nonnull))
void void
fseg_mark_page_used( fseg_mark_page_used(
/*================*/ /*================*/
fseg_inode_t* seg_inode,/*!< in: segment inode */ fseg_inode_t* seg_inode,/*!< in: segment inode */
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size in bytes
or 0 for uncompressed pages */
ulint page, /*!< in: page offset */ ulint page, /*!< in: page offset */
xdes_t* descr, /* extent descriptor */
mtr_t* mtr); /*!< in: mtr */ mtr_t* mtr); /*!< in: mtr */
/**********************************************************************//** /**********************************************************************//**
Returns the first extent descriptor for a segment. We think of the extent Returns the first extent descriptor for a segment. We think of the extent
@ -643,12 +641,10 @@ xdes_calc_descriptor_index(
/********************************************************************//** /********************************************************************//**
Gets pointer to a the extent descriptor of a page. The page where the extent Gets pointer to a the extent descriptor of a page. The page where the extent
descriptor resides is x-locked. If the page offset is equal to the free limit descriptor resides is x-locked. This function no longer extends the data
of the space, adds new extents from above the free limit to the space free file.
list, if not free limit == space size. This adding is necessary to make the
descriptor defined, as they are uninitialized above the free limit.
@return pointer to the extent descriptor, NULL if the page does not @return pointer to the extent descriptor, NULL if the page does not
exist in the space or if the offset exceeds the free limit */ exist in the space or if the offset is >= the free limit */
UNIV_INLINE __attribute__((nonnull, warn_unused_result)) UNIV_INLINE __attribute__((nonnull, warn_unused_result))
xdes_t* xdes_t*
xdes_get_descriptor_with_space_hdr( xdes_get_descriptor_with_space_hdr(
@ -678,19 +674,10 @@ xdes_get_descriptor_with_space_hdr(
zip_size = dict_table_flags_to_zip_size( zip_size = dict_table_flags_to_zip_size(
mach_read_from_4(sp_header + FSP_SPACE_FLAGS)); mach_read_from_4(sp_header + FSP_SPACE_FLAGS));
/* If offset is >= size or > limit, return NULL */ if ((offset >= size) || (offset >= limit)) {
if ((offset >= size) || (offset > limit)) {
return(NULL); return(NULL);
} }
/* If offset is == limit, fill free list of the space. */
if (offset == limit) {
fsp_fill_free_list(FALSE, space, sp_header, mtr);
}
descr_page_no = xdes_calc_descriptor_page(zip_size, offset); descr_page_no = xdes_calc_descriptor_page(zip_size, offset);
if (descr_page_no == 0) { if (descr_page_no == 0) {
@ -2811,7 +2798,7 @@ got_hinted_page:
ut_ad(xdes_get_bit(ret_descr, XDES_FREE_BIT, ut_ad(xdes_get_bit(ret_descr, XDES_FREE_BIT,
ret_page % FSP_EXTENT_SIZE, mtr) == TRUE); ret_page % FSP_EXTENT_SIZE, mtr) == TRUE);
fseg_mark_page_used(seg_inode, space, zip_size, ret_page, mtr); fseg_mark_page_used(seg_inode, ret_page, ret_descr, mtr);
} }
return(fsp_page_create( return(fsp_page_create(
@ -3217,27 +3204,21 @@ fsp_get_available_space_in_free_extents(
/********************************************************************//** /********************************************************************//**
Marks a page used. The page must reside within the extents of the given Marks a page used. The page must reside within the extents of the given
segment. */ segment. */
static static __attribute__((nonnull))
void void
fseg_mark_page_used( fseg_mark_page_used(
/*================*/ /*================*/
fseg_inode_t* seg_inode,/*!< in: segment inode */ fseg_inode_t* seg_inode,/*!< in: segment inode */
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size in bytes
or 0 for uncompressed pages */
ulint page, /*!< in: page offset */ ulint page, /*!< in: page offset */
xdes_t* descr, /* extent descriptor */
mtr_t* mtr) /*!< in: mtr */ mtr_t* mtr) /*!< in: mtr */
{ {
xdes_t* descr;
ulint not_full_n_used; ulint not_full_n_used;
ut_ad(seg_inode && mtr);
ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE)); ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N) ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE); == FSEG_MAGIC_N_VALUE);
descr = xdes_get_descriptor(space, zip_size, page, mtr);
ut_ad(mtr_read_ulint(seg_inode + FSEG_ID, MLOG_4BYTES, mtr) ut_ad(mtr_read_ulint(seg_inode + FSEG_ID, MLOG_4BYTES, mtr)
== mtr_read_ulint(descr + XDES_ID, MLOG_4BYTES, mtr)); == mtr_read_ulint(descr + XDES_ID, MLOG_4BYTES, mtr));

View File

@ -1448,37 +1448,88 @@ values we want to reserve for multi-value inserts e.g.,
INSERT INTO T VALUES(), (), (); INSERT INTO T VALUES(), (), ();
innobase_next_autoinc() will be called with increment set to innobase_next_autoinc() will be called with increment set to 3 where
n * 3 where autoinc_lock_mode != TRADITIONAL because we want autoinc_lock_mode != TRADITIONAL because we want to reserve 3 values for
to reserve 3 values for the multi-value INSERT above. the multi-value INSERT above.
@return the next value */ @return the next value */
static static
ulonglong ulonglong
innobase_next_autoinc( innobase_next_autoinc(
/*==================*/ /*==================*/
ulonglong current, /*!< in: Current value */ ulonglong current, /*!< in: Current value */
ulonglong increment, /*!< in: increment current by */ ulonglong need, /*!< in: count of values needed */
ulonglong step, /*!< in: AUTOINC increment step */
ulonglong offset, /*!< in: AUTOINC offset */ ulonglong offset, /*!< in: AUTOINC offset */
ulonglong max_value, /*!< in: max value for type */ ulonglong max_value) /*!< in: max value for type */
ulonglong reserve) /*!< in: how many values to reserve */
{ {
ulonglong next_value; ulonglong next_value;
ulonglong block = need * step;
/* Should never be 0. */ /* Should never be 0. */
ut_a(increment > 0); ut_a(need > 0);
ut_a(block > 0);
ut_a(max_value > 0);
/* Current value should never be greater than the maximum. */
ut_a(current <= max_value);
/* According to MySQL documentation, if the offset is greater than /* According to MySQL documentation, if the offset is greater than
the increment then the offset is ignored. */ the step then the offset is ignored. */
if (offset >= increment) if (offset > block) {
offset = 0; offset = 0;
}
if (max_value <= current) /* Check for overflow. */
return max_value; if (block >= max_value
next_value = (current / increment) + reserve; || offset > max_value
next_value = next_value * increment + offset; || current == max_value
/* Check for overflow. */ || max_value - offset <= offset) {
if (next_value < current || next_value > max_value)
next_value = max_value; next_value = max_value;
} else {
ut_a(max_value > current);
ulonglong free = max_value - current;
if (free < offset || free - offset <= block) {
next_value = max_value;
} else {
next_value = 0;
}
}
if (next_value == 0) {
ulonglong next;
if (current > offset) {
next = (current - offset) / step;
} else {
next = (offset - current) / step;
}
ut_a(max_value > next);
next_value = next * step;
/* Check for multiplication overflow. */
ut_a(next_value >= next);
ut_a(max_value > next_value);
/* Check for overflow */
if (max_value - next_value >= block) {
next_value += block;
if (max_value - next_value >= offset) {
next_value += offset;
} else {
next_value = max_value;
}
} else {
next_value = max_value;
}
}
ut_a(next_value != 0);
ut_a(next_value <= max_value);
return(next_value); return(next_value);
} }
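The rewritten innobase_next_autoinc() now takes the number of values needed and the step separately and reserves a block of need * step values. A self-contained model of its non-overflow path with an assumed example input (the real function additionally saturates at max_value and handles the overflow cases shown above):

#include <stdio.h>

/* Simplified model of the non-overflow path of the new
   innobase_next_autoinc(); illustrative only. */
static unsigned long long
next_autoinc(unsigned long long current, unsigned long long need,
	     unsigned long long step, unsigned long long offset)
{
	unsigned long long	block = need * step;	/* values to reserve */
	unsigned long long	next;

	if (offset > block) {
		offset = 0;	/* an offset beyond the block is ignored */
	}

	next = (current > offset)
		? (current - offset) / step
		: (offset - current) / step;

	return(next * step + block + offset);
}

int main(void)
{
	/* current=5, 3 rows needed, auto_increment_increment=2,
	auto_increment_offset=1: block = 6, next = (5 - 1) / 2 = 2,
	result = 2 * 2 + 6 + 1 = 11, i.e. the rows get 7, 9 and 11. */
	printf("last reserved value: %llu\n", next_autoinc(5, 3, 2, 1));
	return(0);
}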
@ -3765,7 +3816,8 @@ ha_innobase::innobase_initialize_autoinc()
nor the offset, so use a default increment of 1. */ nor the offset, so use a default increment of 1. */
auto_inc = innobase_next_autoinc( auto_inc = innobase_next_autoinc(
read_auto_inc, 1, 1, col_max_value, 1); read_auto_inc, 1, 1, 0, col_max_value);
break; break;
} }
case DB_RECORD_NOT_FOUND: case DB_RECORD_NOT_FOUND:
@ -5238,15 +5290,16 @@ set_max_autoinc:
if (auto_inc <= col_max_value) { if (auto_inc <= col_max_value) {
ut_a(prebuilt->autoinc_increment > 0); ut_a(prebuilt->autoinc_increment > 0);
ulonglong need;
ulonglong offset; ulonglong offset;
ulonglong increment;
offset = prebuilt->autoinc_offset; offset = prebuilt->autoinc_offset;
need = prebuilt->autoinc_increment; increment = prebuilt->autoinc_increment;
auto_inc = innobase_next_autoinc( auto_inc = innobase_next_autoinc(
auto_inc, auto_inc,
need, offset, col_max_value, 1); 1, increment, offset,
col_max_value);
err = innobase_set_max_autoinc( err = innobase_set_max_autoinc(
auto_inc); auto_inc);
@ -5514,14 +5567,14 @@ ha_innobase::update_row(
if (auto_inc <= col_max_value && auto_inc != 0) { if (auto_inc <= col_max_value && auto_inc != 0) {
ulonglong need;
ulonglong offset; ulonglong offset;
ulonglong increment;
offset = prebuilt->autoinc_offset; offset = prebuilt->autoinc_offset;
need = prebuilt->autoinc_increment; increment = prebuilt->autoinc_increment;
auto_inc = innobase_next_autoinc( auto_inc = innobase_next_autoinc(
auto_inc, need, offset, col_max_value, 1); auto_inc, 1, increment, offset, col_max_value);
error = innobase_set_max_autoinc(auto_inc); error = innobase_set_max_autoinc(auto_inc);
} }
@ -6996,6 +7049,8 @@ ha_innobase::create(
DBUG_RETURN(HA_ERR_TO_BIG_ROW); DBUG_RETURN(HA_ERR_TO_BIG_ROW);
} }
ut_a(strlen(name) < sizeof(name2));
strcpy(name2, name); strcpy(name2, name);
normalize_table_name(norm_name, name2); normalize_table_name(norm_name, name2);
@ -10182,10 +10237,11 @@ ha_innobase::get_auto_increment(
ulonglong next_value; ulonglong next_value;
current = *first_value > col_max_value ? autoinc : *first_value; current = *first_value > col_max_value ? autoinc : *first_value;
/* Compute the last value in the interval */ /* Compute the last value in the interval */
next_value = innobase_next_autoinc( next_value = innobase_next_autoinc(
current, increment, offset, col_max_value, *nb_reserved_values); current, *nb_reserved_values, increment, offset,
col_max_value);
prebuilt->autoinc_last_value = next_value; prebuilt->autoinc_last_value = next_value;

View File

@@ -1,6 +1,6 @@
 /*****************************************************************************
-Copyright (c) 2005, 2010, Innobase Oy. All Rights Reserved.
+Copyright (c) 2005, 2012, Oracle and/or its affiliates. All Rights Reserved.
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -11,8 +11,8 @@ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc., 59 Temple
-Place, Suite 330, Boston, MA 02111-1307 USA
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
 *****************************************************************************/
@@ -1143,7 +1143,9 @@ ha_innobase::prepare_drop_index(
 			goto func_exit;
 		}
+		rw_lock_x_lock(dict_index_get_lock(index));
 		index->to_be_dropped = TRUE;
+		rw_lock_x_unlock(dict_index_get_lock(index));
 	}
 	/* If FOREIGN_KEY_CHECKS = 1 you may not drop an index defined
@@ -1262,7 +1264,9 @@ func_exit:
 			= dict_table_get_first_index(prebuilt->table);
 		do {
+			rw_lock_x_lock(dict_index_get_lock(index));
 			index->to_be_dropped = FALSE;
+			rw_lock_x_unlock(dict_index_get_lock(index));
 			index = dict_table_get_next_index(index);
 		} while (index);
 	}
@@ -1322,7 +1326,9 @@ ha_innobase::final_drop_index(
 	for (index = dict_table_get_first_index(prebuilt->table);
 	     index; index = dict_table_get_next_index(index)) {
+		rw_lock_x_lock(dict_index_get_lock(index));
 		index->to_be_dropped = FALSE;
+		rw_lock_x_unlock(dict_index_get_lock(index));
 	}
 	goto func_exit;
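Taking the index X-latch around the to_be_dropped updates pairs with the new check in btr_get_size(), which reads the flag while holding the same latch in S mode. A sketch of the two sides (fragment only; mtr and index come from the surrounding InnoDB code):

	/* writer: flip the flag only under the index X-latch */
	rw_lock_x_lock(dict_index_get_lock(index));
	index->to_be_dropped = TRUE;
	rw_lock_x_unlock(dict_index_get_lock(index));

	/* reader: S-latch the index, then the flag cannot change */
	mtr_start(&mtr);
	mtr_s_lock(dict_index_get_lock(index), &mtr);
	if (btr_get_size(index, BTR_TOTAL_SIZE, &mtr) == ULINT_UNDEFINED) {
		/* index is unavailable: being dropped or has no root */
	}
	mtr_commit(&mtr);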

View File

@@ -4428,7 +4428,7 @@ ibuf_merge_or_delete_for_page(
 		function. When the counter is > 0, that prevents tablespace
 		from being dropped. */
-		tablespace_being_deleted = fil_inc_pending_ibuf_merges(space);
+		tablespace_being_deleted = fil_inc_pending_ops(space);
 		if (UNIV_UNLIKELY(tablespace_being_deleted)) {
 			/* Do not try to read the bitmap page from space;
@@ -4454,7 +4454,7 @@ ibuf_merge_or_delete_for_page(
 			/* No inserts buffered for this page */
 			if (!tablespace_being_deleted) {
-				fil_decr_pending_ibuf_merges(space);
+				fil_decr_pending_ops(space);
 			}
 			return;
@@ -4753,7 +4753,7 @@ reset_bit:
 	if (update_ibuf_bitmap && !tablespace_being_deleted) {
-		fil_decr_pending_ibuf_merges(space);
+		fil_decr_pending_ops(space);
 	}
 #ifdef UNIV_IBUF_COUNT_DEBUG

View File

@@ -567,13 +567,16 @@ btr_parse_page_reorganize(
 #ifndef UNIV_HOTBACKUP
 /**************************************************************//**
 Gets the number of pages in a B-tree.
-@return number of pages */
+@return number of pages, or ULINT_UNDEFINED if the index is unavailable */
 UNIV_INTERN
 ulint
 btr_get_size(
 /*=========*/
 	dict_index_t*	index,	/*!< in: index */
-	ulint		flag);	/*!< in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */
+	ulint		flag,	/*!< in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */
+	mtr_t*		mtr)	/*!< in/out: mini-transaction where index
+				is s-latched */
+	__attribute__((nonnull, warn_unused_result));
 /**************************************************************//**
 Allocates a new file page to be used in an index tree. NOTE: we assume
 that the caller has made the reservation for free extents!

View File

@@ -789,11 +789,11 @@ buf_all_freed(void);
 /*********************************************************************//**
 Checks that there currently are no pending i/o-operations for the buffer
 pool.
-@return TRUE if there is no pending i/o */
+@return number of pending i/o operations */
 UNIV_INTERN
-ibool
-buf_pool_check_no_pending_io(void);
-/*==============================*/
+ulint
+buf_pool_check_num_pending_io(void);
+/*===============================*/
 /*********************************************************************//**
 Invalidates the file pages in the buffer pool when an archive recovery is
 completed. All the file pages buffered must be in a replaceable state when

View File

@@ -64,15 +64,14 @@ These are low-level functions
 #define BUF_LRU_FREE_SEARCH_LEN(b)	(5 + 2 * BUF_READ_AHEAD_AREA(b))
 /******************************************************************//**
-Invalidates all pages belonging to a given tablespace when we are deleting
-the data file(s) of that tablespace. A PROBLEM: if readahead is being started,
-what guarantees that it will not try to read in pages after this operation has
-completed? */
+Removes all pages belonging to a given tablespace. */
 UNIV_INTERN
 void
-buf_LRU_invalidate_tablespace(
+buf_LRU_flush_or_remove_pages(
 /*==========================*/
-	ulint	id);	/*!< in: space id */
+	ulint		id,		/*!< in: space id */
+	enum buf_remove_t buf_remove);	/*!< in: remove or flush
+					strategy */
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 /********************************************************************//**
 Insert a compressed block into buf_pool->zip_clean in the LRU order. */

View File

@@ -63,6 +63,15 @@ enum buf_io_fix {
					the flush_list */
 };
+/** Algorithm to remove the pages for a tablespace from the buffer pool.
+@See buf_LRU_flush_or_remove_pages(). */
+enum buf_remove_t {
+	BUF_REMOVE_ALL_NO_WRITE,	/*!< Remove all pages from the buffer
+					pool, don't write or sync to disk */
+	BUF_REMOVE_FLUSH_NO_WRITE,	/*!< Remove only, from the flush list,
+					don't write or sync to disk */
+};
 /** Parameters of binary buddy system for compressed pages (buf0buddy.h) */
 /* @{ */
 #define BUF_BUDDY_LOW_SHIFT	PAGE_ZIP_MIN_SIZE_SHIFT

View File

@@ -1,6 +1,6 @@
 /*****************************************************************************
-Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -11,8 +11,8 @@ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc., 59 Temple
-Place, Suite 330, Boston, MA 02111-1307 USA
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
 *****************************************************************************/
@@ -1087,14 +1087,6 @@ dict_index_get_page(
 /*================*/
 	const dict_index_t*	tree);	/*!< in: index */
 /*********************************************************************//**
-Sets the page number of the root of index tree. */
-UNIV_INLINE
-void
-dict_index_set_page(
-/*================*/
-	dict_index_t*	index,	/*!< in/out: index */
-	ulint		page);	/*!< in: page number */
-/*********************************************************************//**
 Gets the read-write lock of the index tree.
 @return read-write lock */
 UNIV_INLINE

View File

@@ -1,6 +1,6 @@
 /*****************************************************************************
-Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -11,8 +11,8 @@ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc., 59 Temple
-Place, Suite 330, Boston, MA 02111-1307 USA
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
 *****************************************************************************/
@@ -761,21 +761,6 @@ dict_index_get_page(
 	return(index->page);
 }
-/*********************************************************************//**
-Sets the page number of the root of index tree. */
-UNIV_INLINE
-void
-dict_index_set_page(
-/*================*/
-	dict_index_t*	index,	/*!< in/out: index */
-	ulint		page)	/*!< in: page number */
-{
-	ut_ad(index);
-	ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
-
-	index->page = page;
-}
 /*********************************************************************//**
 Gets the read-write lock of the index tree.
 @return read-write lock */

Some files were not shown because too many files have changed in this diff.