Merge from mysql-trunk-merge.

Alexander Nozdrin 2009-10-19 17:36:19 +04:00
commit 3fd2f50b6c
185 changed files with 5351 additions and 2743 deletions

View File

@ -221,12 +221,16 @@ ENDIF(WITHOUT_DYNAMIC_PLUGINS)
FILE(GLOB STORAGE_SUBDIRS storage/*)
FOREACH(SUBDIR ${STORAGE_SUBDIRS})
FILE(RELATIVE_PATH DIRNAME ${PROJECT_SOURCE_DIR}/storage ${SUBDIR})
STRING(TOUPPER ${DIRNAME} ENGINE)
STRING(TOLOWER ${DIRNAME} ENGINE_LOWER)
IF (EXISTS ${SUBDIR}/CMakeLists.txt)
# Check MYSQL_STORAGE_ENGINE macro is present
FILE(STRINGS ${SUBDIR}/CMakeLists.txt HAVE_STORAGE_ENGINE REGEX MYSQL_STORAGE_ENGINE)
IF(HAVE_STORAGE_ENGINE)
# Extract name of engine from HAVE_STORAGE_ENGINE
STRING(REGEX REPLACE ".*MYSQL_STORAGE_ENGINE\\((.*)\\).*"
"\\1" ENGINE_NAME ${HAVE_STORAGE_ENGINE})
STRING(TOUPPER ${ENGINE_NAME} ENGINE)
STRING(TOLOWER ${ENGINE_NAME} ENGINE_LOWER)
SET(ENGINE_BUILD_TYPE "DYNAMIC")
# Read plug.in to find out if a plugin is mandatory and whether it supports
# being built as a shared library (dynamic).
@ -252,6 +256,7 @@ FOREACH(SUBDIR ${STORAGE_SUBDIRS})
SET (MYSQLD_STATIC_ENGINE_LIBS ${MYSQLD_STATIC_ENGINE_LIBS} ${ENGINE_LOWER})
SET (STORAGE_ENGINE_DEFS "${STORAGE_ENGINE_DEFS} -DWITH_${ENGINE}_STORAGE_ENGINE")
SET (WITH_${ENGINE}_STORAGE_ENGINE TRUE)
SET (${ENGINE}_DIR ${DIRNAME})
ENDIF (ENGINE_BUILD_TYPE STREQUAL "STATIC")
ENDIF(EXISTS ${SUBDIR}/plug.in)
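
The loop above expects each storage/<dir>/CMakeLists.txt to announce its engine through a MYSQL_STORAGE_ENGINE() call; the REGEX REPLACE pulls the engine name out of that call, and the upper-cased name is later used to look up variables such as <ENGINE>_SOURCES. A minimal sketch of such a file, with a purely hypothetical "example" engine and source list:

# Hypothetical storage/example/CMakeLists.txt, for illustration only.
# The name inside MYSQL_STORAGE_ENGINE() is what the FILE(STRINGS) /
# STRING(REGEX REPLACE) pair above extracts into ENGINE and ENGINE_LOWER.
# MYSQL_STORAGE_ENGINE() itself is a macro provided elsewhere in the build tree.
SET(EXAMPLE_SOURCES ha_example.cc)
MYSQL_STORAGE_ENGINE(EXAMPLE)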

View File

@ -2863,7 +2863,7 @@ com_help(String *buffer __attribute__((unused)),
"For developer information, including the MySQL Reference Manual, "
"visit:\n"
" http://dev.mysql.com/\n"
"To buy MySQL Network Support, training, or other products, visit:\n"
"To buy MySQL Enterprise support, training, or other products, visit:\n"
" https://shop.mysql.com/\n", INFO_INFO);
put_info("List of all MySQL commands:", INFO_INFO);
if (!named_cmds)

View File

@ -6775,8 +6775,10 @@ void run_query_stmt(MYSQL *mysql, struct st_command *command,
MYSQL_STMT *stmt;
DYNAMIC_STRING ds_prepare_warnings;
DYNAMIC_STRING ds_execute_warnings;
ulonglong affected_rows;
DBUG_ENTER("run_query_stmt");
DBUG_PRINT("query", ("'%-.60s'", query));
LINT_INIT(affected_rows);
/*
Init a new stmt if it's not already one created for this connection
@ -6911,9 +6913,6 @@ void run_query_stmt(MYSQL *mysql, struct st_command *command,
Need to grab affected rows information before getting
warnings here
*/
ulonglong affected_rows;
LINT_INIT(affected_rows);
if (!disable_info)
affected_rows= mysql_affected_rows(mysql);

View File

@ -2835,7 +2835,7 @@ server_scripts=
dnl This probably should be cleaned up more - for now the threaded
dnl client is just using plain-old libs.
sql_client_dirs="strings regex mysys dbug libmysql"
sql_client_dirs="strings mysys dbug extra regex libmysql"
AM_CONDITIONAL(THREAD_SAFE_CLIENT, test "$THREAD_SAFE_CLIENT" != "no")
@ -2901,9 +2901,10 @@ AC_SUBST(mysql_plugin_defs)
# Now that sql_client_dirs and sql_server_dirs are stable, determine the union.
# Start with the (longer) server list, add each client item not yet present.
sql_union_dirs=" $sql_server_dirs "
for DIR in $sql_client_dirs
# We support client-only builds by "--without-server", but not vice versa,
# so we start with the client list, then add each server item not yet present.
sql_union_dirs=" $sql_client_dirs "
for DIR in $sql_server_dirs
do
if echo " $sql_union_dirs " | grep " $DIR " >/dev/null
then

View File

@ -741,7 +741,6 @@ extern int wild_compare(const char *str,const char *wildstr,
extern WF_PACK *wf_comp(char * str);
extern int wf_test(struct wild_file_pack *wf_pack,const char *name);
extern void wf_end(struct wild_file_pack *buffer);
extern size_t strip_sp(char * str);
extern my_bool array_append_string_unique(const char *str,
const char **array, size_t size);
extern void get_date(char * to,int timeflag,time_t use_time);

View File

@ -81,10 +81,11 @@ ENDFOREACH(rpath)
FOREACH (ENGINE_LIB ${MYSQLD_STATIC_ENGINE_LIBS})
INCLUDE(${CMAKE_SOURCE_DIR}/storage/${ENGINE_LIB}/CMakeLists.txt)
STRING(TOUPPER ${ENGINE_LIB} ENGINE_LIB_UPPER)
SET(ENGINE_DIR ${${ENGINE_LIB_UPPER}_DIR})
INCLUDE(${CMAKE_SOURCE_DIR}/storage/${ENGINE_DIR}/CMakeLists.txt)
FOREACH(rpath ${${ENGINE_LIB_UPPER}_SOURCES})
SET(LIB_SOURCES ${LIB_SOURCES} ${CMAKE_SOURCE_DIR}/storage/${ENGINE_LIB}/${rpath})
SET(LIB_SOURCES ${LIB_SOURCES} ${CMAKE_SOURCE_DIR}/storage/${ENGINE_DIR}/${rpath})
ENDFOREACH(rpath)
ENDFOREACH(ENGINE_LIB)
@ -147,6 +148,14 @@ ADD_LIBRARY(mysqlserver STATIC ${LIBMYSQLD_SOURCES})
ADD_DEPENDENCIES(mysqlserver GenServerSource GenError)
TARGET_LINK_LIBRARIES(mysqlserver)
# Add any additional libraries requested by engine(s)
FOREACH (ENGINE_LIB ${MYSQLD_STATIC_ENGINE_LIBS})
STRING(TOUPPER ${ENGINE_LIB} ENGINE_LIB_UPPER)
IF(${ENGINE_LIB_UPPER}_LIBS)
TARGET_LINK_LIBRARIES(mysqlserver ${${ENGINE_LIB_UPPER}_LIBS})
ENDIF(${ENGINE_LIB_UPPER}_LIBS)
ENDFOREACH(ENGINE_LIB)
ADD_LIBRARY(libmysqld SHARED cmake_dummy.c libmysqld.def)
ADD_DEPENDENCIES(libmysqld mysqlserver)
TARGET_LINK_LIBRARIES(libmysqld mysqlserver wsock32)
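
The new FOREACH block above lets a statically linked engine request extra link libraries for the embedded server by setting <ENGINE>_LIBS in its CMakeLists.txt; the ${ENGINE}_DIR value recorded by the top-level loop is what locates that file. A hedged sketch of the corresponding addition to the hypothetical engine file from the earlier example (the zlib name is illustrative only):

# Hypothetical addition to storage/example/CMakeLists.txt: EXAMPLE_LIBS is
# picked up by the loop above and forwarded to
# TARGET_LINK_LIBRARIES(mysqlserver ...).
SET(EXAMPLE_LIBS zlib)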

View File

@ -2,6 +2,7 @@
# in alphabetical order. This also helps with merge conflict resolution.
binlog.binlog_tmp_table* # Bug#45578:2009-07-10 alik Test binlog_tmp_table fails randomly on PB2: Unknown table 't2'
binlog.binlog_multi_engine # joro : NDB tests marked as experimental as agreed with bochklin
funcs_1.charset_collation_1 # depends on compile-time decisions

View File

@ -270,3 +270,42 @@ INSERT INTO test.t1 VALUES (1), (2);
CREATE TABLE test.t2 SELECT * FROM test.t1;
USE test;
DROP TABLES t1, t2;
#
# Bug#46640
# This test verifies whether the server_id stored in the "format
# description BINLOG statement" overrides the server_id of the
# server executing the statements.
#
connect (fresh,localhost,root,,test);
connection fresh;
RESET MASTER;
CREATE TABLE t1 (a INT PRIMARY KEY);
# Format description event, with server_id = 10;
BINLOG '
3u9kSA8KAAAAZgAAAGoAAAABAAQANS4xLjM1LW1hcmlhLWJldGExLWRlYnVnLWxvZwAAAAAAAAAA
AAAAAAAAAAAAAAAAAADe72RIEzgNAAgAEgAEBAQEEgAAUwAEGggAAAAICAgC
';
# What server_id is logged for a statement? Should be our own, not the
# one from the format description event.
INSERT INTO t1 VALUES (1);
# INSERT INTO t1 VALUES (2), with server_id=20. Check that this is logged
# with our own server id, not the 20 from the BINLOG statement.
BINLOG '
3u9kSBMUAAAAKQAAAJEBAAAAABoAAAAAAAAABHRlc3QAAnQxAAEDAAA=
3u9kSBcUAAAAIgAAALMBAAAQABoAAAAAAAEAAf/+AgAAAA==
';
# Show binlog events to check that server ids are correct.
--replace_column 1 # 2 # 5 #
--replace_regex /Server ver: .*, Binlog ver: .*/Server ver: #, Binlog ver: #/ /table_id: [0-9]+/table_id: #/
SHOW BINLOG EVENTS;
DROP TABLE t1;
disconnect fresh;

View File

@ -0,0 +1,300 @@
################################################################################
# Let
# - B be begin, C be commit, and R be rollback.
# - T be a statement that accesses and changes only transactional tables,
#   i.e. T-tables.
# - N be a statement that accesses and changes only non-transactional tables,
#   i.e. N-tables.
# - M be a mixed statement, i.e. a statement that updates both T- and
#   N-tables.
# - M* be a mixed statement that fails while updating either a T-table or
#   an N-table.
# - N* be a statement that fails while updating an N-table.
#
# In this test case, when changes are logged as rows (i.e. in RBR or MIXED
# mode), we check whether an M* statement that happens early in a transaction is
# written to the binary log outside the boundaries of the transaction and
# wrapped up in a BEGIN/ROLLBACK. This is done to keep the slave consistent with
# the master, as the rollback will keep the changes on N-tables and undo them on
# T-tables. In particular, we expect the following behavior:
#
# 1. B M* T C would generate in the binlog B M* R B T C.
# 2. B M M* C would generate in the binlog B M M* C.
# 3. B M* M* T C would generate in the binlog B M* R B M* R B T C.
#
# SBR is not considered in this test because a failing statement is written to
# the binary log along with its error code, so that a slave executes it and
# rolls it back, thus undoing the effects on T-tables.
#
# Note that, in the first case, we are not preserving history from the master as
# we are introducing a rollback that never happened. However, this seems to be
# more acceptable than making the slave diverge. In the second case, the slave
# will diverge as the changes on T-tables that originated from the M statement
# are rolled back on the master but not on the slave. Unfortunately, we cannot
# simply roll the transaction back as this would undo any uncommitted changes
# on T-tables.
#
# We check two more cases involving INSERT M...SELECT*, which produces the
# following results:
#
# 1. "B T INSERT M...SELECT* C" with an error in INSERT M...SELECT* generates in
#    the binlog the following entries: "Nothing".
# 2. "B INSERT M...SELECT* C" with an error in INSERT M...SELECT* generates in
#    the binlog the following entries: "B INSERT M...SELECT* R".
#
# Finally, we also check if any N statement that happens early in a transaction
# (i.e. before any T or M statement) is written to the binary log outside the
# boundaries of the transaction. In particular, we expect the following
# behavior:
#
# 1. B N N T C would generate in the binlog B N C B N C B T C.
# 2. B N N T R would generate in the binlog B N C B N C B T R.
# 3. B N* N* T C would generate in the binlog B N R B N R B T C.
# 4. B N* N* T R would generate in the binlog B N R B N R B T R.
# 5. B N N T N T C would generate in the binlog B N C B N C B T N T C.
# 6. B N N T N T R would generate in the binlog B N C B N C B T N T R.
#
# Such issues do not happen in SBR. In RBR and MBR, a full-fledged fix will be
# pushed after WL#2687.
#
# Please remove this test case after WL#2687 is pushed.
################################################################################
--echo ###################################################################################
--echo # CONFIGURATION
--echo ###################################################################################
CREATE TABLE nt_1 (a text, b int PRIMARY KEY) ENGINE = MyISAM;
CREATE TABLE nt_2 (a text, b int PRIMARY KEY) ENGINE = MyISAM;
CREATE TABLE tt_1 (a text, b int PRIMARY KEY) ENGINE = Innodb;
CREATE TABLE tt_2 (a text, b int PRIMARY KEY) ENGINE = Innodb;
DELIMITER |;
CREATE TRIGGER tr_i_tt_1_to_nt_1 BEFORE INSERT ON tt_1 FOR EACH ROW
BEGIN
INSERT INTO nt_1 VALUES (NEW.a, NEW.b);
END|
CREATE TRIGGER tr_i_nt_2_to_tt_2 BEFORE INSERT ON nt_2 FOR EACH ROW
BEGIN
INSERT INTO tt_2 VALUES (NEW.a, NEW.b);
END|
DELIMITER ;|
--echo ###################################################################################
--echo # CHECK HISTORY IN BINLOG
--echo ###################################################################################
--echo
--echo
--echo
--echo *** "B M* T C" with error in M* generates in the binlog the "B M* R B T C" entries
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
INSERT INTO nt_1 VALUES ("new text 1", 1);
BEGIN;
--error ER_DUP_ENTRY
INSERT INTO tt_1 VALUES (USER(), 2), (USER(), 1);
INSERT INTO tt_2 VALUES ("new text 3", 3);
COMMIT;
--source include/show_binlog_events.inc
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
INSERT INTO tt_2 VALUES ("new text 4", 4);
BEGIN;
--error ER_DUP_ENTRY
INSERT INTO nt_2 VALUES (USER(), 5), (USER(), 4);
INSERT INTO tt_2 VALUES ("new text 6", 6);
COMMIT;
--source include/show_binlog_events.inc
--echo
--echo
--echo
--echo *** "B M M* T C" with error in M* generates in the binlog the "B M M* T C" entries
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
INSERT INTO nt_1 VALUES ("new text 10", 10);
BEGIN;
INSERT INTO tt_1 VALUES ("new text 7", 7), ("new text 8", 8);
--error ER_DUP_ENTRY
INSERT INTO tt_1 VALUES (USER(), 9), (USER(), 10);
INSERT INTO tt_2 VALUES ("new text 11", 11);
COMMIT;
--source include/show_binlog_events.inc
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
INSERT INTO tt_2 VALUES ("new text 15", 15);
BEGIN;
INSERT INTO nt_2 VALUES ("new text 12", 12), ("new text 13", 13);
--error ER_DUP_ENTRY
INSERT INTO nt_2 VALUES (USER(), 14), (USER(), 15);
INSERT INTO tt_2 VALUES ("new text 16", 16);
COMMIT;
--source include/show_binlog_events.inc
--echo
--echo
--echo
--echo *** "B M* M* T C" with error in M* generates in the binlog the "B M* R B M* R B T C" entries
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
INSERT INTO nt_1 VALUES ("new text 18", 18);
INSERT INTO nt_1 VALUES ("new text 20", 20);
BEGIN;
--error ER_DUP_ENTRY
INSERT INTO tt_1 VALUES (USER(), 17), (USER(), 18);
--error ER_DUP_ENTRY
INSERT INTO tt_1 VALUES (USER(), 19), (USER(), 20);
INSERT INTO tt_2 VALUES ("new text 21", 21);
COMMIT;
--source include/show_binlog_events.inc
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
INSERT INTO tt_2 VALUES ("new text 23", 23);
INSERT INTO tt_2 VALUES ("new text 25", 25);
BEGIN;
--error ER_DUP_ENTRY
INSERT INTO nt_2 VALUES (USER(), 22), (USER(), 23);
--error ER_DUP_ENTRY
INSERT INTO nt_2 VALUES (USER(), 24), (USER(), 25);
INSERT INTO tt_2 VALUES ("new text 26", 26);
COMMIT;
--source include/show_binlog_events.inc
--echo
--echo
--echo
--echo *** "B T INSERT M...SELECT* C" with an error in INSERT M...SELECT* generates
--echo *** in the binlog the following entries: "Nothing".
--echo *** There is a bug that will be fixed after WL#2687. Please check BUG#47175 for further details.
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
TRUNCATE TABLE nt_2;
TRUNCATE TABLE tt_2;
INSERT INTO tt_2 VALUES ("new text 7", 7);
BEGIN;
INSERT INTO tt_2 VALUES ("new text 27", 27);
--error ER_DUP_ENTRY
INSERT INTO nt_2(a, b) SELECT USER(), b FROM nt_1;
INSERT INTO tt_2 VALUES ("new text 28", 28);
ROLLBACK;
--source include/show_binlog_events.inc
--echo
--echo
--echo
--echo *** "B INSERT M..SELECT* C" with an error in INSERT M...SELECT* generates
--echo *** in the binlog the following entries: "B INSERT M..SELECT* R".
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
TRUNCATE TABLE nt_2;
TRUNCATE TABLE tt_2;
INSERT INTO tt_2 VALUES ("new text 7", 7);
BEGIN;
--error ER_DUP_ENTRY
INSERT INTO nt_2(a, b) SELECT USER(), b FROM nt_1;
COMMIT;
--source include/show_binlog_events.inc
--echo
--echo
--echo
--echo *** "B N N T C" generates in the binlog the "B N C B N C B T C" entries
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
TRUNCATE TABLE nt_1;
TRUNCATE TABLE tt_2;
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 1);
INSERT INTO nt_1 VALUES (USER(), 2);
INSERT INTO tt_2 VALUES (USER(), 3);
COMMIT;
--source include/show_binlog_events.inc
--echo
--echo
--echo
--echo *** "B N N T R" generates in the binlog the "B N C B N C B T R" entries
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 4);
INSERT INTO nt_1 VALUES (USER(), 5);
INSERT INTO tt_2 VALUES (USER(), 6);
ROLLBACK;
--source include/show_binlog_events.inc
--echo
--echo
--echo
--echo *** "B N* N* T C" with error in N* generates in the binlog the "B N R B N R B T C" entries
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
BEGIN;
--error ER_DUP_ENTRY
INSERT INTO nt_1 VALUES (USER(), 7), (USER(), 1);
--error ER_DUP_ENTRY
INSERT INTO nt_1 VALUES (USER(), 8), (USER(), 1);
INSERT INTO tt_2 VALUES (USER(), 9);
COMMIT;
--source include/show_binlog_events.inc
--echo
--echo
--echo
--echo *** "B N* N* T R" with error in N* generates in the binlog the "B N R B N R B T R" entries
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
BEGIN;
--error ER_DUP_ENTRY
INSERT INTO nt_1 VALUES (USER(), 10), (USER(), 1);
--error ER_DUP_ENTRY
INSERT INTO nt_1 VALUES (USER(), 11), (USER(), 1);
INSERT INTO tt_2 VALUES (USER(), 12);
ROLLBACK;
--source include/show_binlog_events.inc
--echo
--echo
--echo
--echo *** "B N N T N T C" generates in the binlog the "B N C B N C B T N T C" entries
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 13);
INSERT INTO nt_1 VALUES (USER(), 14);
INSERT INTO tt_2 VALUES (USER(), 15);
INSERT INTO nt_1 VALUES (USER(), 16);
INSERT INTO tt_2 VALUES (USER(), 17);
COMMIT;
--source include/show_binlog_events.inc
--echo
--echo
--echo
--echo *** "B N N T N T R" generates in the binlog the "B N C B N C B T N T R" entries
--echo
let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 18);
INSERT INTO nt_1 VALUES (USER(), 19);
INSERT INTO tt_2 VALUES (USER(), 20);
INSERT INTO nt_1 VALUES (USER(), 21);
INSERT INTO tt_2 VALUES (USER(), 22);
ROLLBACK;
--source include/show_binlog_events.inc
--echo ###################################################################################
--echo # CLEAN
--echo ###################################################################################
DROP TABLE tt_1;
DROP TABLE tt_2;
DROP TABLE nt_1;
DROP TABLE nt_2;

View File

@ -22,3 +22,4 @@ connection master;
select * from t1;
commit;
drop table t1;
-- sync_slave_with_master

View File

@ -321,3 +321,11 @@ Vv
Xx
YyÝýỲỳỴỵỶỷỸỹ
drop table t1;
Bug#46448 trailing spaces are not ignored when user collation maps space != 0x20
set names latin1;
show collation like 'latin1_test';
Collation Charset Id Default Compiled Sortlen
latin1_test latin1 99 Yes 1
select "foo" = "foo " collate latin1_test;
"foo" = "foo " collate latin1_test
1

View File

@ -1477,3 +1477,47 @@ COUNT(*)
SET SQL_MODE=default;
DROP TABLE t1;
End of 5.0 tests
#
# BUG#47280 - strange results from count(*) with order by multiple
# columns without where/group
#
#
# Initialize test
#
CREATE TABLE t1 (
pk INT NOT NULL,
i INT,
PRIMARY KEY (pk)
);
INSERT INTO t1 VALUES (1,11),(2,12),(3,13);
#
# Start test
# All the following queries shall return 1 record
#
# Masking all correct values {11...13} for column i in this result.
SELECT MAX(pk) as max, i
FROM t1
ORDER BY max;
max i
3 #
EXPLAIN
SELECT MAX(pk) as max, i
FROM t1
ORDER BY max;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using temporary
# Only 11 is correct for column i in this result
SELECT MAX(pk) as max, i
FROM t1
WHERE pk<2
ORDER BY max;
max i
1 11
#
# Cleanup
#
DROP TABLE t1;
End of 5.1 tests

View File

@ -876,10 +876,10 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL idx_t1_1 163 NULL 17 Using where; Using index for group-by
explain select a1,a2,b, max(c) from t1 where (c > 'b1') or (c <= 'g1') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL idx_t1_1 147 NULL 17 Using where; Using index for group-by
1 SIMPLE t1 range NULL idx_t1_1 163 NULL 17 Using where; Using index for group-by
explain select a1,a2,b,min(c),max(c) from t1 where (c > 'b1') or (c <= 'g1') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL idx_t1_1 147 NULL 17 Using where; Using index for group-by
1 SIMPLE t1 range NULL idx_t1_1 163 NULL 17 Using where; Using index for group-by
explain select a1,a2,b,min(c),max(c) from t1 where (c > 'b111') and (c <= 'g112') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL idx_t1_1 163 NULL 17 Using where; Using index for group-by
@ -924,7 +924,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range NULL idx_t2_1 163 NULL # Using where; Using index for group-by
explain select a1,a2,b, max(c) from t2 where (c > 'b1') or (c <= 'g1') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range NULL idx_t2_1 146 NULL # Using where; Using index for group-by
1 SIMPLE t2 range NULL idx_t2_1 163 NULL # Using where; Using index for group-by
explain select a1,a2,b,min(c),max(c) from t2 where (c > 'b1') or (c <= 'g1') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range NULL idx_t2_1 163 NULL # Using where; Using index for group-by

View File

@ -0,0 +1,35 @@
drop table if exists t1;
set session transaction isolation level read committed;
create table t1(a int not null) engine=innodb DEFAULT CHARSET=latin1;
create table t2 like t1;
insert into t2 values (1),(2),(3),(4),(5),(6),(7);
set autocommit=0;
begin;
replace into t1 select * from t2;
set session transaction isolation level read committed;
set autocommit=0;
delete from t2 where a=5;
commit;
delete from t2;
commit;
commit;
begin;
insert into t1 select * from t2;
set session transaction isolation level read committed;
set autocommit=0;
delete from t2 where a=5;
commit;
delete from t2;
commit;
commit;
select * from t1;
a
1
2
3
4
5
6
7
drop table t1;
drop table t2;

View File

@ -6,4 +6,5 @@ SELECT f4, f8 FROM bug34300;
f4 f8
xxx zzz
DROP TABLE bug34300;
SET @@global.max_allowed_packet=1048576;
SET @@global.max_allowed_packet=default;

View File

@ -0,0 +1,14 @@
create table bug44369 (DB_ROW_ID int) engine=innodb;
ERROR HY000: Can't create table 'test.bug44369' (errno: -1)
create table bug44369 (db_row_id int) engine=innodb;
ERROR HY000: Can't create table 'test.bug44369' (errno: -1)
show errors;
Level Code Message
Error 1005 Error creating table 'test/bug44369' with column name 'db_row_id'. 'db_row_id' is a reserved name. Please try to re-create the table with a different column name.
Error 1005 Can't create table 'test.bug44369' (errno: -1)
create table bug44369 (db_TRX_Id int) engine=innodb;
ERROR HY000: Can't create table 'test.bug44369' (errno: -1)
show errors;
Level Code Message
Error 1005 Error creating table 'test/bug44369' with column name 'db_TRX_Id'. 'db_TRX_Id' is a reserved name. Please try to re-create the table with a different column name.
Error 1005 Can't create table 'test.bug44369' (errno: -1)

View File

@ -0,0 +1,9 @@
CREATE TABLE bug44571 (foo INT) ENGINE=InnoDB;
ALTER TABLE bug44571 CHANGE foo bar INT;
ALTER TABLE bug44571 ADD INDEX bug44571b (foo);
ERROR 42000: Key column 'foo' doesn't exist in table
ALTER TABLE bug44571 ADD INDEX bug44571b (bar);
ERROR HY000: Incorrect key file for table 'bug44571'; try to repair it
CREATE INDEX bug44571b ON bug44571 (bar);
ERROR HY000: Incorrect key file for table 'bug44571'; try to repair it
DROP TABLE bug44571;

View File

@ -0,0 +1,17 @@
create table bug46000(`id` int,key `GEN_CLUST_INDEX`(`id`))engine=innodb;
ERROR HY000: Can't create table 'test.bug46000' (errno: -1)
create table bug46000(`id` int, key `GEN_clust_INDEX`(`id`))engine=innodb;
ERROR HY000: Can't create table 'test.bug46000' (errno: -1)
show errors;
Level Code Message
Error 1005 Cannot Create Index with name 'GEN_CLUST_INDEX'. The name is reserved for the system default primary index.
Error 1005 Can't create table 'test.bug46000' (errno: -1)
create table bug46000(id int) engine=innodb;
create index GEN_CLUST_INDEX on bug46000(id);
ERROR HY000: Can't create table '#sql-temporary' (errno: -1)
show errors;
Level Code Message
Error 1005 Cannot Create Index with name 'GEN_CLUST_INDEX'. The name is reserved for the system default primary index.
Error 1005 Can't create table '#sql-temporary' (errno: -1)
create index idx on bug46000(id);
drop table bug46000;

View File

@ -2271,4 +2271,25 @@ checksum table t3;
Table Checksum
test.t3 326284887
drop table t1,t2,t3;
CREATE TABLE t1(a INT, b CHAR(10), KEY(a), KEY(b));
INSERT INTO t1 VALUES(1,'0'),(2,'0'),(3,'0'),(4,'0'),(5,'0'),
(6,'0'),(7,'0');
INSERT INTO t1 SELECT a+10,b FROM t1;
INSERT INTO t1 SELECT a+20,b FROM t1;
INSERT INTO t1 SELECT a+40,b FROM t1;
INSERT INTO t1 SELECT a+80,b FROM t1;
INSERT INTO t1 SELECT a+160,b FROM t1;
INSERT INTO t1 SELECT a+320,b FROM t1;
INSERT INTO t1 SELECT a+640,b FROM t1;
INSERT INTO t1 SELECT a+1280,b FROM t1;
INSERT INTO t1 SELECT a+2560,b FROM t1;
INSERT INTO t1 SELECT a+5120,b FROM t1;
SET myisam_sort_buffer_size=4;
REPAIR TABLE t1;
Table Op Msg_type Msg_text
test.t1 repair error myisam_sort_buffer_size is too small
test.t1 repair warning Number of rows changed from 0 to 7168
test.t1 repair status OK
SET myisam_sort_buffer_size=@@global.myisam_sort_buffer_size;
DROP TABLE t1;
End of 5.1 tests

View File

@ -1557,3 +1557,34 @@ a
2001
1991
DROP TABLE t1;
#
# Bug #43029: FORCE INDEX FOR ORDER BY is ignored when join buffering
# is used
#
CREATE TABLE t1 (a INT, b INT, KEY (a));
INSERT INTO t1 VALUES (0, NULL), (1, NULL), (2, NULL), (3, NULL);
INSERT INTO t1 SELECT a+4, b FROM t1;
INSERT INTO t1 SELECT a+8, b FROM t1;
CREATE TABLE t2 (a INT, b INT);
INSERT INTO t2 VALUES (0,NULL), (1,NULL), (2,NULL), (3,NULL), (4,NULL);
INSERT INTO t2 SELECT a+4, b FROM t2;
# shouldn't have "using filesort"
EXPLAIN
SELECT * FROM t1 FORCE INDEX FOR ORDER BY (a), t2 WHERE t1.a < 2 ORDER BY t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 2 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 10
# should have "using filesort"
EXPLAIN
SELECT * FROM t1 USE INDEX FOR ORDER BY (a), t2 WHERE t1.a < 2 ORDER BY t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 2 Using where; Using temporary; Using filesort
1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using join buffer
# should have "using filesort"
EXPLAIN
SELECT * FROM t1 FORCE INDEX FOR JOIN (a), t2 WHERE t1.a < 2 ORDER BY t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 2 Using where; Using temporary; Using filesort
1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using join buffer
DROP TABLE t1, t2;
End of 5.1 tests

View File

@ -50,6 +50,21 @@ t1 CREATE TABLE `t1` (
PARTITION p3 VALUES LESS THAN (733969) ENGINE = MyISAM,
PARTITION pmax VALUES LESS THAN MAXVALUE ENGINE = MyISAM) */
DROP TABLE t1;
create table t1 (a int, b int, key(a))
partition by list (a)
( partition p0 values in (1),
partition p1 values in (2));
insert into t1 values (1,1),(2,1),(2,2),(2,3);
show indexes from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t1 1 a 1 a A NULL NULL NULL YES BTREE
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
show indexes from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t1 1 a 1 a A 1 NULL NULL YES BTREE
drop table t1;
CREATE TABLE t1 (a INT, FOREIGN KEY (a) REFERENCES t0 (a))
ENGINE=MyISAM
PARTITION BY HASH (a);

View File

@ -0,0 +1,22 @@
DROP TABLE IF EXISTS `t1`;
# Bug#46922: crash when adding partitions and open_files_limit is reached
CREATE TABLE t1 (a INT PRIMARY KEY)
ENGINE=MyISAM PARTITION BY KEY () PARTITIONS 1;
INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11);
# if the bug exists, then crash will happen here
ALTER TABLE t1 ADD PARTITION PARTITIONS 511;
ERROR HY000: Out of resources when opening file '<partition file>' (Errcode: 24)
SELECT * FROM t1;
a
1
10
11
2
3
4
5
6
7
8
9
DROP TABLE t1;

View File

@ -1219,3 +1219,182 @@ explain select * from t2 where a=1000 and b<11;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref a a 5 const 502 Using where
drop table t1, t2;
CREATE TABLE t1( a INT, b INT, KEY( a, b ) );
CREATE TABLE t2( a INT, b INT, KEY( a, b ) );
CREATE TABLE t3( a INT, b INT, KEY( a, b ) );
INSERT INTO t1( a, b )
VALUES (0, 1), (1, 2), (1, 4), (2, 3), (5, 0), (9, 7);
INSERT INTO t2( a, b )
VALUES ( 1, 1), ( 2, 1), ( 3, 1), ( 4, 1), ( 5, 1),
( 6, 1), ( 7, 1), ( 8, 1), ( 9, 1), (10, 1),
(11, 1), (12, 1), (13, 1), (14, 1), (15, 1),
(16, 1), (17, 1), (18, 1), (19, 1), (20, 1);
INSERT INTO t2 SELECT a, 2 FROM t2 WHERE b = 1;
INSERT INTO t2 SELECT a, 3 FROM t2 WHERE b = 1;
INSERT INTO t2 SELECT -1, -1 FROM t2;
INSERT INTO t2 SELECT -1, -1 FROM t2;
INSERT INTO t2 SELECT -1, -1 FROM t2;
INSERT INTO t3
VALUES (1, 0), (2, 0), (3, 0), (4, 0), (5, 0),
(6, 0), (7, 0), (8, 0), (9, 0), (10, 0);
INSERT INTO t3 SELECT * FROM t3 WHERE a = 10;
INSERT INTO t3 SELECT * FROM t3 WHERE a = 10;
SELECT * FROM t1 WHERE
3 <= a AND a < 5 OR
5 < a AND b = 3 OR
3 <= a;
a b
5 0
9 7
EXPLAIN
SELECT * FROM t1 WHERE
3 <= a AND a < 5 OR
5 < a AND b = 3 OR
3 <= a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index
SELECT * FROM t1 WHERE
3 <= a AND a < 5 OR
5 <= a AND b = 3 OR
3 <= a;
a b
5 0
9 7
EXPLAIN
SELECT * FROM t1 WHERE
3 <= a AND a < 5 OR
5 <= a AND b = 3 OR
3 <= a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 4 Using where; Using index
SELECT * FROM t1 WHERE
3 <= a AND a <= 5 OR
5 <= a AND b = 3 OR
3 <= a;
a b
5 0
9 7
EXPLAIN
SELECT * FROM t1 WHERE
3 <= a AND a <= 5 OR
5 <= a AND b = 3 OR
3 <= a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index
SELECT * FROM t1 WHERE
3 <= a AND a <= 5 OR
3 <= a;
a b
5 0
9 7
EXPLAIN
SELECT * FROM t1 WHERE
3 <= a AND a <= 5 OR
3 <= a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index
SELECT * FROM t2 WHERE
5 <= a AND a < 10 AND b = 1 OR
15 <= a AND a < 20 AND b = 3
OR
1 <= a AND b = 1;
a b
1 1
2 1
3 1
4 1
5 1
6 1
7 1
8 1
9 1
10 1
11 1
12 1
13 1
14 1
15 1
15 3
16 1
16 3
17 1
17 3
18 1
18 3
19 1
19 3
20 1
EXPLAIN
SELECT * FROM t2 WHERE
5 <= a AND a < 10 AND b = 1 OR
15 <= a AND a < 20 AND b = 3
OR
1 <= a AND b = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range a a 10 NULL 50 Using where; Using index
SELECT * FROM t2 WHERE
5 <= a AND a < 10 AND b = 2 OR
15 <= a AND a < 20 AND b = 3
OR
1 <= a AND b = 1;
a b
1 1
2 1
3 1
4 1
5 1
5 2
6 1
6 2
7 1
7 2
8 1
8 2
9 1
9 2
10 1
11 1
12 1
13 1
14 1
15 1
15 3
16 1
16 3
17 1
17 3
18 1
18 3
19 1
19 3
20 1
EXPLAIN
SELECT * FROM t2 WHERE
5 <= a AND a < 10 AND b = 2 OR
15 <= a AND a < 20 AND b = 3
OR
1 <= a AND b = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range a a 10 NULL 50 Using where; Using index
SELECT * FROM t3 WHERE
5 <= a AND a < 10 AND b = 3 OR
a < 5 OR
a < 10;
a b
1 0
2 0
3 0
4 0
5 0
6 0
7 0
8 0
9 0
EXPLAIN
SELECT * FROM t3 WHERE
5 <= a AND a < 10 AND b = 3 OR
a < 5 OR
a < 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 range a a 5 NULL 8 Using where; Using index
DROP TABLE t1, t2, t3;

View File

@ -749,6 +749,16 @@ bin(a1)
110000111111111
110001011111111
drop table t1bit7, t2bit7;
#
# Bug#42803: Field_bit does not have unsigned_flag field,
# which can lead to bad memory access
#
CREATE TABLE t1 (a BIT(7), b BIT(9), KEY(a, b));
INSERT INTO t1 VALUES(0, 0), (5, 3), (5, 6), (6, 4), (7, 0);
EXPLAIN SELECT a+0, b+0 FROM t1 WHERE a > 4 and b < 7 ORDER BY 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 2 NULL 4 Using where; Using index; Using filesort
DROP TABLE t1;
End of 5.0 tests
create table t1(a bit(7));
insert into t1 values(0x40);

View File

@ -68,4 +68,17 @@
</charset>
<charset name="latin1">
<family>Western</family>
<description>cp1252 West European</description>
<alias>csisolatin1</alias>
<alias>iso-8859-1</alias>
<alias>iso-ir-100</alias>
<alias>iso_8859-1</alias>
<alias>iso_8859-1:1987</alias>
<alias>l1</alias>
<alias>latin1</alias>
<collation name="latin1_test" id="99" order="test"/>
</charset>
</charsets>

View File

@ -0,0 +1,135 @@
<?xml version='1.0' encoding="utf-8"?>
<charsets>
<copyright>
Copyright (C) 2009 Sun Microsystems, Inc
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
</copyright>
<charset name="latin1">
<ctype>
<map>
00
20 20 20 20 20 20 20 20 20 28 28 28 28 28 20 20
20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20
48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
84 84 84 84 84 84 84 84 84 84 10 10 10 10 10 10
10 81 81 81 81 81 81 01 01 01 01 01 01 01 01 01
01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10
10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02
02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20
10 00 10 02 10 10 10 10 10 10 01 10 01 00 01 00
00 10 10 10 10 10 10 10 10 10 02 10 02 00 02 01
48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01
01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02
02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02
02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 02
</map>
</ctype>
<lower>
<map>
00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F
30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F
40 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F
70 71 72 73 74 75 76 77 78 79 7A 5B 5C 5D 5E 5F
60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F
70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F
80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F
90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F
A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF
B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF
E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF
F0 F1 F2 F3 F4 F5 F6 D7 F8 F9 FA FB FC FD FE DF
E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF
F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF
</map>
</lower>
<upper>
<map>
00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F
30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F
40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F
50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F
60 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F
50 51 52 53 54 55 56 57 58 59 5A 7B 7C 7D 7E 7F
80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F
90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F
A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF
B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF
C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF
C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
D0 D1 D2 D3 D4 D5 D6 F7 D8 D9 DA DB DC DD DE FF
</map>
</upper>
<unicode>
<map>
0000 0001 0002 0003 0004 0005 0006 0007 0008 0009 000A 000B 000C 000D 000E 000F
0010 0011 0012 0013 0014 0015 0016 0017 0018 0019 001A 001B 001C 001D 001E 001F
0020 0021 0022 0023 0024 0025 0026 0027 0028 0029 002A 002B 002C 002D 002E 002F
0030 0031 0032 0033 0034 0035 0036 0037 0038 0039 003A 003B 003C 003D 003E 003F
0040 0041 0042 0043 0044 0045 0046 0047 0048 0049 004A 004B 004C 004D 004E 004F
0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F
0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F
0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F
20AC 0081 201A 0192 201E 2026 2020 2021 02C6 2030 0160 2039 0152 008D 017D 008F
0090 2018 2019 201C 201D 2022 2013 2014 02DC 2122 0161 203A 0153 009D 017E 0178
00A0 00A1 00A2 00A3 00A4 00A5 00A6 00A7 00A8 00A9 00AA 00AB 00AC 00AD 00AE 00AF
00B0 00B1 00B2 00B3 00B4 00B5 00B6 00B7 00B8 00B9 00BA 00BB 00BC 00BD 00BE 00BF
00C0 00C1 00C2 00C3 00C4 00C5 00C6 00C7 00C8 00C9 00CA 00CB 00CC 00CD 00CE 00CF
00D0 00D1 00D2 00D3 00D4 00D5 00D6 00D7 00D8 00D9 00DA 00DB 00DC 00DD 00DE 00DF
00E0 00E1 00E2 00E3 00E4 00E5 00E6 00E7 00E8 00E9 00EA 00EB 00EC 00ED 00EE 00EF
00F0 00F1 00F2 00F3 00F4 00F5 00F6 00F7 00F8 00F9 00FA 00FB 00FC 00FD 00FE 00FF
</map>
</unicode>
<collation name="latin1_test">
<map>
00 01 02 03 37 2D 2E 2F 16 05 25 0B 0C 0D 0E 0F
10 11 12 13 3C 3D 32 26 18 19 3F 27 1C 1D 1E 1F
40 4F 7F 7B 5B 6C 50 7D 4D 5D 5C 4E 6B 60 4B 61
F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 7A 5E 4C 7E 6E 6F
7C C1 C2 C3 C4 C5 C6 C7 C8 C9 D1 D2 D3 D4 D5 D6
D7 D8 D9 E2 E3 E4 E5 E6 E7 E8 E9 4A E0 5A 5F 6D
79 81 82 83 84 85 86 87 88 89 91 92 93 94 95 96
97 98 99 A2 A3 A4 A5 A6 A7 A8 A9 C0 6A D0 A1 07
20 21 22 23 24 15 06 17 28 29 2A 2B 2C 09 0A 1B
30 31 1A 33 34 35 36 08 38 39 3A 3B 04 14 3E E1
41 42 43 44 45 46 47 48 49 51 52 53 54 55 56 57
58 59 62 63 64 65 66 67 68 69 70 71 72 73 74 75
76 77 78 80 8A 8B 8C 8D 8E 8F 90 9A 9B 9C 9D 9E
9F A0 AA AB AC AD AE AF B0 B1 B2 B3 B4 B5 B6 B7
B8 B9 BA BB BC BD BE BF CA CB CC CD CE CF DA DB
DC DD DE DF EA EB EC ED EE EF FA FB FC FD FE FF
</map>
</collation>
</charset>
</charsets>

View File

@ -0,0 +1,406 @@
###################################################################################
# CONFIGURATION
###################################################################################
CREATE TABLE nt_1 (a text, b int PRIMARY KEY) ENGINE = MyISAM;
CREATE TABLE nt_2 (a text, b int PRIMARY KEY) ENGINE = MyISAM;
CREATE TABLE tt_1 (a text, b int PRIMARY KEY) ENGINE = Innodb;
CREATE TABLE tt_2 (a text, b int PRIMARY KEY) ENGINE = Innodb;
CREATE TRIGGER tr_i_tt_1_to_nt_1 BEFORE INSERT ON tt_1 FOR EACH ROW
BEGIN
INSERT INTO nt_1 VALUES (NEW.a, NEW.b);
END|
CREATE TRIGGER tr_i_nt_2_to_tt_2 BEFORE INSERT ON nt_2 FOR EACH ROW
BEGIN
INSERT INTO tt_2 VALUES (NEW.a, NEW.b);
END|
###################################################################################
# CHECK HISTORY IN BINLOG
###################################################################################
*** "B M* T C" with error in M* generates in the binlog the "B M* R B T C" entries
INSERT INTO nt_1 VALUES ("new text 1", 1);
BEGIN;
INSERT INTO tt_1 VALUES (USER(), 2), (USER(), 1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 3", 3);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text 1", 1)
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_1)
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 3", 3)
master-bin.000001 # Xid # # COMMIT /* XID */
INSERT INTO tt_2 VALUES ("new text 4", 4);
BEGIN;
INSERT INTO nt_2 VALUES (USER(), 5), (USER(), 4);
ERROR 23000: Duplicate entry '4' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 6", 6);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 4", 4)
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_2)
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 6", 6)
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B M M* T C" with error in M* generates in the binlog the "B M M* T C" entries
INSERT INTO nt_1 VALUES ("new text 10", 10);
BEGIN;
INSERT INTO tt_1 VALUES ("new text 7", 7), ("new text 8", 8);
INSERT INTO tt_1 VALUES (USER(), 9), (USER(), 10);
ERROR 23000: Duplicate entry '10' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 11", 11);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text 10", 10)
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 7", 7), ("new text 8", 8)
master-bin.000001 # Table_map # # table_id: # (test.tt_1)
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 11", 11)
master-bin.000001 # Xid # # COMMIT /* XID */
INSERT INTO tt_2 VALUES ("new text 15", 15);
BEGIN;
INSERT INTO nt_2 VALUES ("new text 12", 12), ("new text 13", 13);
INSERT INTO nt_2 VALUES (USER(), 14), (USER(), 15);
ERROR 23000: Duplicate entry '15' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 16", 16);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 15", 15)
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; INSERT INTO nt_2 VALUES ("new text 12", 12), ("new text 13", 13)
master-bin.000001 # Table_map # # table_id: # (test.nt_2)
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 16", 16)
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B M* M* T C" with error in M* generates in the binlog the "B M* R B M* R B T C" entries
INSERT INTO nt_1 VALUES ("new text 18", 18);
INSERT INTO nt_1 VALUES ("new text 20", 20);
BEGIN;
INSERT INTO tt_1 VALUES (USER(), 17), (USER(), 18);
ERROR 23000: Duplicate entry '18' for key 'PRIMARY'
INSERT INTO tt_1 VALUES (USER(), 19), (USER(), 20);
ERROR 23000: Duplicate entry '20' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 21", 21);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text 18", 18)
master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text 20", 20)
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_1)
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_1)
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 21", 21)
master-bin.000001 # Xid # # COMMIT /* XID */
INSERT INTO tt_2 VALUES ("new text 23", 23);
INSERT INTO tt_2 VALUES ("new text 25", 25);
BEGIN;
INSERT INTO nt_2 VALUES (USER(), 22), (USER(), 23);
ERROR 23000: Duplicate entry '23' for key 'PRIMARY'
INSERT INTO nt_2 VALUES (USER(), 24), (USER(), 25);
ERROR 23000: Duplicate entry '25' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 26", 26);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 23", 23)
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 25", 25)
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_2)
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_2)
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 26", 26)
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B T INSERT M...SELECT* C" with an error in INSERT M...SELECT* generates
*** in the binlog the following entries: "Nothing".
*** There is a bug that will be fixed after WL#2687. Please check BUG#47175 for further details.
TRUNCATE TABLE nt_2;
TRUNCATE TABLE tt_2;
INSERT INTO tt_2 VALUES ("new text 7", 7);
BEGIN;
INSERT INTO tt_2 VALUES ("new text 27", 27);
INSERT INTO nt_2(a, b) SELECT USER(), b FROM nt_1;
ERROR 23000: Duplicate entry '7' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 28", 28);
ROLLBACK;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE nt_2
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE tt_2
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 7", 7)
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B INSERT M..SELECT* C" with an error in INSERT M...SELECT* generates
*** in the binlog the following entries: "B INSERT M..SELECT* R".
TRUNCATE TABLE nt_2;
TRUNCATE TABLE tt_2;
INSERT INTO tt_2 VALUES ("new text 7", 7);
BEGIN;
INSERT INTO nt_2(a, b) SELECT USER(), b FROM nt_1;
ERROR 23000: Duplicate entry '7' for key 'PRIMARY'
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE nt_2
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE tt_2
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 7", 7)
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_2)
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
*** "B N N T C" generates in the binlog the "B N C B N C B T C" entries
TRUNCATE TABLE nt_1;
TRUNCATE TABLE tt_2;
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 1);
INSERT INTO nt_1 VALUES (USER(), 2);
INSERT INTO tt_2 VALUES (USER(), 3);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE nt_1
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE tt_2
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B N N T R" generates in the binlog the "B N C B N C B T R" entries
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 4);
INSERT INTO nt_1 VALUES (USER(), 5);
INSERT INTO tt_2 VALUES (USER(), 6);
ROLLBACK;
Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
*** "B N* N* T C" with error in N* generates in the binlog the "B N R B N R B T C" entries
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 7), (USER(), 1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
INSERT INTO nt_1 VALUES (USER(), 8), (USER(), 1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
INSERT INTO tt_2 VALUES (USER(), 9);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B N* N* T R" with error in N* generates in the binlog the "B N R B N R B T R" entries
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 10), (USER(), 1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
INSERT INTO nt_1 VALUES (USER(), 11), (USER(), 1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
INSERT INTO tt_2 VALUES (USER(), 12);
ROLLBACK;
Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
*** "B N N T N T C" generates in the binlog the "B N C B N C B T N T C" entries
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 13);
INSERT INTO nt_1 VALUES (USER(), 14);
INSERT INTO tt_2 VALUES (USER(), 15);
INSERT INTO nt_1 VALUES (USER(), 16);
INSERT INTO tt_2 VALUES (USER(), 17);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B N N T N T R" generates in the binlog the "B N C B N C B T N T R" entries
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 18);
INSERT INTO nt_1 VALUES (USER(), 19);
INSERT INTO tt_2 VALUES (USER(), 20);
INSERT INTO nt_1 VALUES (USER(), 21);
INSERT INTO tt_2 VALUES (USER(), 22);
ROLLBACK;
Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
###################################################################################
# CLEAN
###################################################################################
DROP TABLE tt_1;
DROP TABLE tt_2;
DROP TABLE nt_1;
DROP TABLE nt_2;

View File

@ -1309,3 +1309,27 @@ INSERT INTO test.t1 VALUES (1), (2);
CREATE TABLE test.t2 SELECT * FROM test.t1;
USE test;
DROP TABLES t1, t2;
RESET MASTER;
CREATE TABLE t1 (a INT PRIMARY KEY);
BINLOG '
3u9kSA8KAAAAZgAAAGoAAAABAAQANS4xLjM1LW1hcmlhLWJldGExLWRlYnVnLWxvZwAAAAAAAAAA
AAAAAAAAAAAAAAAAAADe72RIEzgNAAgAEgAEBAQEEgAAUwAEGggAAAAICAgC
';
INSERT INTO t1 VALUES (1);
BINLOG '
3u9kSBMUAAAAKQAAAJEBAAAAABoAAAAAAAAABHRlc3QAAnQxAAEDAAA=
3u9kSBcUAAAAIgAAALMBAAAQABoAAAAAAAEAAf/+AgAAAA==
';
SHOW BINLOG EVENTS;
Log_name Pos Event_type Server_id End_log_pos Info
# # Format_desc 1 # Server ver: #, Binlog ver: #
# # Query 1 # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY)
# # Query 1 # BEGIN
# # Table_map 1 # table_id: # (test.t1)
# # Write_rows 1 # table_id: # flags: STMT_END_F
# # Query 1 # COMMIT
# # Query 1 # BEGIN
# # Table_map 1 # table_id: # (test.t1)
# # Write_rows 1 # table_id: # flags: STMT_END_F
# # Query 1 # COMMIT
DROP TABLE t1;

View File

@ -0,0 +1,440 @@
###################################################################################
# CONFIGURATION
###################################################################################
CREATE TABLE nt_1 (a text, b int PRIMARY KEY) ENGINE = MyISAM;
CREATE TABLE nt_2 (a text, b int PRIMARY KEY) ENGINE = MyISAM;
CREATE TABLE tt_1 (a text, b int PRIMARY KEY) ENGINE = Innodb;
CREATE TABLE tt_2 (a text, b int PRIMARY KEY) ENGINE = Innodb;
CREATE TRIGGER tr_i_tt_1_to_nt_1 BEFORE INSERT ON tt_1 FOR EACH ROW
BEGIN
INSERT INTO nt_1 VALUES (NEW.a, NEW.b);
END|
CREATE TRIGGER tr_i_nt_2_to_tt_2 BEFORE INSERT ON nt_2 FOR EACH ROW
BEGIN
INSERT INTO tt_2 VALUES (NEW.a, NEW.b);
END|
###################################################################################
# CHECK HISTORY IN BINLOG
###################################################################################
*** "B M* T C" with error in M* generates in the binlog the "B M* R B T C" entries
INSERT INTO nt_1 VALUES ("new text 1", 1);
BEGIN;
INSERT INTO tt_1 VALUES (USER(), 2), (USER(), 1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 3", 3);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_1)
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
INSERT INTO tt_2 VALUES ("new text 4", 4);
BEGIN;
INSERT INTO nt_2 VALUES (USER(), 5), (USER(), 4);
ERROR 23000: Duplicate entry '4' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 6", 6);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_2)
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B M M* T C" with error in M* generates in the binlog the "B M M* T C" entries
INSERT INTO nt_1 VALUES ("new text 10", 10);
BEGIN;
INSERT INTO tt_1 VALUES ("new text 7", 7), ("new text 8", 8);
INSERT INTO tt_1 VALUES (USER(), 9), (USER(), 10);
ERROR 23000: Duplicate entry '10' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 11", 11);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_1)
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.tt_1)
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
INSERT INTO tt_2 VALUES ("new text 15", 15);
BEGIN;
INSERT INTO nt_2 VALUES ("new text 12", 12), ("new text 13", 13);
INSERT INTO nt_2 VALUES (USER(), 14), (USER(), 15);
ERROR 23000: Duplicate entry '15' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 16", 16);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_2)
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.nt_2)
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B M* M* T C" with error in M* generates in the binlog the "B M* R B M* R B T C" entries
INSERT INTO nt_1 VALUES ("new text 18", 18);
INSERT INTO nt_1 VALUES ("new text 20", 20);
BEGIN;
INSERT INTO tt_1 VALUES (USER(), 17), (USER(), 18);
ERROR 23000: Duplicate entry '18' for key 'PRIMARY'
INSERT INTO tt_1 VALUES (USER(), 19), (USER(), 20);
ERROR 23000: Duplicate entry '20' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 21", 21);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_1)
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_1)
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
INSERT INTO tt_2 VALUES ("new text 23", 23);
INSERT INTO tt_2 VALUES ("new text 25", 25);
BEGIN;
INSERT INTO nt_2 VALUES (USER(), 22), (USER(), 23);
ERROR 23000: Duplicate entry '23' for key 'PRIMARY'
INSERT INTO nt_2 VALUES (USER(), 24), (USER(), 25);
ERROR 23000: Duplicate entry '25' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 26", 26);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_2)
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_2)
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B T INSERT M...SELECT* C" with an error in INSERT M...SELECT* generates
*** in the binlog the following entries: "Nothing".
*** There is a bug that will be fixed after WL#2687. Please check BUG#47175 for further details.
TRUNCATE TABLE nt_2;
TRUNCATE TABLE tt_2;
INSERT INTO tt_2 VALUES ("new text 7", 7);
BEGIN;
INSERT INTO tt_2 VALUES ("new text 27", 27);
INSERT INTO nt_2(a, b) SELECT USER(), b FROM nt_1;
ERROR 23000: Duplicate entry '7' for key 'PRIMARY'
INSERT INTO tt_2 VALUES ("new text 28", 28);
ROLLBACK;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE nt_2
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE tt_2
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B INSERT M..SELECT* C" with an error in INSERT M...SELECT* generates
*** in the binlog the following entries: "B INSERT M..SELECT* R".
TRUNCATE TABLE nt_2;
TRUNCATE TABLE tt_2;
INSERT INTO tt_2 VALUES ("new text 7", 7);
BEGIN;
INSERT INTO nt_2(a, b) SELECT USER(), b FROM nt_1;
ERROR 23000: Duplicate entry '7' for key 'PRIMARY'
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE nt_2
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE tt_2
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_2)
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
*** "B N N T C" generates in the binlog the "B N C B N C B T C" entries
TRUNCATE TABLE nt_1;
TRUNCATE TABLE tt_2;
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 1);
INSERT INTO nt_1 VALUES (USER(), 2);
INSERT INTO tt_2 VALUES (USER(), 3);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE nt_1
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE tt_2
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B N N T R" generates in the binlog the "B N C B N C B T R" entries
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 4);
INSERT INTO nt_1 VALUES (USER(), 5);
INSERT INTO tt_2 VALUES (USER(), 6);
ROLLBACK;
Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
*** "B N* N* T C" with error in N* generates in the binlog the "B N R B N R B T C" entries
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 7), (USER(), 1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
INSERT INTO nt_1 VALUES (USER(), 8), (USER(), 1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
INSERT INTO tt_2 VALUES (USER(), 9);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B N* N* T R" with error in N* generates in the binlog the "B N R B N R B T R" entries
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 10), (USER(), 1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
INSERT INTO nt_1 VALUES (USER(), 11), (USER(), 1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
INSERT INTO tt_2 VALUES (USER(), 12);
ROLLBACK;
Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
*** "B N N T N T C" generates in the binlog the "B N C B N C B T N T C" entries
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 13);
INSERT INTO nt_1 VALUES (USER(), 14);
INSERT INTO tt_2 VALUES (USER(), 15);
INSERT INTO nt_1 VALUES (USER(), 16);
INSERT INTO tt_2 VALUES (USER(), 17);
COMMIT;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
*** "B N N T N T R" generates in the binlog the "B N C B N C B T N T R" entries
BEGIN;
INSERT INTO nt_1 VALUES (USER(), 18);
INSERT INTO nt_1 VALUES (USER(), 19);
INSERT INTO tt_2 VALUES (USER(), 20);
INSERT INTO nt_1 VALUES (USER(), 21);
INSERT INTO tt_2 VALUES (USER(), 22);
ROLLBACK;
Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.nt_1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.tt_2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
###################################################################################
# CLEAN
###################################################################################
DROP TABLE tt_1;
DROP TABLE tt_2;
DROP TABLE nt_1;
DROP TABLE nt_2;

View File

@ -133,6 +133,10 @@ master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
insert into t1 values(11);
commit;
show binlog events from <binlog_start>;
@ -144,6 +148,8 @@ master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
@ -272,6 +278,10 @@ master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t2)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # use `test`; drop table t1,t2
master-bin.000001 # Query # # use `test`; create table t0 (n int)
master-bin.000001 # Query # # BEGIN
@ -372,7 +382,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE if exists t2
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS t2
master-bin.000001 # Query # # use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb
master-bin.000001 # Query # # BEGIN
@ -390,9 +400,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE t2
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
@ -400,7 +408,11 @@ master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # ROLLBACK
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # use `test`; TRUNCATE table t2
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)

View File

@ -0,0 +1,161 @@
Verbose statements from : write-partial-row.binlog
select replace(txt,'\r', '') as stmt from raw_binlog_rows where txt like '###%';
stmt
### INSERT INTO mysql.ndb_apply_status
### SET
### @1=1
### @2=25769803786
### @3=''
### @4=0
### @5=0
### INSERT INTO test.ba
### SET
### @1=3
### @2=3
### @3=3
### INSERT INTO test.ba
### SET
### @1=1
### @2=1
### @3=1
### INSERT INTO test.ba
### SET
### @1=2
### @2=2
### @3=2
### INSERT INTO test.ba
### SET
### @1=4
### @2=4
### @3=4
### INSERT INTO test.ba
### SET
### @1=4
### @3=40
### DELETE FROM test.ba
### WHERE
### @1=2
drop table raw_binlog_rows;
Verbose statements from : write-full-row.binlog
select replace(txt,'\r', '') as stmt from raw_binlog_rows where txt like '###%';
stmt
### INSERT INTO mysql.ndb_apply_status
### SET
### @1=2
### @2=25769803786
### @3=''
### @4=0
### @5=0
### INSERT INTO test.ba
### SET
### @1=3
### @2=3
### @3=3
### INSERT INTO test.ba
### SET
### @1=1
### @2=1
### @3=1
### INSERT INTO test.ba
### SET
### @1=2
### @2=2
### @3=2
### INSERT INTO test.ba
### SET
### @1=4
### @2=4
### @3=4
### INSERT INTO test.ba
### SET
### @1=4
### @2=4
### @3=40
### DELETE FROM test.ba
### WHERE
### @1=2
drop table raw_binlog_rows;
Verbose statements from : update-partial-row.binlog
select replace(txt,'\r', '') as stmt from raw_binlog_rows where txt like '###%';
stmt
### INSERT INTO mysql.ndb_apply_status
### SET
### @1=3
### @2=25769803786
### @3=''
### @4=0
### @5=0
### INSERT INTO test.ba
### SET
### @1=3
### @2=3
### @3=3
### INSERT INTO test.ba
### SET
### @1=1
### @2=1
### @3=1
### INSERT INTO test.ba
### SET
### @1=2
### @2=2
### @3=2
### INSERT INTO test.ba
### SET
### @1=4
### @2=4
### @3=4
### UPDATE test.ba
### WHERE
### @1=4
### @3=4
### SET
### @1=4
### @3=40
### DELETE FROM test.ba
### WHERE
### @1=2
drop table raw_binlog_rows;
Verbose statements from : update-full-row.binlog
select replace(txt,'\r', '') as stmt from raw_binlog_rows where txt like '###%';
stmt
### INSERT INTO mysql.ndb_apply_status
### SET
### @1=4
### @2=25769803786
### @3=''
### @4=0
### @5=0
### INSERT INTO test.ba
### SET
### @1=3
### @2=3
### @3=3
### INSERT INTO test.ba
### SET
### @1=1
### @2=1
### @3=1
### INSERT INTO test.ba
### SET
### @1=2
### @2=2
### @3=2
### INSERT INTO test.ba
### SET
### @1=4
### @2=4
### @3=4
### UPDATE test.ba
### WHERE
### @1=4
### @2=4
### @3=4
### SET
### @1=4
### @2=4
### @3=40
### DELETE FROM test.ba
### WHERE
### @1=2
drop table raw_binlog_rows;

View File

@ -784,3 +784,24 @@ INSERT INTO test.t1 VALUES (1), (2);
CREATE TABLE test.t2 SELECT * FROM test.t1;
USE test;
DROP TABLES t1, t2;
RESET MASTER;
CREATE TABLE t1 (a INT PRIMARY KEY);
BINLOG '
3u9kSA8KAAAAZgAAAGoAAAABAAQANS4xLjM1LW1hcmlhLWJldGExLWRlYnVnLWxvZwAAAAAAAAAA
AAAAAAAAAAAAAAAAAADe72RIEzgNAAgAEgAEBAQEEgAAUwAEGggAAAAICAgC
';
INSERT INTO t1 VALUES (1);
BINLOG '
3u9kSBMUAAAAKQAAAJEBAAAAABoAAAAAAAAABHRlc3QAAnQxAAEDAAA=
3u9kSBcUAAAAIgAAALMBAAAQABoAAAAAAAEAAf/+AgAAAA==
';
SHOW BINLOG EVENTS;
Log_name Pos Event_type Server_id End_log_pos Info
# # Format_desc 1 # Server ver: #, Binlog ver: #
# # Query 1 # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY)
# # Query 1 # use `test`; INSERT INTO t1 VALUES (1)
# # Query 1 # BEGIN
# # Table_map 1 # table_id: # (test.t1)
# # Write_rows 1 # table_id: # flags: STMT_END_F
# # Query 1 # COMMIT
DROP TABLE t1;

Binary file not shown.

View File

@ -0,0 +1,4 @@
--source include/have_binlog_format_mixed.inc
--source include/have_innodb.inc
--source extra/binlog_tests/binlog_failure_mixing_engines.test

View File

@ -0,0 +1,4 @@
--source include/have_binlog_format_row.inc
--source include/have_innodb.inc
--source extra/binlog_tests/binlog_failure_mixing_engines.test

View File

@ -0,0 +1,86 @@
########################################################
# Test mysqlbinlog command with Ndb produced Binlog
# variants
#
# WHAT
# ====
# This test aims to check that the mysqlbinlog --verbose
# command can correctly output binlogs written in the 4 format
# variants currently used by Ndb
#
# 1) Updates logged as write_row events
# Only primary key and updated columns included in the
# event
# 2) Updates logged as write_row events
# All columns included in the event
# 3) Updates logged as update_row events
# Only primary key and updated columns included in the
# event
# 4) Updates logged as update_row events
# All columns included in the event
#
# Format variant (1) is the Ndb default.
# Bug#47323 resulted in binlogs generated in format (1)
# being incorrectly parsed by the mysqlbinlog --verbose
# option
#
# HOW
# ===
# Row-based binlog files in each format have been
# captured from an Ndb cluster.
# These are decoded using the mysqlbinlog --verbose
# tool and the output is checked.
#
########################################################
# We require binlog_format_row only because the test result is independent
# of the binlog format and there's no point running the same test 3 times
-- source include/have_binlog_format_row.inc
--disable_query_log
--let $binlog_file=write-partial-row.binlog
--exec $MYSQL_BINLOG --verbose suite/binlog/std_data/$binlog_file > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_verbose.sql
create table raw_binlog_rows (txt varchar(1000));
--eval load data local infile '$MYSQLTEST_VARDIR/tmp/mysqlbinlog_verbose.sql' into table raw_binlog_rows columns terminated by '\n';
--remove_file $MYSQLTEST_VARDIR/tmp/mysqlbinlog_verbose.sql
--enable_query_log
--echo Verbose statements from : $binlog_file
# Output --verbose lines, with extra Windows CR's trimmed
select replace(txt,'\r', '') as stmt from raw_binlog_rows where txt like '###%';
drop table raw_binlog_rows;
--disable_query_log
--let $binlog_file=write-full-row.binlog
--exec $MYSQL_BINLOG --verbose suite/binlog/std_data/$binlog_file > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_verbose.sql
create table raw_binlog_rows (txt varchar(1000));
--eval load data local infile '$MYSQLTEST_VARDIR/tmp/mysqlbinlog_verbose.sql' into table raw_binlog_rows columns terminated by '\n';
--remove_file $MYSQLTEST_VARDIR/tmp/mysqlbinlog_verbose.sql
--enable_query_log
--echo Verbose statements from : $binlog_file
# Output --verbose lines, with extra Windows CR's trimmed
select replace(txt,'\r', '') as stmt from raw_binlog_rows where txt like '###%';
drop table raw_binlog_rows;
--disable_query_log
--let $binlog_file=update-partial-row.binlog
--exec $MYSQL_BINLOG --verbose suite/binlog/std_data/$binlog_file > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_verbose.sql
create table raw_binlog_rows (txt varchar(1000));
--eval load data local infile '$MYSQLTEST_VARDIR/tmp/mysqlbinlog_verbose.sql' into table raw_binlog_rows columns terminated by '\n';
--remove_file $MYSQLTEST_VARDIR/tmp/mysqlbinlog_verbose.sql
--enable_query_log
--echo Verbose statements from : $binlog_file
# Output --verbose lines, with extra Windows CR's trimmed
select replace(txt,'\r', '') as stmt from raw_binlog_rows where txt like '###%';
drop table raw_binlog_rows;
--disable_query_log
--let $binlog_file=update-full-row.binlog
--exec $MYSQL_BINLOG --verbose suite/binlog/std_data/$binlog_file > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_verbose.sql
create table raw_binlog_rows (txt varchar(1000));
--eval load data local infile '$MYSQLTEST_VARDIR/tmp/mysqlbinlog_verbose.sql' into table raw_binlog_rows columns terminated by '\n';
--remove_file $MYSQLTEST_VARDIR/tmp/mysqlbinlog_verbose.sql
--enable_query_log
--echo Verbose statements from : $binlog_file
# Output --verbose lines, with extra Windows CR's trimmed
select replace(txt,'\r', '') as stmt from raw_binlog_rows where txt like '###%';
drop table raw_binlog_rows;

View File

@ -94,15 +94,24 @@ DROP TABLE t1;
SET GLOBAL log_warnings = @old_log_warnings;
let LOG_ERROR= `SELECT @@GLOBAL.log_error`;
let $log_error_= `SELECT @@GLOBAL.log_error`;
if(!`select LENGTH('$log_error_')`)
{
# MySQL Server on Windows is started with --console and thus
# does not know the location of its .err log; use the default location
let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.1.err;
}
# Assign env variable LOG_ERROR
let LOG_ERROR=$log_error_;
--echo # Count the number of times the "Unsafe" message was printed
--echo # to the error log.
perl;
$log_error= $ENV{'LOG_ERROR'};
use strict;
my $log_error= $ENV{'LOG_ERROR'} or die "LOG_ERROR not set";
open(FILE, "$log_error") or die("Unable to open $log_error: $!\n");
$count = () = grep(/Bug#46265/g,<FILE>);
my $count = () = grep(/Bug#46265/g,<FILE>);
print "Occurrences: $count\n";
close(FILE);
EOF

View File

@ -105,12 +105,6 @@ drop table t1;
--error ER_TOO_BIG_ROWSIZE
CREATE TABLE t1(c TEXT, PRIMARY KEY (c(440)))
ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
# 439 throws error with certain system zlib (ubuntu "intrepid")
# but not with zlib bundled with MySQL, because zlib's compressBound()
# are different (and used by InnoDB's page_zip_empty_size()); see
# http://www.linux-archive.org/archlinux-development/119356-zlib-1-2-3-3-1-a.html
# "Fix compressBound(), was low for some pathological cases [Fearnley]".
# 438 works with both zlib-s.
CREATE TABLE t1(c TEXT, PRIMARY KEY (c(438)))
ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
INSERT INTO t1 VALUES(REPEAT('A',512)),(REPEAT('B',512));

View File

@ -176,7 +176,7 @@ Log_name Pos Event_type Server_id End_log_pos Info
# 106 Query # 174 BEGIN
# 174 Table_map # 216 table_id: # (test.t7)
# 216 Write_rows # 272 table_id: # flags: STMT_END_F
# 272 Query # 343 ROLLBACK
# 272 Query # 341 COMMIT
SELECT * FROM t7 ORDER BY a,b;
a b
1 2
@ -327,7 +327,7 @@ Log_name Pos Event_type Server_id End_log_pos Info
# 1329 Query # 1397 BEGIN
# 1397 Table_map # 1438 table_id: # (test.t1)
# 1438 Write_rows # 1482 table_id: # flags: STMT_END_F
# 1482 Query # 1553 ROLLBACK
# 1482 Query # 1551 COMMIT
SHOW TABLES;
Tables_in_test
t1

View File

@ -102,3 +102,4 @@ Last_IO_Errno #
Last_IO_Error #
Last_SQL_Errno 0
Last_SQL_Error
DROP TABLE t1;

View File

@ -58,3 +58,4 @@ STOP SLAVE;
# cleanup
--connection master
DROP TABLE t1;
-- sync_slave_with_master

View File

@ -78,3 +78,7 @@ SELECT * FROM t1 ORDER BY a;
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 # 35 # 36 #
query_vertical SHOW SLAVE STATUS;
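# Clean up on the master and wait for the slave to apply the DROP,
# so that the table does not leak into subsequent tests.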
-- connection master
DROP TABLE t1;
-- sync_slave_with_master

View File

@ -86,3 +86,8 @@ select hex(c1) as h, c1 from t1 order by c1, h;
select group_concat(hex(c1) order by hex(c1)) from t1 group by c1;
select group_concat(c1 order by hex(c1) SEPARATOR '') from t1 group by c1;
drop table t1;
--echo Bug#46448 trailing spaces are not ignored when user collation maps space != 0x20
set names latin1;
show collation like 'latin1_test';
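# With the fix for the bug above, the comparison below should treat
# "foo" and "foo " as equal even though latin1_test maps space to a
# weight other than 0x20.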
select "foo" = "foo " collate latin1_test;

View File

@ -14,3 +14,4 @@ innodb_bug39438 : Bug#42383 2009-01-28 lsoares "This fails in embedded
query_cache_28249 : Bug#43861 2009-03-25 main.query_cache_28249 fails sporadically
partition_innodb_builtin : Bug#32430 2009-09-25 mattiasj Waiting for push of Innodb changes
partition_innodb_plugin : Bug#32430 2009-09-25 mattiasj Waiting for push of Innodb changes
innodb_bug46000 : Bug#47860 2009-10-16 satyab Test fails for innodb plugin 1.0.5

View File

@ -1006,3 +1006,51 @@ DROP TABLE t1;
###
--echo End of 5.0 tests
--echo #
--echo # BUG#47280 - strange results from count(*) with order by multiple
--echo # columns without where/group
--echo #
--echo #
--echo # Initialize test
--echo #
CREATE TABLE t1 (
pk INT NOT NULL,
i INT,
PRIMARY KEY (pk)
);
INSERT INTO t1 VALUES (1,11),(2,12),(3,13);
--echo #
--echo # Start test
--echo # All the following queries shall return 1 record
--echo #
--echo
--echo # Masking all correct values {11...13} for column i in this result.
--replace_column 2 #
SELECT MAX(pk) as max, i
FROM t1
ORDER BY max;
--echo
EXPLAIN
SELECT MAX(pk) as max, i
FROM t1
ORDER BY max;
--echo
--echo # Only 11 is correct for column i in this result
SELECT MAX(pk) as max, i
FROM t1
WHERE pk<2
ORDER BY max;
--echo #
--echo # Cleanup
--echo #
DROP TABLE t1;
--echo End of 5.1 tests

View File

@ -0,0 +1 @@
--innodb_lock_wait_timeout=2

View File

@ -0,0 +1,58 @@
-- source include/not_embedded.inc
-- source include/have_innodb.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
# REPLACE INTO ... SELECT and INSERT INTO ... SELECT should do
# a consistent read of the source table.
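# Under READ COMMITTED the SELECT part should use a non-locking
# (consistent) read, so the DELETEs issued from the second connection
# must not block.  The accompanying -master.opt file sets
# innodb_lock_wait_timeout=2, so any unexpected row lock would make
# those DELETEs time out quickly and fail the test.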
connect (a,localhost,root,,);
connect (b,localhost,root,,);
connection a;
set session transaction isolation level read committed;
create table t1(a int not null) engine=innodb DEFAULT CHARSET=latin1;
create table t2 like t1;
insert into t2 values (1),(2),(3),(4),(5),(6),(7);
set autocommit=0;
# REPLACE INTO ... SELECT case
begin;
# this should not result in any locks on t2.
replace into t1 select * from t2;
connection b;
set session transaction isolation level read committed;
set autocommit=0;
# should not cause a lock wait.
delete from t2 where a=5;
commit;
delete from t2;
commit;
connection a;
commit;
# INSERT INTO ... SELECT case
begin;
# this should not result in any locks on t2.
insert into t1 select * from t2;
connection b;
set session transaction isolation level read committed;
set autocommit=0;
# should not cause a lock wait.
delete from t2 where a=5;
commit;
delete from t2;
commit;
connection a;
commit;
select * from t1;
drop table t1;
drop table t2;
connection default;
disconnect a;
disconnect b;

View File

@ -9,6 +9,7 @@
-- disable_result_log
# set packet size and reconnect
let $max_packet=`select @@global.max_allowed_packet`;
SET @@global.max_allowed_packet=16777216;
--connect (newconn, localhost, root,,)
@ -32,6 +33,7 @@ SELECT f4, f8 FROM bug34300;
DROP TABLE bug34300;
EVAL SET @@global.max_allowed_packet=$max_packet;
disconnect newconn;
connection default;
SET @@global.max_allowed_packet=default;

View File

@ -0,0 +1,21 @@
# This is the test for bug 44369. We should
# block table creation when a column name matches
# one of the InnoDB internal reserved key words,
# both case-sensitively and case-insensitively.
--source include/have_innodb.inc
# This create table operation should fail.
--error ER_CANT_CREATE_TABLE
create table bug44369 (DB_ROW_ID int) engine=innodb;
# This create should fail as well
--error ER_CANT_CREATE_TABLE
create table bug44369 (db_row_id int) engine=innodb;
show errors;
--error ER_CANT_CREATE_TABLE
create table bug44369 (db_TRX_Id int) engine=innodb;
show errors;

View File

@ -0,0 +1,17 @@
#
# Bug#44571 InnoDB Plugin crashes on ADD INDEX
# http://bugs.mysql.com/44571
#
-- source include/have_innodb.inc
CREATE TABLE bug44571 (foo INT) ENGINE=InnoDB;
ALTER TABLE bug44571 CHANGE foo bar INT;
-- error ER_KEY_COLUMN_DOES_NOT_EXITS
ALTER TABLE bug44571 ADD INDEX bug44571b (foo);
# The following will fail, because the CHANGE foo bar was
# not communicated to InnoDB.
--error ER_NOT_KEYFILE
ALTER TABLE bug44571 ADD INDEX bug44571b (bar);
--error ER_NOT_KEYFILE
CREATE INDEX bug44571b ON bug44571 (bar);
DROP TABLE bug44571;

View File

@ -0,0 +1,34 @@
# This is the test for bug 46000. We shall
# block any index creation with the name
# "GEN_CLUST_INDEX", which is the reserved
# name for InnoDB's default primary index.
--source include/have_innodb.inc
# This 'create table' operation should fail because it
# uses the reserved name as its index name.
--error ER_CANT_CREATE_TABLE
create table bug46000(`id` int,key `GEN_CLUST_INDEX`(`id`))engine=innodb;
# Mixed upper/lower case of the reserved key words
--error ER_CANT_CREATE_TABLE
create table bug46000(`id` int, key `GEN_clust_INDEX`(`id`))engine=innodb;
show errors;
create table bug46000(id int) engine=innodb;
# This 'create index' operation should fail.
--replace_regex /'[^']*test.#sql-[0-9a-f_]*'/'#sql-temporary'/
--error ER_CANT_CREATE_TABLE
create index GEN_CLUST_INDEX on bug46000(id);
--replace_regex /'[^']*test.#sql-[0-9a-f_]*'/'#sql-temporary'/
show errors;
# This 'create index' operation should succeed, since no
# temp table is left over from the last failed create index
# operation.
create index idx on bug46000(id);
drop table bug46000;

View File

@ -1518,5 +1518,36 @@ CREATE TABLE t3 select * from t1;
checksum table t3;
drop table t1,t2,t3;
#
# BUG#47073 - valgrind errs, corruption, failed repair of partition,
# low myisam_sort_buffer_size
#
CREATE TABLE t1(a INT, b CHAR(10), KEY(a), KEY(b));
INSERT INTO t1 VALUES(1,'0'),(2,'0'),(3,'0'),(4,'0'),(5,'0'),
(6,'0'),(7,'0');
INSERT INTO t1 SELECT a+10,b FROM t1;
INSERT INTO t1 SELECT a+20,b FROM t1;
INSERT INTO t1 SELECT a+40,b FROM t1;
INSERT INTO t1 SELECT a+80,b FROM t1;
INSERT INTO t1 SELECT a+160,b FROM t1;
INSERT INTO t1 SELECT a+320,b FROM t1;
INSERT INTO t1 SELECT a+640,b FROM t1;
INSERT INTO t1 SELECT a+1280,b FROM t1;
INSERT INTO t1 SELECT a+2560,b FROM t1;
INSERT INTO t1 SELECT a+5120,b FROM t1;
SET myisam_sort_buffer_size=4;
REPAIR TABLE t1;
# !!! Disabled until additional fix for BUG#47073 is pushed.
#SET myisam_repair_threads=2;
# May report different values depending on thread activity.
#--replace_regex /changed from [0-9]+/changed from #/
#REPAIR TABLE t1;
#SET myisam_repair_threads=@@global.myisam_repair_threads;
SET myisam_sort_buffer_size=@@global.myisam_sort_buffer_size;
DROP TABLE t1;
--echo End of 5.1 tests

View File

@ -1402,3 +1402,35 @@ SELECT DISTINCT a FROM t1 WHERE b = 1 ORDER BY c DESC LIMIT 0, 9;
SELECT DISTINCT a FROM t1 WHERE b = 1 ORDER BY c DESC LIMIT 0, 9;
DROP TABLE t1;
--echo #
--echo # Bug #43029: FORCE INDEX FOR ORDER BY is ignored when join buffering
--echo # is used
--echo #
CREATE TABLE t1 (a INT, b INT, KEY (a));
INSERT INTO t1 VALUES (0, NULL), (1, NULL), (2, NULL), (3, NULL);
INSERT INTO t1 SELECT a+4, b FROM t1;
INSERT INTO t1 SELECT a+8, b FROM t1;
CREATE TABLE t2 (a INT, b INT);
INSERT INTO t2 VALUES (0,NULL), (1,NULL), (2,NULL), (3,NULL), (4,NULL);
INSERT INTO t2 SELECT a+4, b FROM t2;
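# The join with the unindexed t2 is resolved with join buffering, which
# is the scenario in which FORCE INDEX FOR ORDER BY used to be ignored
# (see the bug reference above).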
--echo # shouldn't have "using filesort"
EXPLAIN
SELECT * FROM t1 FORCE INDEX FOR ORDER BY (a), t2 WHERE t1.a < 2 ORDER BY t1.a;
--echo # should have "using filesort"
EXPLAIN
SELECT * FROM t1 USE INDEX FOR ORDER BY (a), t2 WHERE t1.a < 2 ORDER BY t1.a;
--echo # should have "using filesort"
EXPLAIN
SELECT * FROM t1 FORCE INDEX FOR JOIN (a), t2 WHERE t1.a < 2 ORDER BY t1.a;
DROP TABLE t1, t2;
--echo End of 5.1 tests

View File

@ -61,6 +61,19 @@ SELECT * FROM t1;
SHOW CREATE TABLE t1;
DROP TABLE t1;
#
# Bug#44059: rec_per_key on empty partition gives weird optimiser results
#
create table t1 (a int, b int, key(a))
partition by list (a)
( partition p0 values in (1),
partition p1 values in (2));
insert into t1 values (1,1),(2,1),(2,2),(2,3);
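# Partition p0 gets a single row while p1 gets three; with the fix the
# index statistics (rec_per_key) shown below should be based on the
# partition holding the most rows instead of the first, almost empty one.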
show indexes from t1;
analyze table t1;
show indexes from t1;
drop table t1;
#
# Bug#36001: Partitions: spelling and using some error messages
#

View File

@ -0,0 +1 @@
--open-files-limit=5 --max_connections=2 --table_open_cache=1

View File

@ -0,0 +1,26 @@
--source include/have_partition.inc
--disable_warnings
DROP TABLE IF EXISTS `t1`;
--enable_warnings
# On some platforms the lowest possible open_files_limit is too high...
let $max_open_files_limit= `SELECT @@open_files_limit > 511`;
if ($max_open_files_limit)
{
skip Need open_files_limit to be lower than 512;
}
#
--echo # Bug#46922: crash when adding partitions and open_files_limit is reached
#
CREATE TABLE t1 (a INT PRIMARY KEY)
ENGINE=MyISAM PARTITION BY KEY () PARTITIONS 1;
INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11);
--echo # if the bug exists, then crash will happen here
--replace_regex /file '.*'/file '<partition file>'/
--error 23
ALTER TABLE t1 ADD PARTITION PARTITIONS 511;
--sorted_result
SELECT * FROM t1;
DROP TABLE t1;

View File

@ -1046,3 +1046,128 @@ explain select * from t2 where a=1000 and b<11;
drop table t1, t2;
#
# Bug#42846: wrong result returned for range scan when using covering index
#
CREATE TABLE t1( a INT, b INT, KEY( a, b ) );
CREATE TABLE t2( a INT, b INT, KEY( a, b ) );
CREATE TABLE t3( a INT, b INT, KEY( a, b ) );
INSERT INTO t1( a, b )
VALUES (0, 1), (1, 2), (1, 4), (2, 3), (5, 0), (9, 7);
INSERT INTO t2( a, b )
VALUES ( 1, 1), ( 2, 1), ( 3, 1), ( 4, 1), ( 5, 1),
( 6, 1), ( 7, 1), ( 8, 1), ( 9, 1), (10, 1),
(11, 1), (12, 1), (13, 1), (14, 1), (15, 1),
(16, 1), (17, 1), (18, 1), (19, 1), (20, 1);
INSERT INTO t2 SELECT a, 2 FROM t2 WHERE b = 1;
INSERT INTO t2 SELECT a, 3 FROM t2 WHERE b = 1;
# To make range scan compelling to the optimizer
INSERT INTO t2 SELECT -1, -1 FROM t2;
INSERT INTO t2 SELECT -1, -1 FROM t2;
INSERT INTO t2 SELECT -1, -1 FROM t2;
INSERT INTO t3
VALUES (1, 0), (2, 0), (3, 0), (4, 0), (5, 0),
(6, 0), (7, 0), (8, 0), (9, 0), (10, 0);
# To make range scan compelling to the optimizer
INSERT INTO t3 SELECT * FROM t3 WHERE a = 10;
INSERT INTO t3 SELECT * FROM t3 WHERE a = 10;
#
# Problem#1 Test queries. Will give missing results unless Problem#1 is fixed.
# With one exception, they are independent of Problem#2.
#
SELECT * FROM t1 WHERE
3 <= a AND a < 5 OR
5 < a AND b = 3 OR
3 <= a;
EXPLAIN
SELECT * FROM t1 WHERE
3 <= a AND a < 5 OR
5 < a AND b = 3 OR
3 <= a;
# Query below: Tests both Problem#1 and Problem#2 (EXPLAIN differs as well)
SELECT * FROM t1 WHERE
3 <= a AND a < 5 OR
5 <= a AND b = 3 OR
3 <= a;
EXPLAIN
SELECT * FROM t1 WHERE
3 <= a AND a < 5 OR
5 <= a AND b = 3 OR
3 <= a;
SELECT * FROM t1 WHERE
3 <= a AND a <= 5 OR
5 <= a AND b = 3 OR
3 <= a;
EXPLAIN
SELECT * FROM t1 WHERE
3 <= a AND a <= 5 OR
5 <= a AND b = 3 OR
3 <= a;
SELECT * FROM t1 WHERE
3 <= a AND a <= 5 OR
3 <= a;
EXPLAIN
SELECT * FROM t1 WHERE
3 <= a AND a <= 5 OR
3 <= a;
#
# Problem#2 Test queries.
# These queries will give missing results if Problem#1 is fixed.
# But Problem#1 also hides this bug.
#
SELECT * FROM t2 WHERE
5 <= a AND a < 10 AND b = 1 OR
15 <= a AND a < 20 AND b = 3
OR
1 <= a AND b = 1;
EXPLAIN
SELECT * FROM t2 WHERE
5 <= a AND a < 10 AND b = 1 OR
15 <= a AND a < 20 AND b = 3
OR
1 <= a AND b = 1;
SELECT * FROM t2 WHERE
5 <= a AND a < 10 AND b = 2 OR
15 <= a AND a < 20 AND b = 3
OR
1 <= a AND b = 1;
EXPLAIN
SELECT * FROM t2 WHERE
5 <= a AND a < 10 AND b = 2 OR
15 <= a AND a < 20 AND b = 3
OR
1 <= a AND b = 1;
SELECT * FROM t3 WHERE
5 <= a AND a < 10 AND b = 3 OR
a < 5 OR
a < 10;
EXPLAIN
SELECT * FROM t3 WHERE
5 <= a AND a < 10 AND b = 3 OR
a < 5 OR
a < 10;
DROP TABLE t1, t2, t3;

View File

@ -397,6 +397,17 @@ insert into t2bit7 values (b'110011011111111');
select bin(a1) from t1bit7, t2bit7 where t1bit7.a1=t2bit7.b1;
drop table t1bit7, t2bit7;
--echo #
--echo # Bug42803: Field_bit does not have unsigned_flag field,
--echo # can lead to bad memory access
--echo #
CREATE TABLE t1 (a BIT(7), b BIT(9), KEY(a, b));
INSERT INTO t1 VALUES(0, 0), (5, 3), (5, 6), (6, 4), (7, 0);
EXPLAIN SELECT a+0, b+0 FROM t1 WHERE a > 4 and b < 7 ORDER BY 2;
DROP TABLE t1;
--echo End of 5.0 tests
#

View File

@ -27,7 +27,7 @@ SET(MYSYS_SOURCES array.c charset-def.c charset.c checksum.c default.c default_
errors.c hash.c list.c md5.c mf_brkhant.c mf_cache.c mf_dirname.c mf_fn_ext.c
mf_format.c mf_getdate.c mf_iocache.c mf_iocache2.c mf_keycache.c
mf_keycaches.c mf_loadpath.c mf_pack.c mf_path.c mf_qsort.c mf_qsort2.c
mf_radix.c mf_same.c mf_sort.c mf_soundex.c mf_strip.c mf_arr_appstr.c mf_tempdir.c
mf_radix.c mf_same.c mf_sort.c mf_soundex.c mf_arr_appstr.c mf_tempdir.c
mf_tempfile.c mf_unixpath.c mf_wcomp.c mf_wfile.c mulalloc.c my_access.c
my_aes.c my_alarm.c my_alloc.c my_append.c my_bit.c my_bitmap.c my_chsize.c
my_clock.c my_compress.c my_conio.c my_copy.c my_crc32.c my_create.c my_delete.c

View File

@ -35,7 +35,7 @@ libmysys_a_SOURCES = my_init.c my_getwd.c mf_getdate.c my_mmap.c \
my_error.c errors.c my_div.c my_messnc.c \
mf_format.c mf_same.c mf_dirname.c mf_fn_ext.c \
my_symlink.c my_symlink2.c \
mf_pack.c mf_unixpath.c mf_strip.c mf_arr_appstr.c \
mf_pack.c mf_unixpath.c mf_arr_appstr.c \
mf_wcomp.c mf_wfile.c my_gethwaddr.c \
mf_qsort.c mf_qsort2.c mf_sort.c \
ptr_cmp.c mf_radix.c queues.c my_getncpus.c \

View File

@ -1730,6 +1730,7 @@ restart:
- block assigned but not yet read from file (invalid data).
*/
#ifdef THREAD
if (keycache->in_resize)
{
/* This is a request during a resize operation */
@ -1971,6 +1972,9 @@ restart:
}
DBUG_RETURN(0);
}
#else /* THREAD */
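/* Without threads a resize cannot run concurrently with this request,
   so no resize may be in progress here. */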
DBUG_ASSERT(!keycache->in_resize);
#endif
if (page_status == PAGE_READ &&
(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |

View File

@ -1,45 +0,0 @@
/* Copyright (C) 2000 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* Strips trailing space from a string */
#include "mysys_priv.h"
/*
strip_sp(char * str)
Strips end-space from string and returns new length.
*/
size_t strip_sp(register char * str)
{
reg2 char * found;
reg3 char * start;
start=found=str;
while (*str)
{
if (*str != ' ')
{
while (*++str && *str != ' ') {};
if (!*str)
return (size_t) (str-start); /* Return stringlength */
}
found=str;
while (*++str == ' ') {};
}
*found= '\0'; /* Strip at first space */
return (size_t) (found-start);
} /* strip_sp */

View File

@ -16,7 +16,7 @@
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include)
SET(REGEX_SOURCES debug.c regcomp.c regerror.c regexec.c regfree.c reginit.c split.c)
SET(REGEX_SOURCES regcomp.c regerror.c regexec.c regfree.c reginit.c)
IF(NOT SOURCE_SUBLIBS)
ADD_LIBRARY(regex ${REGEX_SOURCES})

View File

@ -1337,10 +1337,10 @@ void ha_partition::cleanup_new_partition(uint part_count)
m_file= m_added_file;
m_added_file= NULL;
external_lock(ha_thd(), F_UNLCK);
/* delete_table also needed, a bit more complex */
close();
m_added_file= m_file;
m_file= save_m_file;
}
DBUG_VOID_RETURN;
@ -5144,8 +5144,9 @@ int ha_partition::info(uint flag)
If the handler doesn't support statistics, it should set all of the
above to 0.
We will allow the first handler to set the rec_per_key and use
this as an estimate on the total table.
We first scan through all partitions to find the one holding the most rows.
We will then allow the handler with the most rows to set
the rec_per_key and use this as an estimate on the total table.
max_data_file_length: Maximum data file length
We ignore it, is only used in
@ -5157,14 +5158,33 @@ int ha_partition::info(uint flag)
ref_length: We set this to the value calculated
and stored in local object
create_time: Creation time of table
Set by first handler
So we calculate these constants by using the variables on the first
handler.
So we calculate these constants by using the variables from the
handler with the most rows.
*/
handler *file;
handler *file, **file_array;
ulonglong max_records= 0;
uint32 i= 0;
uint32 handler_instance= 0;
file= m_file[0];
file_array= m_file;
do
{
file= *file_array;
/* Get variables if not already done */
if (!(flag & HA_STATUS_VARIABLE) ||
!bitmap_is_set(&(m_part_info->used_partitions),
(file_array - m_file)))
file->info(HA_STATUS_VARIABLE);
if (file->stats.records > max_records)
{
max_records= file->stats.records;
handler_instance= i;
}
i++;
} while (*(++file_array));
file= m_file[handler_instance];
file->info(HA_STATUS_CONST);
stats.create_time= file->stats.create_time;
ref_length= m_ref_length;

View File

@ -158,7 +158,7 @@ private:
class binlog_trx_data {
public:
binlog_trx_data()
: at_least_one_stmt(0), incident(FALSE), m_pending(0),
: at_least_one_stmt_committed(0), incident(FALSE), m_pending(0),
before_stmt_pos(MY_OFF_T_UNDEF)
{
trans_log.end_of_file= max_binlog_cache_size;
@ -187,7 +187,10 @@ public:
{
DBUG_PRINT("info", ("truncating to position %lu", (ulong) pos));
DBUG_PRINT("info", ("before_stmt_pos=%lu", (ulong) pos));
delete pending();
if (pending())
{
delete pending();
}
set_pending(0);
reinit_io_cache(&trans_log, WRITE_CACHE, pos, 0, 0);
trans_log.end_of_file= max_binlog_cache_size;
@ -197,12 +200,12 @@ public:
/*
The only valid positions that can be truncated to are at the
beginning of a statement. We are relying on this fact to be able
to set the at_least_one_stmt flag correctly. In other word, if
to set the at_least_one_stmt_committed flag correctly. In other words, if
we are truncating to the beginning of the transaction cache,
there will be no statements in the cache, otherwise, we will
have at least one statement in the transaction cache.
*/
at_least_one_stmt= (pos > 0);
at_least_one_stmt_committed= (pos > 0);
}
/*
@ -244,7 +247,7 @@ public:
Boolean that is true if at least one statement has been committed in the
transaction cache.
*/
bool at_least_one_stmt;
bool at_least_one_stmt_committed;
bool incident;
private:
@ -1277,7 +1280,7 @@ static bool stmt_has_updated_trans_table(THD *thd)
{
Ha_trx_info *ha_info;
for (ha_info= thd->transaction.stmt.ha_list; ha_info; ha_info= ha_info->next())
for (ha_info= thd->transaction.stmt.ha_list; ha_info && ha_info->is_started(); ha_info= ha_info->next())
{
if (ha_info->is_trx_read_write() && ha_info->ht() != binlog_hton)
return (TRUE);
@ -1540,13 +1543,17 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all)
YESNO(in_transaction),
YESNO(thd->transaction.all.modified_non_trans_table),
YESNO(thd->transaction.stmt.modified_non_trans_table)));
if (!in_transaction || all)
if (!in_transaction || all ||
(!all && !trx_data->at_least_one_stmt_committed &&
!stmt_has_updated_trans_table(thd) &&
thd->transaction.stmt.modified_non_trans_table))
{
Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, TRUE, 0);
error= binlog_end_trans(thd, trx_data, &qev, all);
goto end;
}
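/*
Remember whether the transaction cache already contains at least one
committed statement; the commit/rollback conditions above use this flag
to distinguish a single mixed-engine statement from a statement that is
part of a larger transaction.
*/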
trx_data->at_least_one_stmt_committed = my_b_tell(&trx_data->trans_log) > 0;
end:
if (!all)
trx_data->before_stmt_pos = MY_OFF_T_UNDEF; // part of the stmt commit
@ -1613,15 +1620,18 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all)
{
/*
We flush the cache with a rollback, wrapped in a begin/rollback if:
. aborting a transcation that modified a non-transactional table or;
. aborting a transaction that modified a non-transactional table;
. aborting a statement that modified both transactional and
non-transctional tables but which is not in the boundaries of any
transaction;
non-transactional tables but which is not in the boundaries of any
transaction or there was no early change;
. the OPTION_KEEP_LOG option is activated.
*/
if ((all && thd->transaction.all.modified_non_trans_table) ||
(!all && thd->transaction.stmt.modified_non_trans_table &&
!(thd->options & (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT))) ||
(!all && thd->transaction.stmt.modified_non_trans_table &&
!trx_data->at_least_one_stmt_committed &&
thd->current_stmt_binlog_row_based) ||
((thd->options & OPTION_KEEP_LOG)))
{
Query_log_event qev(thd, STRING_WITH_LEN("ROLLBACK"), TRUE, TRUE, 0);

View File

@ -1853,6 +1853,7 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td,
{
const uchar *value0= value;
const uchar *null_bits= value;
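/*
Null bits in a row event cover only the columns actually present in the
event, so they must be tracked with their own counter instead of the
table column index (see Bug#47323).
*/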
uint null_bit_index= 0;
char typestr[64]= "";
value+= (m_width + 7) / 8;
@ -1861,7 +1862,8 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td,
for (size_t i= 0; i < td->size(); i ++)
{
int is_null= (null_bits[i / 8] >> (i % 8)) & 0x01;
int is_null= (null_bits[null_bit_index / 8]
>> (null_bit_index % 8)) & 0x01;
if (bitmap_is_set(cols_bitmap, i) == 0)
continue;
@ -1898,6 +1900,8 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td,
}
my_b_printf(file, "\n");
null_bit_index++;
}
return value - value0;
}
@ -3856,6 +3860,7 @@ bool Format_description_log_event::write(IO_CACHE* file)
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
int Format_description_log_event::do_apply_event(Relay_log_info const *rli)
{
int ret= 0;
DBUG_ENTER("Format_description_log_event::do_apply_event");
#ifdef USING_TRANSACTIONS
@ -3897,17 +3902,21 @@ int Format_description_log_event::do_apply_event(Relay_log_info const *rli)
0, then 96, then jump to first really asked event (which is
>96). So this is ok.
*/
DBUG_RETURN(Start_log_event_v3::do_apply_event(rli));
ret= Start_log_event_v3::do_apply_event(rli);
}
DBUG_RETURN(0);
if (!ret)
{
/* Save the information describing this binlog */
delete rli->relay_log.description_event_for_exec;
const_cast<Relay_log_info *>(rli)->relay_log.description_event_for_exec= this;
}
DBUG_RETURN(ret);
}
int Format_description_log_event::do_update_pos(Relay_log_info *rli)
{
/* save the information describing this binlog */
delete rli->relay_log.description_event_for_exec;
rli->relay_log.description_event_for_exec= this;
if (server_id == (uint32) ::server_id)
{
/*
@ -7497,6 +7506,7 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
thd->reset_current_stmt_binlog_row_based();
const_cast<Relay_log_info*>(rli)->cleanup_context(thd, error);
thd->is_slave_error= 1;
DBUG_RETURN(error);
}
/*
This code would ideally be placed in do_update_pos() instead, but
@ -7525,6 +7535,14 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
const_cast<Relay_log_info*>(rli)->last_event_start_time= my_time(0);
}
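/*
If this event ends the statement, commit the row events here rather than
in do_update_pos(), so that a failure is reported against the right table
and returned to the caller.
*/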
if (get_flags(STMT_END_F))
if ((error= rows_event_stmt_cleanup(rli, thd)))
rli->report(ERROR_LEVEL, error,
"Error in %s event: commit of row events failed, "
"table `%s`.`%s`",
get_type_str(), m_table->s->db.str,
m_table->s->table_name.str);
DBUG_RETURN(error);
}
@ -7623,33 +7641,19 @@ Rows_log_event::do_update_pos(Relay_log_info *rli)
if (get_flags(STMT_END_F))
{
if ((error= rows_event_stmt_cleanup(rli, thd)) == 0)
{
/*
Indicate that a statement is finished.
Step the group log position if we are not in a transaction,
otherwise increase the event log position.
*/
rli->stmt_done(log_pos, when);
/*
Clear any errors pushed in thd->net.last_err* if for example "no key
found" (as this is allowed). This is a safety measure; apparently
those errors (e.g. when executing a Delete_rows_log_event of a
non-existing row, like in rpl_row_mystery22.test,
thd->net.last_error = "Can't find record in 't1'" and last_errno=1032)
do not become visible. We still prefer to wipe them out.
*/
thd->clear_error();
}
else
{
rli->report(ERROR_LEVEL, error,
"Error in %s event: commit of row events failed, "
"table `%s`.`%s`",
get_type_str(), m_table->s->db.str,
m_table->s->table_name.str);
}
/*
Indicate that a statement is finished.
Step the group log position if we are not in a transaction,
otherwise increase the event log position.
*/
rli->stmt_done(log_pos, when);
/*
Clear any errors in thd->net.last_err*. It is not known if this is
needed or not. It is believed that any errors that may exist in
thd->net.last_err* are allowed. Examples of errors are "key not
found", which is produced in the test case rpl_row_conflicts.test
*/
thd->clear_error();
}
else
{

View File

@ -1814,7 +1814,56 @@ int Old_rows_log_event::do_apply_event(Relay_log_info const *rli)
const_cast<Relay_log_info*>(rli)->last_event_start_time= my_time(0);
}
DBUG_RETURN(0);
if (get_flags(STMT_END_F))
{
/*
This is the end of a statement or transaction, so close (and
unlock) the tables we opened when processing the
Table_map_log_event starting the statement.
OBSERVER. This will clear *all* mappings, not only those that
are open for the table. There is no good handle for on-close
actions for tables.
NOTE. Even if we have no table ('table' == 0) we still need to be
here, so that we increase the group relay log position. If we didn't, we
could have a group relay log position which lags behind "forever"
(assume the last master's transaction is ignored by the slave because of
replicate-ignore rules).
*/
thd->binlog_flush_pending_rows_event(true);
/*
If this event is not in a transaction, the call below will, if some
transactional storage engines are involved, commit the statement into
them and flush the pending event to binlog.
If this event is in a transaction, the call will do nothing, but a
Xid_log_event will come next which will, if some transactional engines
are involved, commit the transaction and flush the pending event to the
binlog.
*/
if ((error= ha_autocommit_or_rollback(thd, 0)))
rli->report(ERROR_LEVEL, error,
"Error in %s event: commit of row events failed, "
"table `%s`.`%s`",
get_type_str(), m_table->s->db.str,
m_table->s->table_name.str);
/*
Now what if this is not a transactional engine? We still need to
flush the pending event to the binlog; we did it with
thd->binlog_flush_pending_rows_event(). Note that we imitate
what is done for real queries: a call to
ha_autocommit_or_rollback() (sometimes only if involves a
transactional engine), and a call to be sure to have the pending
event flushed.
*/
thd->reset_current_stmt_binlog_row_based();
const_cast<Relay_log_info*>(rli)->cleanup_context(thd, 0);
}
DBUG_RETURN(error);
}
@ -1844,71 +1893,18 @@ Old_rows_log_event::do_update_pos(Relay_log_info *rli)
if (get_flags(STMT_END_F))
{
/*
This is the end of a statement or transaction, so close (and
unlock) the tables we opened when processing the
Table_map_log_event starting the statement.
OBSERVER. This will clear *all* mappings, not only those that
are open for the table. There is not good handle for on-close
actions for tables.
NOTE. Even if we have no table ('table' == 0) we still need to be
here, so that we increase the group relay log position. If we didn't, we
could have a group relay log position which lags behind "forever"
(assume the last master's transaction is ignored by the slave because of
replicate-ignore rules).
*/
thd->binlog_flush_pending_rows_event(true);
Indicate that a statement is finished.
Step the group log position if we are not in a transaction,
otherwise increase the event log position.
*/
rli->stmt_done(log_pos, when);
/*
If this event is not in a transaction, the call below will, if some
transactional storage engines are involved, commit the statement into
them and flush the pending event to binlog.
If this event is in a transaction, the call will do nothing, but a
Xid_log_event will come next which will, if some transactional engines
are involved, commit the transaction and flush the pending event to the
binlog.
Clear any errors in thd->net.last_err*. It is not known if this is
needed or not. It is believed that any errors that may exist in
thd->net.last_err* are allowed. Examples of errors are "key not
found", which is produced in the test case rpl_row_conflicts.test
*/
error= ha_autocommit_or_rollback(thd, 0);
/*
Now what if this is not a transactional engine? We still need to
flush the pending event to the binlog; we did it with
thd->binlog_flush_pending_rows_event(). Note that we imitate
what is done for real queries: a call to
ha_autocommit_or_rollback() (sometimes only if it involves a
transactional engine), and a call to be sure to have the pending
event flushed.
*/
thd->reset_current_stmt_binlog_row_based();
rli->cleanup_context(thd, 0);
if (error == 0)
{
/*
Indicate that a statement is finished.
Step the group log position if we are not in a transaction,
otherwise increase the event log position.
*/
rli->stmt_done(log_pos, when);
/*
Clear any errors pushed in thd->net.client_last_err* if for
example "no key found" (as this is allowed). This is a safety
measure; apparently those errors (e.g. when executing a
Delete_rows_log_event_old of a non-existing row, like in
rpl_row_mystery22.test, thd->net.last_error = "Can't
find record in 't1'" and last_errno=1032) do not become
visible. We still prefer to wipe them out.
*/
thd->clear_error();
}
else
rli->report(ERROR_LEVEL, error,
"Error in %s event: commit of row events failed, "
"table `%s`.`%s`",
get_type_str(), m_table->s->db.str,
m_table->s->table_name.str);
thd->clear_error();
}
else
{

View File

@ -2447,6 +2447,7 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr)
table->tablenr= tablenr;
table->map= (table_map) 1 << tablenr;
table->force_index= table_list->force_index;
table->force_index_order= table->force_index_group= 0;
table->covering_keys= table->s->keys_for_keyread;
table->merge_keys.clear_all();
}

View File

@ -773,7 +773,6 @@ extern int wild_compare(const char *str,const char *wildstr,
extern WF_PACK *wf_comp(char * str);
extern int wf_test(struct wild_file_pack *wf_pack,const char *name);
extern void wf_end(struct wild_file_pack *buffer);
extern size_t strip_sp(char * str);
extern my_bool array_append_string_unique(const char *str,
const char **array, size_t size);
extern void get_date(char * to,int timeflag,time_t use_time);

View File

@ -5881,6 +5881,7 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field,
if (type == Item_func::LT_FUNC && (value->val_int() > 0))
type = Item_func::LE_FUNC;
else if (type == Item_func::GT_FUNC &&
(field->type() != FIELD_TYPE_BIT) &&
!((Field_num*)field)->unsigned_flag &&
!((Item_int*)value)->unsigned_flag &&
(value->val_int() < 0))
@ -5918,7 +5919,9 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field,
*/
if (field->result_type() == INT_RESULT &&
value->result_type() == INT_RESULT &&
((Field_num*)field)->unsigned_flag && !((Item_int*)value)->unsigned_flag)
((field->type() == FIELD_TYPE_BIT ||
((Field_num *) field)->unsigned_flag) &&
!((Item_int*) value)->unsigned_flag))
{
longlong item_val= value->val_int();
if (item_val < 0)
@ -6514,6 +6517,63 @@ get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1)
}
/**
Combine two range expression under a common OR. On a logical level, the
transformation is key_or( expr1, expr2 ) => expr1 OR expr2.
Both expressions are assumed to be in the SEL_ARG format. In a logical sense,
the format is reminiscent of DNF, since an expression such as the following
( 1 < kp1 < 10 AND p1 ) OR ( 10 <= kp2 < 20 AND p2 )
where there is a key consisting of keyparts ( kp1, kp2, ..., kpn ) and p1
and p2 are valid SEL_ARG expressions over keyparts kp2 ... kpn, is a valid
SEL_ARG condition. The disjuncts appear ordered by the minimum endpoint of
the first range and ranges must not overlap. It follows that they are also
ordered by maximum endpoints. Thus
( 1 < kp1 <= 2 AND ( kp2 = 2 OR kp2 = 3 ) ) OR kp1 = 3
is a valid SEL_ARG expression for a key of at least 2 keyparts.
For simplicity, we will assume that expr2 is a single range predicate,
i.e. of the form ( a < x < b AND ... ). It is easy to generalize to a
disjunction of several predicates by subsequently calling key_or for each
disjunct.
The algorithm iterates over each disjunct of expr1, and for each disjunct
where the first keypart's range overlaps with the first keypart's range in
expr2:
If the predicates are equal for the rest of the keyparts, or if there are
no more, the range in expr2 has its endpoints copied in, and the SEL_ARG
node in expr2 is deallocated. If more ranges became connected in expr1, the
surplus is also deallocated. If they differ, two ranges are created.
- The range leading up to the overlap. Empty if endpoints are equal.
- The overlapping sub-range. May be the entire range if they are equal.
Finally, there may be one more range if expr2's first keypart's range has a
greater maximum endpoint than the last range in expr1.
For the overlapping sub-range, we recursively call key_or. Thus in order to
compute key_or of
(1) ( 1 < kp1 < 10 AND 1 < kp2 < 10 )
(2) ( 2 < kp1 < 20 AND 4 < kp2 < 20 )
We create the ranges 1 < kp1 <= 2, 2 < kp1 < 10, 10 <= kp1 < 20. For the
first one, we simply hook on the condition for the second keypart from (1)
: 1 < kp2 < 10. For the second range 2 < kp1 < 10, key_or( 1 < kp2 < 10, 4
< kp2 < 20 ) is called, yielding 1 < kp2 < 20. For the last range, we reuse
the range 4 < kp2 < 20 from (2) for the second keypart. The result is thus
( 1 < kp1 <= 2 AND 1 < kp2 < 10 ) OR
( 2 < kp1 < 10 AND 1 < kp2 < 20 ) OR
( 10 <= kp1 < 20 AND 4 < kp2 < 20 )
*/
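
(Before the function itself, a standalone illustration may help. The sketch below is not the SEL_ARG implementation; it unions two ordered, non-overlapping lists of integer ranges for a single keypart, which is the one-dimensional core of the disjunct-by-disjunct merge described above. Range and key_or_1d are made-up names.)

// Hypothetical, simplified one-keypart analogue of key_or(): union of two
// ordered, non-overlapping range lists. Real SEL_ARG trees also carry
// next_key_part conditions and open/closed endpoint flags.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Range { int min; int max; };          // half-open [min, max)

static std::vector<Range> key_or_1d(std::vector<Range> a,
                                    std::vector<Range> b)
{
  std::vector<Range> all(a);
  all.insert(all.end(), b.begin(), b.end());
  std::sort(all.begin(), all.end(),
            [](const Range &x, const Range &y) { return x.min < y.min; });
  std::vector<Range> out;
  for (const Range &r : all)
  {
    if (!out.empty() && r.min <= out.back().max)        // overlap or touch
      out.back().max = std::max(out.back().max, r.max); // merge disjuncts
    else
      out.push_back(r);                                 // new disjunct
  }
  return out;
}

int main()
{
  // (1 < kp1 < 10) OR (2 < kp1 < 20), on integers roughly [2,10) OR [3,20)
  std::vector<Range> r = key_or_1d({{2, 10}}, {{3, 20}});
  for (const Range &x : r)
    std::printf("[%d, %d)\n", x.min, x.max);            // prints [2, 20)
  return 0;
}
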
static SEL_ARG *
key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2)
{
@ -6665,7 +6725,21 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2)
key1=key1->tree_delete(save);
}
last->copy_min(tmp);
if (last->copy_min(key2) || last->copy_max(key2))
bool full_range= last->copy_min(key2);
if (!full_range)
{
if (last->next && key2->cmp_max_to_min(last->next) >= 0)
{
last->max_value= last->next->min_value;
if (last->next->min_flag & NEAR_MIN)
last->max_flag&= ~NEAR_MAX;
else
last->max_flag|= NEAR_MAX;
}
else
full_range= last->copy_max(key2);
}
if (full_range)
{ // Full range
key1->free_tree();
for (; key2 ; key2=key2->next)
@ -6675,8 +6749,6 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2)
return 0;
}
}
key2=key2->next;
continue;
}
if (cmp >= 0 && tmp->cmp_min_to_min(key2) < 0)

View File

@ -97,7 +97,8 @@ static ulonglong get_exact_record_count(TABLE_LIST *tables)
@note
This function is only called for queries with sum functions and no
GROUP BY part.
GROUP BY part. This means that the result set shall contain a single
row only
@retval
0 no errors

View File

@ -2083,8 +2083,7 @@ static int has_temporary_error(THD *thd)
@retval 2 No error calling ev->apply_event(), but error calling
ev->update_pos().
*/
int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli,
bool skip)
int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli)
{
int exec_res= 0;
@ -2129,38 +2128,34 @@ int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli,
ev->when= my_time(0);
ev->thd = thd; // because up to this point, ev->thd == 0
if (skip)
{
int reason= ev->shall_skip(rli);
if (reason == Log_event::EVENT_SKIP_COUNT)
--rli->slave_skip_counter;
pthread_mutex_unlock(&rli->data_lock);
if (reason == Log_event::EVENT_SKIP_NOT)
exec_res= ev->apply_event(rli);
#ifndef DBUG_OFF
/*
This only prints information to the debug trace.
TODO: Print an informational message to the error log?
*/
static const char *const explain[] = {
// EVENT_SKIP_NOT,
"not skipped",
// EVENT_SKIP_IGNORE,
"skipped because event should be ignored",
// EVENT_SKIP_COUNT
"skipped because event skip counter was non-zero"
};
DBUG_PRINT("info", ("OPTION_BEGIN: %d; IN_STMT: %d",
thd->options & OPTION_BEGIN ? 1 : 0,
rli->get_flag(Relay_log_info::IN_STMT)));
DBUG_PRINT("skip_event", ("%s event was %s",
ev->get_type_str(), explain[reason]));
#endif
}
else
int reason= ev->shall_skip(rli);
if (reason == Log_event::EVENT_SKIP_COUNT)
--rli->slave_skip_counter;
pthread_mutex_unlock(&rli->data_lock);
if (reason == Log_event::EVENT_SKIP_NOT)
exec_res= ev->apply_event(rli);
#ifndef DBUG_OFF
/*
This only prints information to the debug trace.
TODO: Print an informational message to the error log?
*/
static const char *const explain[] = {
// EVENT_SKIP_NOT,
"not skipped",
// EVENT_SKIP_IGNORE,
"skipped because event should be ignored",
// EVENT_SKIP_COUNT
"skipped because event skip counter was non-zero"
};
DBUG_PRINT("info", ("OPTION_BEGIN: %d; IN_STMT: %d",
thd->options & OPTION_BEGIN ? 1 : 0,
rli->get_flag(Relay_log_info::IN_STMT)));
DBUG_PRINT("skip_event", ("%s event was %s",
ev->get_type_str(), explain[reason]));
#endif
DBUG_PRINT("info", ("apply_event error = %d", exec_res));
if (exec_res == 0)
{
@ -2279,7 +2274,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli)
delete ev;
DBUG_RETURN(1);
}
exec_res= apply_event_and_update_pos(ev, thd, rli, TRUE);
exec_res= apply_event_and_update_pos(ev, thd, rli);
/*
Format_description_log_event should not be deleted because it will be

View File

@ -190,8 +190,7 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset,
void set_slave_thread_options(THD* thd);
void set_slave_thread_default_charset(THD *thd, Relay_log_info const *rli);
void rotate_relay_log(Master_info* mi);
int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli,
bool skip);
int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli);
pthread_handler_t handle_slave_io(void *arg);
pthread_handler_t handle_slave_sql(void *arg);

View File

@ -2311,7 +2311,8 @@ bool reopen_name_locked_table(THD* thd, TABLE_LIST* table_list, bool link_in)
table->tablenr=thd->current_tablenr++;
table->used_fields=0;
table->const_table=0;
table->null_row= table->maybe_null= table->force_index= 0;
table->null_row= table->maybe_null= 0;
table->force_index= table->force_index_order= table->force_index_group= 0;
table->status=STATUS_NO_RECORD;
DBUG_RETURN(FALSE);
}
@ -2969,7 +2970,8 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
table->tablenr=thd->current_tablenr++;
table->used_fields=0;
table->const_table=0;
table->null_row= table->maybe_null= table->force_index= 0;
table->null_row= table->maybe_null= 0;
table->force_index= table->force_index_order= table->force_index_group= 0;
table->status=STATUS_NO_RECORD;
table->insert_values= 0;
table->fulltext_searched= 0;

View File

@ -56,17 +56,20 @@ void mysql_client_binlog_statement(THD* thd)
Format_description_event.
*/
my_bool have_fd_event= TRUE;
if (!thd->rli_fake)
int err;
Relay_log_info *rli;
rli= thd->rli_fake;
if (!rli)
{
thd->rli_fake= new Relay_log_info;
rli= thd->rli_fake= new Relay_log_info;
#ifdef HAVE_purify
thd->rli_fake->is_fake= TRUE;
rli->is_fake= TRUE;
#endif
have_fd_event= FALSE;
}
if (thd->rli_fake && !thd->rli_fake->relay_log.description_event_for_exec)
if (rli && !rli->relay_log.description_event_for_exec)
{
thd->rli_fake->relay_log.description_event_for_exec=
rli->relay_log.description_event_for_exec=
new Format_description_log_event(4);
have_fd_event= FALSE;
}
@ -78,16 +81,16 @@ void mysql_client_binlog_statement(THD* thd)
/*
Out of memory check
*/
if (!(thd->rli_fake &&
thd->rli_fake->relay_log.description_event_for_exec &&
if (!(rli &&
rli->relay_log.description_event_for_exec &&
buf))
{
my_error(ER_OUTOFMEMORY, MYF(0), 1); /* needed 1 bytes */
goto end;
}
thd->rli_fake->sql_thd= thd;
thd->rli_fake->no_storage= TRUE;
rli->sql_thd= thd;
rli->no_storage= TRUE;
for (char const *strptr= thd->lex->comment.str ;
strptr < thd->lex->comment.str + thd->lex->comment.length ; )
@ -170,8 +173,7 @@ void mysql_client_binlog_statement(THD* thd)
}
ev= Log_event::read_log_event(bufptr, event_len, &error,
thd->rli_fake->relay_log.
description_event_for_exec);
rli->relay_log.description_event_for_exec);
DBUG_PRINT("info",("binlog base64 err=%s", error));
if (!ev)
@ -209,18 +211,10 @@ void mysql_client_binlog_statement(THD* thd)
reporting.
*/
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
if (apply_event_and_update_pos(ev, thd, thd->rli_fake, FALSE))
{
delete ev;
/*
TODO: Maybe a better error message since the BINLOG statement
now contains several events.
*/
my_error(ER_UNKNOWN_ERROR, MYF(0), "Error executing BINLOG statement");
goto end;
}
err= ev->apply_event(rli);
#else
err= 0;
#endif
/*
Format_description_log_event should not be deleted because it
will be used to read info about the relay log's format; it
@ -228,8 +222,17 @@ void mysql_client_binlog_statement(THD* thd)
i.e. when this thread terminates.
*/
if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT)
delete ev;
delete ev;
ev= 0;
if (err)
{
/*
TODO: Maybe a better error message since the BINLOG statement
now contains several events.
*/
my_error(ER_UNKNOWN_ERROR, MYF(0), "Error executing BINLOG statement");
goto end;
}
}
}
@ -238,7 +241,7 @@ void mysql_client_binlog_statement(THD* thd)
my_ok(thd);
end:
thd->rli_fake->clear_tables_to_lock();
rli->clear_tables_to_lock();
my_free(buf, MYF(MY_ALLOW_ZERO_PTR));
DBUG_VOID_RETURN;
}

View File

@ -2649,7 +2649,32 @@ public:
MI_COLUMNDEF *recinfo,*start_recinfo;
KEY *keyinfo;
ha_rows end_write_records;
uint field_count,sum_func_count,func_count;
/**
Number of normal fields in the query, including those referred to
from aggregate functions. Hence, "SELECT `field1`,
SUM(`field2`) from t1" sets this counter to 2.
@see count_field_types
*/
uint field_count;
/**
Number of fields in the query that have functions. Includes both
aggregate functions (e.g., SUM) and non-aggregates (e.g., RAND).
Also counts functions referred to from aggregate functions, i.e.,
"SELECT SUM(RAND())" sets this counter to 2.
@see count_field_types
*/
uint func_count;
/**
Number of fields in the query that have aggregate functions. Note
that the optimizer may choose to optimize away these fields by
replacing them with constants, in which case sum_func_count will
need to be updated.
@see opt_sum_query, count_field_types
*/
uint sum_func_count;
uint hidden_field_count;
uint group_parts,group_length,group_null_parts;
uint quick_group;

View File

@ -6335,6 +6335,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ptr->table_name_length=table->table.length;
ptr->lock_type= lock_type;
ptr->updating= test(table_options & TL_OPTION_UPDATING);
/* TODO: remove TL_OPTION_FORCE_INDEX as it looks like it's not used */
ptr->force_index= test(table_options & TL_OPTION_FORCE_INDEX);
ptr->ignore_leaves= test(table_options & TL_OPTION_IGNORE_LEAVES);
ptr->derived= table->sel;

View File

@ -648,8 +648,11 @@ JOIN::prepare(Item ***rref_pointer_array,
this->group= group_list != 0;
unit= unit_arg;
if (tmp_table_param.sum_func_count && !group_list)
implicit_grouping= TRUE;
#ifdef RESTRICTED_GROUP
if (sum_func_count && !group_list && (func_count || field_count))
if (implicit_grouping)
{
my_message(ER_WRONG_SUM_SELECT,ER(ER_WRONG_SUM_SELECT),MYF(0));
goto err;
@ -885,15 +888,23 @@ JOIN::optimize()
}
#endif
/* Optimize count(*), min() and max() */
if (tables_list && tmp_table_param.sum_func_count && ! group_list)
/*
Try to optimize count(*), min() and max() to const fields if
there is implicit grouping (aggregate functions but no
group_list). In this case, the result set shall only contain one
row.
*/
if (tables_list && implicit_grouping)
{
int res;
/*
opt_sum_query() returns HA_ERR_KEY_NOT_FOUND if no rows match
the WHERE conditions,
or 1 if all items were resolved,
or 1 if all items were resolved (optimized away),
or 0, or an error number HA_ERR_...
If all items were resolved by opt_sum_query, there is no need to
open any tables.
*/
if ((res=opt_sum_query(select_lex->leaf_tables, all_fields, conds)))
{
@ -1239,13 +1250,22 @@ JOIN::optimize()
sort_and_group= 0;
}
// Can't use sort on head table if using row cache
// Can't use sort on head table if using join buffering
if (full_join)
{
if (group_list)
simple_group=0;
if (order)
simple_order=0;
TABLE *stable= (sort_by_table == (TABLE *) 1 ?
join_tab[const_tables].table : sort_by_table);
/*
FORCE INDEX FOR ORDER BY can be used to prevent join buffering when
sorting on the first table.
*/
if (!stable || !stable->force_index_order)
{
if (group_list)
simple_group= 0;
if (order)
simple_order= 0;
}
}
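
(In plain terms, and with made-up helper names rather than optimizer code: join buffering reorders rows of the head table, so on-the-fly GROUP/ORDER BY on it has to be abandoned, unless FORCE INDEX FOR ORDER BY on that table suppresses join buffering. A minimal sketch of that decision:)

// Hypothetical restatement of the decision above; not the optimizer code.
#include <cstdio>

static void apply_join_buffer_rule(bool full_join,
                                   bool head_force_index_order,
                                   bool *simple_group, bool *simple_order)
{
  // Join buffering reorders rows of the first non-const table, so grouping
  // and ordering can no longer be done while scanning it -- unless the user
  // forced an index for ORDER BY, which disables join buffering there.
  if (full_join && !head_force_index_order)
  {
    *simple_group = false;
    *simple_order = false;
  }
}

int main()
{
  bool g = true, o = true;
  apply_join_buffer_rule(true, false, &g, &o);
  std::printf("without hint: simple_order=%d\n", o);                    // 0
  g = o = true;
  apply_join_buffer_rule(true, true, &g, &o);
  std::printf("with FORCE INDEX FOR ORDER BY: simple_order=%d\n", o);   // 1
  return 0;
}
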
/*
@ -2035,7 +2055,8 @@ JOIN::exec()
count_field_types(select_lex, &curr_join->tmp_table_param,
*curr_all_fields, 0);
if (curr_join->group || curr_join->tmp_table_param.sum_func_count ||
if (curr_join->group || curr_join->implicit_grouping ||
curr_join->tmp_table_param.sum_func_count ||
(procedure && (procedure->flags & PROC_GROUP)))
{
if (make_group_fields(this, curr_join))
@ -10905,6 +10926,12 @@ Next_select_func setup_end_select_func(JOIN *join)
}
else
{
/*
Choose method for presenting result to user. Use end_send_group
if the query requires grouping (has a GROUP BY clause and/or one or
more aggregate functions). Use end_send if the query should not
be grouped.
*/
if ((join->sort_and_group ||
(join->procedure && join->procedure->flags & PROC_GROUP)) &&
!tmp_tbl->precomputed_group_by)

View File

@ -283,7 +283,14 @@ public:
TABLE **table,**all_tables,*sort_by_table;
uint tables,const_tables;
uint send_group_parts;
bool sort_and_group,first_record,full_join,group, no_field_update;
/**
Indicates that grouping will be performed on the result set during
query execution. This field belongs to query execution.
@see make_group_fields, alloc_group_fields, JOIN::exec
*/
bool sort_and_group;
bool first_record,full_join,group, no_field_update;
bool do_send_rows;
/**
TRUE when we want to resume nested loop iterations when
@ -362,6 +369,8 @@ public:
simple_xxxxx is set if ORDER/GROUP BY doesn't include any references
to other tables than the first non-constant table in the JOIN.
It's also set if ORDER/GROUP BY is empty.
Used for deciding for or against using a temporary table to compute
GROUP/ORDER BY.
*/
bool simple_order, simple_group;
/**
@ -431,6 +440,7 @@ public:
tables= 0;
const_tables= 0;
join_list= 0;
implicit_grouping= FALSE;
sort_and_group= 0;
first_record= 0;
do_send_rows= 1;
@ -536,6 +546,11 @@ public:
select_lex == unit->fake_select_lex));
}
private:
/**
TRUE if the query contains an aggregate function but has no GROUP
BY clause.
*/
bool implicit_grouping;
bool make_simple_join(JOIN *join, TABLE *tmp_table);
};

View File

@ -4638,7 +4638,8 @@ Item_subselect *TABLE_LIST::containing_subselect()
(TABLE_LIST::index_hints). Using the information in this tagged list
this function sets the members st_table::keys_in_use_for_query,
st_table::keys_in_use_for_group_by, st_table::keys_in_use_for_order_by,
st_table::force_index and st_table::covering_keys.
st_table::force_index, st_table::force_index_order,
st_table::force_index_group and st_table::covering_keys.
Current implementation of the runtime does not allow mixing FORCE INDEX
and USE INDEX, so this is checked here. Then the FORCE INDEX list
@ -4766,14 +4767,28 @@ bool TABLE_LIST::process_index_hints(TABLE *tbl)
}
/* process FORCE INDEX as USE INDEX with a flag */
if (!index_order[INDEX_HINT_FORCE].is_clear_all())
{
tbl->force_index_order= TRUE;
index_order[INDEX_HINT_USE].merge(index_order[INDEX_HINT_FORCE]);
}
if (!index_group[INDEX_HINT_FORCE].is_clear_all())
{
tbl->force_index_group= TRUE;
index_group[INDEX_HINT_USE].merge(index_group[INDEX_HINT_FORCE]);
}
/*
TODO: get rid of tbl->force_index (on if any FORCE INDEX is specified) and
create tbl->force_index_join instead.
Then use the correct force_index_XX instead of the global one.
*/
if (!index_join[INDEX_HINT_FORCE].is_clear_all() ||
!index_order[INDEX_HINT_FORCE].is_clear_all() ||
!index_group[INDEX_HINT_FORCE].is_clear_all())
tbl->force_index_group || tbl->force_index_order)
{
tbl->force_index= TRUE;
index_join[INDEX_HINT_USE].merge(index_join[INDEX_HINT_FORCE]);
index_order[INDEX_HINT_USE].merge(index_order[INDEX_HINT_FORCE]);
index_group[INDEX_HINT_USE].merge(index_group[INDEX_HINT_FORCE]);
}
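
(For illustration only, with plain bit masks standing in for the server's key_map class and hypothetical field names: the hint handling just above boils down to setting a per-kind force flag, folding FORCE into USE, and setting the global force_index flag if any FORCE hint was given.)

// Hypothetical sketch of the hint processing above: FORCE INDEX FOR ORDER BY
// and FOR GROUP BY each set a flag and are then folded into the USE INDEX
// bitmaps, so later code only looks at the USE sets plus the flags.
#include <cstdint>
#include <cstdio>

struct IndexHints {
  uint64_t use_order, force_order;   // bit i set = index i listed in the hint
  uint64_t use_group, force_group;
  uint64_t use_join,  force_join;
};

struct TableFlags { bool force_index, force_index_order, force_index_group; };

static void process_index_hints(IndexHints *h, TableFlags *t)
{
  if (h->force_order) { t->force_index_order = true; h->use_order |= h->force_order; }
  if (h->force_group) { t->force_index_group = true; h->use_group |= h->force_group; }
  if (h->force_join || t->force_index_order || t->force_index_group)
  {
    t->force_index = true;
    h->use_join |= h->force_join;
  }
}

int main()
{
  IndexHints h = {0, 1u << 2, 0, 0, 0, 0};   // FORCE INDEX FOR ORDER BY (idx 2)
  TableFlags t = {false, false, false};
  process_index_hints(&h, &t);
  std::printf("force_index=%d force_index_order=%d use_order=%#llx\n",
              t.force_index, t.force_index_order,
              (unsigned long long) h.use_order);
  return 0;
}
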
/* apply USE INDEX */

View File

@ -752,6 +752,18 @@ struct st_table {
bytes, it would take up 4.
*/
my_bool force_index;
/**
Flag set when the statement contains FORCE INDEX FOR ORDER BY
See TABLE_LIST::process_index_hints().
*/
my_bool force_index_order;
/**
Flag set when the statement contains FORCE INDEX FOR GROUP BY
See TABLE_LIST::process_index_hints().
*/
my_bool force_index_group;
my_bool distinct,const_table,no_rows;
/**

View File

@ -16,8 +16,21 @@
# This is the CMakeLists for InnoDB Plugin
# TODO: remove the two FLAGS_DEBUG settings when merging into
# 6.0-based trees, like is already the case for other engines in
# those trees.
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
INCLUDE("${PROJECT_SOURCE_DIR}/storage/mysql_storage_engine.cmake")
# Starting at 5.1.38, MySQL CMake files are simplified. But the plugin
# CMakeLists.txt still needs to work with previous versions of MySQL.
IF (MYSQL_VERSION_ID GREATER "50137")
INCLUDE("${PROJECT_SOURCE_DIR}/storage/mysql_storage_engine.cmake")
ENDIF (MYSQL_VERSION_ID GREATER "50137")
IF (CMAKE_SIZEOF_VOID_P MATCHES 8)
SET(WIN64 TRUE)
ENDIF (CMAKE_SIZEOF_VOID_P MATCHES 8)
# Include directories under innobase
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/innobase/include
@ -32,10 +45,10 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
# Removing compiler optimizations for innodb/mem/* files on 64-bit Windows
# due to 64-bit compiler error, See MySQL Bug #19424, #36366, #34297
IF(MSVC AND CMAKE_SIZEOF_VOID_P MATCHES 8)
IF (MSVC AND WIN64)
SET_SOURCE_FILES_PROPERTIES(mem/mem0mem.c mem/mem0pool.c
PROPERTIES COMPILE_FLAGS -Od)
ENDIF(MSVC AND CMAKE_SIZEOF_VOID_P MATCHES 8)
ENDIF (MSVC AND WIN64)
SET(INNOBASE_SOURCES btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c
buf/buf0buddy.c buf/buf0buf.c buf/buf0flu.c buf/buf0lru.c buf/buf0rea.c
@ -70,5 +83,5 @@ SET(INNOBASE_SOURCES btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c
usr/usr0sess.c
ut/ut0byte.c ut/ut0dbg.c ut/ut0mem.c ut/ut0rnd.c ut/ut0ut.c ut/ut0vec.c
ut/ut0list.c ut/ut0wqueue.c)
ADD_DEFINITIONS(-DHAVE_WINDOWS_ATOMICS -DINNODB_RW_LOCKS_USE_ATOMICS -DIB_HAVE_PAUSE_INSTRUCTION)
MYSQL_STORAGE_ENGINE(INNOBASE)
ADD_DEFINITIONS(-DHAVE_WINDOWS_ATOMICS -DIB_HAVE_PAUSE_INSTRUCTION)
MYSQL_STORAGE_ENGINE(INNOBASE)

View File

@ -1,3 +1,149 @@
2009-10-01 The InnoDB Team
* fsp/fsp0fsp.c, row/row0merge.c:
Clean up after a crash during DROP INDEX. When InnoDB crashes
while dropping an index, ensure that the index will be completely
dropped during crash recovery. The MySQL .frm file may still
contain the dropped index, but there is little that we can do
about it.
2009-09-28 The InnoDB Team
* handler/ha_innodb.cc:
When a secondary index exists in the MySQL .frm file but not in
the InnoDB data dictionary, return an error instead of letting an
assertion fail in index_read.
2009-09-28 The InnoDB Team
* btr/btr0btr.c, buf/buf0buf.c, include/page0page.h,
include/page0zip.h, page/page0cur.c, page/page0page.c,
page/page0zip.c:
Do not write to PAGE_INDEX_ID when restoring an uncompressed page
after a compression failure. The field should only be written
when creating a B-tree page. This fix addresses a race condition
in a debug assertion.
2009-09-28 The InnoDB Team
* fil/fil0fil.c:
Try to prevent the reuse of tablespace identifiers after InnoDB
has crashed during table creation. Also, refuse to start if files
with duplicate tablespace identifiers are encountered.
2009-09-25 The InnoDB Team
* include/os0file.h, os/os0file.c:
Fix Bug#47055 unconditional exit(1) on ERROR_WORKING_SET_QUOTA
1453 (0x5AD) for InnoDB backend
2009-09-19 The InnoDB Team
* handler/ha_innodb.cc, mysql-test/innodb-consistent-master.opt,
mysql-test/innodb-consistent.result,
mysql-test/innodb-consistent.test:
Fix Bug#37232 Innodb might get too many read locks for DML with
repeatable-read
2009-09-19 The InnoDB Team
* fsp/fsp0fsp.c:
Fix Bug#31183 Tablespace full problems not reported in error log,
error message unclear
2009-09-17 The InnoDB Team
* mysql-test/innodb-zip.result, mysql-test/innodb-zip.test:
Make the test pass with zlib 1.2.3.3. Apparently, the definition
of compressBound() has changed between zlib versions, and the
maximum record size of a table with 1K compressed page size has
been reduced by one byte. This is an arbitrary test. In practical
applications, for good write performance, the compressed page size
should be chosen to be bigger than the absolute minimum.
2009-09-16 The InnoDB Team
* handler/ha_innodb.cc:
Fix Bug#46256 drop table with unknown collation crashes innodb
2009-09-16 The InnoDB Team
* dict/dict0dict.c, handler/ha_innodb.cc,
mysql-test/innodb_bug44369.result, mysql-test/innodb_bug44369.test,
row/row0mysql.c:
Fix Bug#44369 InnoDB: Does not uniformly disallow disallowed column
names
2009-09-16 The InnoDB Team
* handler/ha_innodb.cc, include/db0err.h,
mysql-test/innodb_bug46000.result, mysql-test/innodb_bug46000.test:
Fix Bug#46000 using index called GEN_CLUST_INDEX crashes server
2009-09-02 The InnoDB Team
* include/lock0lock.h, include/row0mysql.h, lock/lock0lock.c,
row/row0mysql.c:
Fix a regression introduced by the fix for MySQL bug#26316. We check
whether a transaction holds any AUTOINC locks before we acquire
the kernel mutex and release those locks.
2009-08-27 The InnoDB Team
* dict/dict0dict.c, include/dict0dict.h,
mysql-test/innodb_bug44571.result, mysql-test/innodb_bug44571.test:
Fix Bug#44571 InnoDB Plugin crashes on ADD INDEX
2009-08-27 The InnoDB Team
* row/row0merge.c:
Fix a bug in the merge sort that can corrupt indexes in fast index
creation. Add some consistency checks. Check that the number of
records remains constant in every merge sort pass.
2009-08-27 The InnoDB Team
* buf/buf0buf.c, buf/buf0lru.c, buf/buf0rea.c, handler/ha_innodb.cc,
include/buf0buf.h, include/buf0buf.ic, include/buf0lru.h,
include/ut0ut.h, ut/ut0ut.c:
Make it possible to tune the buffer pool LRU eviction policy to be
more resistant against index scans. Introduce the settable global
variables innodb_old_blocks_pct and innodb_old_blocks_time for
controlling the buffer pool eviction policy. The parameter
innodb_old_blocks_pct (5..95) controls the desired amount of "old"
blocks in the LRU list. The default is 37, corresponding to the
old fixed ratio of 3/8. Each time a block is accessed, it will be
moved to the "new" blocks if its first access was at least
innodb_old_blocks_time milliseconds ago (default 0, meaning every
block). The idea is that in index scans, blocks will be accessed
a few times within innodb_old_blocks_time, and they will remain in
the "old" section of the LRU list. Thus, when innodb_old_blocks_time
is nonzero, blocks retrieved for one-time index scans will be more
likely candidates for eviction than blocks that are accessed in
random patterns.
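
(Editorial illustration, not InnoDB code: the toy model below mimics the policy described in this entry. Blocks are inserted at the head of the "old" tail of the list on first read, and are promoted to "new" only when touched again at least old_blocks_time milliseconds after their first access, so a one-shot scan never promotes anything. All names are invented.)

// Toy model of the innodb_old_blocks_pct / innodb_old_blocks_time idea.
#include <cstdio>
#include <iterator>
#include <list>

struct Block { int page_no; long first_access_ms; bool old_block; };

struct ToyLRU {
  std::list<Block> lru;                 // front = newest
  long old_blocks_time = 1000;          // ms, like innodb_old_blocks_time

  void access(int page_no, long now_ms)
  {
    for (auto it = lru.begin(); it != lru.end(); ++it)
      if (it->page_no == page_no)
      {
        // Promote to the "new" sublist only if the first access is old enough.
        if (it->old_block && now_ms - it->first_access_ms >= old_blocks_time)
        {
          Block b = *it;
          b.old_block = false;
          lru.erase(it);
          lru.push_front(b);
        }
        return;
      }
    // Miss: read the page in at the head of the "old" tail of the list,
    // roughly 37% of the list length away from the tail (old_pct = 37).
    auto mid = lru.begin();
    std::advance(mid, lru.size() * 63 / 100);
    lru.insert(mid, Block{page_no, now_ms, true});
  }
};

int main()
{
  ToyLRU pool;
  for (int p = 0; p < 8; p++) pool.access(p, p);   // one-shot scan stays "old"
  pool.access(3, 5000);                            // re-read much later: young
  for (const Block &b : pool.lru)
    std::printf("page %d %s\n", b.page_no, b.old_block ? "old" : "new");
  return 0;
}
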
2009-08-26 The InnoDB Team
* handler/ha_innodb.cc, os/os0file.c:
Fix Bug#42885 buf_read_ahead_random, buf_read_ahead_linear counters,
thread wakeups
2009-08-20 The InnoDB Team
* lock/lock0lock.c:
Fix Bug#46650 Innodb assertion autoinc_lock == lock in
lock_table_remove_low on INSERT SELECT
2009-08-13 The InnoDB Team
* handler/handler0alter.cc:
Fix Bug#46657 InnoDB plugin: invalid read in index_merge_innodb test
(Valgrind)
2009-08-11 The InnoDB Team
InnoDB Plugin 1.0.4 released
2009-07-20 The InnoDB Team
* buf/buf0rea.c, handler/ha_innodb.cc, include/srv0srv.h,

View File

@ -31,7 +31,6 @@ DEFS= @DEFS@
noinst_HEADERS= \
handler/ha_innodb.h \
handler/handler0vars.h \
handler/i_s.h \
include/btr0btr.h \
include/btr0btr.ic \

View File

@ -1,29 +0,0 @@
This is the source of the InnoDB Plugin 1.0.4 for MySQL 5.1
===========================================================
Instructions for compiling the plugin:
--------------------------------------
1. Get the latest MySQL 5.1 sources from
http://dev.mysql.com/downloads/mysql/5.1.html#source
2. Replace the contents of the mysql-5.1.N/storage/innobase/ directory
with the contents of this directory.
3. Optional (only necessary if you are going to run tests from the
mysql-test suite): cd into the innobase directory and run ./setup.sh
4. Compile MySQL as usual.
5. Enjoy!
See the online documentation for more detailed instructions:
http://www.innodb.com/doc/innodb_plugin-1.0/innodb-plugin-installation.html
For more information about InnoDB visit
http://www.innodb.com
Please report any problems or issues with the plugin in the InnoDB Forums
http://forums.innodb.com/ or in the MySQL Bugs database http://bugs.mysql.com
Thank you for using the InnoDB plugin!

View File

@ -797,7 +797,7 @@ btr_create(
buf_block_dbg_add_level(block, SYNC_TREE_NODE_NEW);
}
/* Create a new index page on the the allocated segment page */
/* Create a new index page on the allocated segment page */
page_zip = buf_block_get_page_zip(block);
if (UNIV_LIKELY_NULL(page_zip)) {
@ -1011,7 +1011,26 @@ btr_page_reorganize_low(
(!page_zip_compress(page_zip, page, index, NULL))) {
/* Restore the old page and exit. */
buf_frame_copy(page, temp_page);
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
/* Check that the bytes that we skip are identical. */
ut_a(!memcmp(page, temp_page, PAGE_HEADER));
ut_a(!memcmp(PAGE_HEADER + PAGE_N_RECS + page,
PAGE_HEADER + PAGE_N_RECS + temp_page,
PAGE_DATA - (PAGE_HEADER + PAGE_N_RECS)));
ut_a(!memcmp(UNIV_PAGE_SIZE - FIL_PAGE_DATA_END + page,
UNIV_PAGE_SIZE - FIL_PAGE_DATA_END + temp_page,
FIL_PAGE_DATA_END));
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
memcpy(PAGE_HEADER + page, PAGE_HEADER + temp_page,
PAGE_N_RECS - PAGE_N_DIR_SLOTS);
memcpy(PAGE_DATA + page, PAGE_DATA + temp_page,
UNIV_PAGE_SIZE - PAGE_DATA - FIL_PAGE_DATA_END);
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
ut_a(!memcmp(page, temp_page, UNIV_PAGE_SIZE));
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
goto func_exit;
}
@ -1902,7 +1921,7 @@ func_start:
n_uniq, &heap);
/* If the new record is less than the existing record
the the split in the middle will copy the existing
the split in the middle will copy the existing
record to the new node. */
if (cmp_dtuple_rec(tuple, first_rec, offsets) < 0) {
split_rec = page_get_middle_rec(page);

View File

@ -957,7 +957,7 @@ btr_search_guess_on_hash(
/* Increment the page get statistics though we did not really
fix the page: for user info only */
buf_pool->n_page_gets++;
buf_pool->stat.n_page_gets++;
return(TRUE);

View File

@ -837,16 +837,35 @@ buf_chunk_not_freed(
block = chunk->blocks;
for (i = chunk->size; i--; block++) {
mutex_enter(&block->mutex);
if (buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE
&& !buf_flush_ready_for_replace(&block->page)) {
ibool ready;
switch (buf_block_get_state(block)) {
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
/* The uncompressed buffer pool should never
contain compressed block descriptors. */
ut_error;
break;
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
/* Skip blocks that are not being used for
file pages. */
break;
case BUF_BLOCK_FILE_PAGE:
mutex_enter(&block->mutex);
ready = buf_flush_ready_for_replace(&block->page);
mutex_exit(&block->mutex);
return(block);
}
mutex_exit(&block->mutex);
if (!ready) {
return(block);
}
break;
}
}
return(NULL);
@ -966,8 +985,6 @@ buf_pool_init(void)
buf_pool->no_flush[i] = os_event_create(NULL);
}
buf_pool->ulint_clock = 1;
/* 3. Initialize LRU fields
--------------------------- */
/* All fields are initialized by mem_zalloc(). */
@ -1470,34 +1487,9 @@ buf_pool_resize(void)
buf_pool_page_hash_rebuild();
}
/********************************************************************//**
Moves the block to the start of the LRU list if there is a danger
that the block would drift out of the buffer pool. */
UNIV_INLINE
void
buf_block_make_young(
/*=================*/
buf_page_t* bpage) /*!< in: block to make younger */
{
ut_ad(!buf_pool_mutex_own());
/* Note that we read freed_page_clock's without holding any mutex:
this is allowed since the result is used only in heuristics */
if (buf_page_peek_if_too_old(bpage)) {
buf_pool_mutex_enter();
/* There has been freeing activity in the LRU list:
best to move to the head of the LRU list */
buf_LRU_make_block_young(bpage);
buf_pool_mutex_exit();
}
}
/********************************************************************//**
Moves a page to the start of the buffer pool LRU list. This high-level
function can be used to prevent an important page from from slipping out of
function can be used to prevent an important page from slipping out of
the buffer pool. */
UNIV_INTERN
void
@ -1514,6 +1506,36 @@ buf_page_make_young(
buf_pool_mutex_exit();
}
/********************************************************************//**
Sets the time of the first access of a page and moves a page to the
start of the buffer pool LRU list if it is too old. This high-level
function can be used to prevent an important page from slipping
out of the buffer pool. */
static
void
buf_page_set_accessed_make_young(
/*=============================*/
buf_page_t* bpage, /*!< in/out: buffer block of a
file page */
unsigned access_time) /*!< in: bpage->access_time
read under mutex protection,
or 0 if unknown */
{
ut_ad(!buf_pool_mutex_own());
ut_a(buf_page_in_file(bpage));
if (buf_page_peek_if_too_old(bpage)) {
buf_pool_mutex_enter();
buf_LRU_make_block_young(bpage);
buf_pool_mutex_exit();
} else if (!access_time) {
ulint time_ms = ut_time_ms();
buf_pool_mutex_enter();
buf_page_set_accessed(bpage, time_ms);
buf_pool_mutex_exit();
}
}
/********************************************************************//**
Resets the check_index_page_at_flush field of a page if found in the buffer
pool. */
@ -1645,11 +1667,12 @@ buf_page_get_zip(
buf_page_t* bpage;
mutex_t* block_mutex;
ibool must_read;
unsigned access_time;
#ifndef UNIV_LOG_DEBUG
ut_ad(!ibuf_inside());
#endif
buf_pool->n_page_gets++;
buf_pool->stat.n_page_gets++;
for (;;) {
buf_pool_mutex_enter();
@ -1712,14 +1735,13 @@ err_exit:
got_block:
must_read = buf_page_get_io_fix(bpage) == BUF_IO_READ;
access_time = buf_page_is_accessed(bpage);
buf_pool_mutex_exit();
buf_page_set_accessed(bpage, TRUE);
mutex_exit(block_mutex);
buf_block_make_young(bpage);
buf_page_set_accessed_make_young(bpage, access_time);
#ifdef UNIV_DEBUG_FILE_ACCESSES
ut_a(!bpage->file_page_was_freed);
@ -1812,7 +1834,7 @@ buf_zip_decompress(
switch (fil_page_get_type(frame)) {
case FIL_PAGE_INDEX:
if (page_zip_decompress(&block->page.zip,
block->frame)) {
block->frame, TRUE)) {
return(TRUE);
}
@ -2000,7 +2022,7 @@ buf_page_get_gen(
mtr_t* mtr) /*!< in: mini-transaction */
{
buf_block_t* block;
ibool accessed;
unsigned access_time;
ulint fix_type;
ibool must_read;
@ -2016,7 +2038,7 @@ buf_page_get_gen(
#ifndef UNIV_LOG_DEBUG
ut_ad(!ibuf_inside() || ibuf_page(space, zip_size, offset, NULL));
#endif
buf_pool->n_page_gets++;
buf_pool->stat.n_page_gets++;
loop:
block = guess;
buf_pool_mutex_enter();
@ -2243,17 +2265,16 @@ wait_until_unfixed:
UNIV_MEM_ASSERT_RW(&block->page, sizeof block->page);
buf_block_buf_fix_inc(block, file, line);
buf_pool_mutex_exit();
/* Check if this is the first access to the page */
accessed = buf_page_is_accessed(&block->page);
buf_page_set_accessed(&block->page, TRUE);
mutex_exit(&block->mutex);
buf_block_make_young(&block->page);
/* Check if this is the first access to the page */
access_time = buf_page_is_accessed(&block->page);
buf_pool_mutex_exit();
buf_page_set_accessed_make_young(&block->page, access_time);
#ifdef UNIV_DEBUG_FILE_ACCESSES
ut_a(!block->page.file_page_was_freed);
@ -2306,7 +2327,7 @@ wait_until_unfixed:
mtr_memo_push(mtr, block, fix_type);
if (!accessed) {
if (!access_time) {
/* In the case of a first access, try to apply linear
read-ahead */
@ -2336,7 +2357,7 @@ buf_page_optimistic_get_func(
ulint line, /*!< in: line where called */
mtr_t* mtr) /*!< in: mini-transaction */
{
ibool accessed;
unsigned access_time;
ibool success;
ulint fix_type;
@ -2353,14 +2374,16 @@ buf_page_optimistic_get_func(
}
buf_block_buf_fix_inc(block, file, line);
accessed = buf_page_is_accessed(&block->page);
buf_page_set_accessed(&block->page, TRUE);
mutex_exit(&block->mutex);
buf_block_make_young(&block->page);
/* Check if this is the first access to the page.
We do a dirty read on purpose, to avoid mutex contention.
This field is only used for heuristic purposes; it does not
affect correctness. */
/* Check if this is the first access to the page */
access_time = buf_page_is_accessed(&block->page);
buf_page_set_accessed_make_young(&block->page, access_time);
ut_ad(!ibuf_inside()
|| ibuf_page(buf_block_get_space(block),
@ -2412,7 +2435,7 @@ buf_page_optimistic_get_func(
#ifdef UNIV_DEBUG_FILE_ACCESSES
ut_a(block->page.file_page_was_freed == FALSE);
#endif
if (UNIV_UNLIKELY(!accessed)) {
if (UNIV_UNLIKELY(!access_time)) {
/* In the case of a first access, try to apply linear
read-ahead */
@ -2425,7 +2448,7 @@ buf_page_optimistic_get_func(
ut_a(ibuf_count_get(buf_block_get_space(block),
buf_block_get_page_no(block)) == 0);
#endif
buf_pool->n_page_gets++;
buf_pool->stat.n_page_gets++;
return(TRUE);
}
@ -2473,8 +2496,20 @@ buf_page_get_known_nowait(
mutex_exit(&block->mutex);
if (mode == BUF_MAKE_YOUNG) {
buf_block_make_young(&block->page);
if (mode == BUF_MAKE_YOUNG && buf_page_peek_if_too_old(&block->page)) {
buf_pool_mutex_enter();
buf_LRU_make_block_young(&block->page);
buf_pool_mutex_exit();
} else if (!buf_page_is_accessed(&block->page)) {
/* Above, we do a dirty read on purpose, to avoid
mutex contention. The field buf_page_t::access_time
is only used for heuristic purposes. Writes to the
field must be protected by mutex, however. */
ulint time_ms = ut_time_ms();
buf_pool_mutex_enter();
buf_page_set_accessed(&block->page, time_ms);
buf_pool_mutex_exit();
}
ut_ad(!ibuf_inside() || (mode == BUF_KEEP_OLD));
@ -2513,7 +2548,7 @@ buf_page_get_known_nowait(
|| (ibuf_count_get(buf_block_get_space(block),
buf_block_get_page_no(block)) == 0));
#endif
buf_pool->n_page_gets++;
buf_pool->stat.n_page_gets++;
return(TRUE);
}
@ -2589,7 +2624,7 @@ buf_page_try_get_func(
#endif /* UNIV_DEBUG_FILE_ACCESSES */
buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
buf_pool->n_page_gets++;
buf_pool->stat.n_page_gets++;
#ifdef UNIV_IBUF_COUNT_DEBUG
ut_a(ibuf_count_get(buf_block_get_space(block),
@ -2608,10 +2643,10 @@ buf_page_init_low(
buf_page_t* bpage) /*!< in: block to init */
{
bpage->flush_type = BUF_FLUSH_LRU;
bpage->accessed = FALSE;
bpage->io_fix = BUF_IO_NONE;
bpage->buf_fix_count = 0;
bpage->freed_page_clock = 0;
bpage->access_time = 0;
bpage->newest_modification = 0;
bpage->oldest_modification = 0;
HASH_INVALIDATE(bpage, hash);
@ -2907,6 +2942,7 @@ buf_page_create(
buf_frame_t* frame;
buf_block_t* block;
buf_block_t* free_block = NULL;
ulint time_ms = ut_time_ms();
ut_ad(mtr);
ut_ad(space || !zip_size);
@ -2953,7 +2989,7 @@ buf_page_create(
buf_LRU_add_block(&block->page, FALSE);
buf_block_buf_fix_inc(block, __FILE__, __LINE__);
buf_pool->n_pages_created++;
buf_pool->stat.n_pages_created++;
if (zip_size) {
void* data;
@ -2990,12 +3026,12 @@ buf_page_create(
rw_lock_x_unlock(&block->lock);
}
buf_page_set_accessed(&block->page, time_ms);
buf_pool_mutex_exit();
mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
buf_page_set_accessed(&block->page, TRUE);
mutex_exit(&block->mutex);
/* Delete possible entries for the page from the insert buffer:
@ -3201,7 +3237,7 @@ corrupt:
ut_ad(buf_pool->n_pend_reads > 0);
buf_pool->n_pend_reads--;
buf_pool->n_pages_read++;
buf_pool->stat.n_pages_read++;
if (uncompressed) {
rw_lock_x_unlock_gen(&((buf_block_t*) bpage)->lock,
@ -3221,7 +3257,7 @@ corrupt:
BUF_IO_WRITE);
}
buf_pool->n_pages_written++;
buf_pool->stat.n_pages_written++;
break;
@ -3251,7 +3287,32 @@ void
buf_pool_invalidate(void)
/*=====================*/
{
ibool freed;
ibool freed;
enum buf_flush i;
buf_pool_mutex_enter();
for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
/* As this function is called during startup and
during redo application phase during recovery, InnoDB
is single threaded (apart from IO helper threads) at
this stage. No new write batch can be in initialization
stage at this point. */
ut_ad(buf_pool->init_flush[i] == FALSE);
/* However, it is possible that a write batch that has
been posted earlier is still not complete. For buffer
pool invalidation to proceed we must ensure there is NO
write activity happening. */
if (buf_pool->n_flush[i] > 0) {
buf_pool_mutex_exit();
buf_flush_wait_batch_end(i);
buf_pool_mutex_enter();
}
}
buf_pool_mutex_exit();
ut_ad(buf_all_freed());
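
(The wait above follows the usual drop-the-mutex / wait-for-the-batch / retake-and-recheck pattern. A generic sketch with standard-library primitives, where std::condition_variable stands in for buf_flush_wait_batch_end() and the struct and names are invented:)

// Generic sketch of the wait above, not InnoDB code: before tearing down
// shared state, wait until every in-flight write batch has completed.
#include <condition_variable>
#include <mutex>

struct FlushState {
  std::mutex m;
  std::condition_variable batch_end;   // signalled when a batch completes
  int n_flush[3] = {0, 0, 0};          // pages in flight per flush type
};

static void wait_until_no_pending_flush(FlushState &fs)
{
  std::unique_lock<std::mutex> lock(fs.m);
  for (int type = 0; type < 3; type++)
    // wait() releases the lock while sleeping and re-checks the predicate
    // under the lock on every wakeup, like the loop above.
    fs.batch_end.wait(lock, [&] { return fs.n_flush[type] == 0; });
}

int main()
{
  FlushState fs;                       // nothing in flight
  wait_until_no_pending_flush(fs);     // returns immediately
  return 0;
}
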
@ -3266,6 +3327,14 @@ buf_pool_invalidate(void)
ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
ut_ad(UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0);
buf_pool->freed_page_clock = 0;
buf_pool->LRU_old = NULL;
buf_pool->LRU_old_len = 0;
buf_pool->LRU_flush_ended = 0;
memset(&buf_pool->stat, 0x00, sizeof(buf_pool->stat));
buf_refresh_io_stats();
buf_pool_mutex_exit();
}
@ -3528,6 +3597,7 @@ buf_print(void)
"n pending decompressions %lu\n"
"n pending reads %lu\n"
"n pending flush LRU %lu list %lu single page %lu\n"
"pages made young %lu, not young %lu\n"
"pages read %lu, created %lu, written %lu\n",
(ulong) size,
(ulong) UT_LIST_GET_LEN(buf_pool->LRU),
@ -3538,8 +3608,11 @@ buf_print(void)
(ulong) buf_pool->n_flush[BUF_FLUSH_LRU],
(ulong) buf_pool->n_flush[BUF_FLUSH_LIST],
(ulong) buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE],
(ulong) buf_pool->n_pages_read, buf_pool->n_pages_created,
(ulong) buf_pool->n_pages_written);
(ulong) buf_pool->stat.n_pages_made_young,
(ulong) buf_pool->stat.n_pages_not_made_young,
(ulong) buf_pool->stat.n_pages_read,
(ulong) buf_pool->stat.n_pages_created,
(ulong) buf_pool->stat.n_pages_written);
/* Count the number of blocks belonging to each index in the buffer */
@ -3744,10 +3817,9 @@ buf_print_io(
{
time_t current_time;
double time_elapsed;
ulint size;
ulint n_gets_diff;
ut_ad(buf_pool);
size = buf_pool->curr_size;
buf_pool_mutex_enter();
@ -3755,12 +3827,14 @@ buf_print_io(
"Buffer pool size %lu\n"
"Free buffers %lu\n"
"Database pages %lu\n"
"Old database pages %lu\n"
"Modified db pages %lu\n"
"Pending reads %lu\n"
"Pending writes: LRU %lu, flush list %lu, single page %lu\n",
(ulong) size,
(ulong) buf_pool->curr_size,
(ulong) UT_LIST_GET_LEN(buf_pool->free),
(ulong) UT_LIST_GET_LEN(buf_pool->LRU),
(ulong) buf_pool->LRU_old_len,
(ulong) UT_LIST_GET_LEN(buf_pool->flush_list),
(ulong) buf_pool->n_pend_reads,
(ulong) buf_pool->n_flush[BUF_FLUSH_LRU]
@ -3772,37 +3846,66 @@ buf_print_io(
current_time = time(NULL);
time_elapsed = 0.001 + difftime(current_time,
buf_pool->last_printout_time);
buf_pool->last_printout_time = current_time;
fprintf(file,
"Pages made young %lu, not young %lu\n"
"%.2f youngs/s, %.2f non-youngs/s\n"
"Pages read %lu, created %lu, written %lu\n"
"%.2f reads/s, %.2f creates/s, %.2f writes/s\n",
(ulong) buf_pool->n_pages_read,
(ulong) buf_pool->n_pages_created,
(ulong) buf_pool->n_pages_written,
(buf_pool->n_pages_read - buf_pool->n_pages_read_old)
(ulong) buf_pool->stat.n_pages_made_young,
(ulong) buf_pool->stat.n_pages_not_made_young,
(buf_pool->stat.n_pages_made_young
- buf_pool->old_stat.n_pages_made_young)
/ time_elapsed,
(buf_pool->n_pages_created - buf_pool->n_pages_created_old)
(buf_pool->stat.n_pages_not_made_young
- buf_pool->old_stat.n_pages_not_made_young)
/ time_elapsed,
(buf_pool->n_pages_written - buf_pool->n_pages_written_old)
(ulong) buf_pool->stat.n_pages_read,
(ulong) buf_pool->stat.n_pages_created,
(ulong) buf_pool->stat.n_pages_written,
(buf_pool->stat.n_pages_read
- buf_pool->old_stat.n_pages_read)
/ time_elapsed,
(buf_pool->stat.n_pages_created
- buf_pool->old_stat.n_pages_created)
/ time_elapsed,
(buf_pool->stat.n_pages_written
- buf_pool->old_stat.n_pages_written)
/ time_elapsed);
if (buf_pool->n_page_gets > buf_pool->n_page_gets_old) {
fprintf(file, "Buffer pool hit rate %lu / 1000\n",
n_gets_diff = buf_pool->stat.n_page_gets - buf_pool->old_stat.n_page_gets;
if (n_gets_diff) {
fprintf(file,
"Buffer pool hit rate %lu / 1000,"
" young-making rate %lu / 1000 not %lu / 1000\n",
(ulong)
(1000 - ((1000 * (buf_pool->n_pages_read
- buf_pool->n_pages_read_old))
/ (buf_pool->n_page_gets
- buf_pool->n_page_gets_old))));
(1000 - ((1000 * (buf_pool->stat.n_pages_read
- buf_pool->old_stat.n_pages_read))
/ (buf_pool->stat.n_page_gets
- buf_pool->old_stat.n_page_gets))),
(ulong)
(1000 * (buf_pool->stat.n_pages_made_young
- buf_pool->old_stat.n_pages_made_young)
/ n_gets_diff),
(ulong)
(1000 * (buf_pool->stat.n_pages_not_made_young
- buf_pool->old_stat.n_pages_not_made_young)
/ n_gets_diff));
} else {
fputs("No buffer pool page gets since the last printout\n",
file);
}
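
(A worked example of the ratios printed above, using invented counter deltas; the arithmetic mirrors the expressions in the fprintf call.)

// Worked example of the per-1000 ratios printed above: hit rate is
// 1000 - 1000 * reads / page_gets, and the young-making rates are
// made_young / page_gets and not_made_young / page_gets, also per 1000.
#include <cstdio>

int main()
{
  unsigned long page_gets = 100000;       // delta since the last printout
  unsigned long pages_read = 2500;        // physical reads in the interval
  unsigned long made_young = 4000;
  unsigned long not_made_young = 80000;

  unsigned long hit_rate = 1000 - (1000 * pages_read) / page_gets;
  std::printf("Buffer pool hit rate %lu / 1000,"
              " young-making rate %lu / 1000 not %lu / 1000\n",
              hit_rate,
              1000 * made_young / page_gets,
              1000 * not_made_young / page_gets);
  // -> 975 / 1000, 40 / 1000 and 800 / 1000
  return 0;
}
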
buf_pool->n_page_gets_old = buf_pool->n_page_gets;
buf_pool->n_pages_read_old = buf_pool->n_pages_read;
buf_pool->n_pages_created_old = buf_pool->n_pages_created;
buf_pool->n_pages_written_old = buf_pool->n_pages_written;
/* Statistics about read ahead algorithm */
fprintf(file, "Pages read ahead %.2f/s,"
" evicted without access %.2f/s\n",
(buf_pool->stat.n_ra_pages_read
- buf_pool->old_stat.n_ra_pages_read)
/ time_elapsed,
(buf_pool->stat.n_ra_pages_evicted
- buf_pool->old_stat.n_ra_pages_evicted)
/ time_elapsed);
/* Print some values to help us with visualizing what is
happening with LRU eviction. */
@ -3814,6 +3917,7 @@ buf_print_io(
buf_LRU_stat_sum.io, buf_LRU_stat_cur.io,
buf_LRU_stat_sum.unzip, buf_LRU_stat_cur.unzip);
buf_refresh_io_stats();
buf_pool_mutex_exit();
}
@ -3825,10 +3929,7 @@ buf_refresh_io_stats(void)
/*======================*/
{
buf_pool->last_printout_time = time(NULL);
buf_pool->n_page_gets_old = buf_pool->n_page_gets;
buf_pool->n_pages_read_old = buf_pool->n_pages_read;
buf_pool->n_pages_created_old = buf_pool->n_pages_created;
buf_pool->n_pages_written_old = buf_pool->n_pages_written;
buf_pool->old_stat = buf_pool->stat;
}
/*********************************************************************//**

View File

@ -49,18 +49,22 @@ Created 11/5/1995 Heikki Tuuri
#include "log0recv.h"
#include "srv0srv.h"
/** The number of blocks from the LRU_old pointer onward, including the block
pointed to, must be 3/8 of the whole LRU list length, except that the
tolerance defined below is allowed. Note that the tolerance must be small
enough such that for even the BUF_LRU_OLD_MIN_LEN long LRU list, the
LRU_old pointer is not allowed to point to either end of the LRU list. */
/** The number of blocks from the LRU_old pointer onward, including
the block pointed to, must be buf_LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
of the whole LRU list length, except that the tolerance defined below
is allowed. Note that the tolerance must be small enough such that for
even the BUF_LRU_OLD_MIN_LEN long LRU list, the LRU_old pointer is not
allowed to point to either end of the LRU list. */
#define BUF_LRU_OLD_TOLERANCE 20
/** The whole LRU list length is divided by this number to determine an
initial segment in buf_LRU_get_recent_limit */
#define BUF_LRU_INITIAL_RATIO 8
/** The minimum amount of non-old blocks when the LRU_old list exists
(that is, when there are more than BUF_LRU_OLD_MIN_LEN blocks).
@see buf_LRU_old_adjust_len */
#define BUF_LRU_NON_OLD_MIN_LEN 5
#if BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN
# error "BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN"
#endif
/** When dropping the search hash index entries before deleting an ibd
file, we build a local array of pages belonging to that tablespace
@ -107,6 +111,15 @@ UNIV_INTERN buf_LRU_stat_t buf_LRU_stat_sum;
/* @} */
/** @name Heuristics for detecting index scan @{ */
/** Reserve this much/BUF_LRU_OLD_RATIO_DIV of the buffer pool for
"old" blocks. Protected by buf_pool_mutex. */
UNIV_INTERN uint buf_LRU_old_ratio;
/** Move blocks to "new" LRU list only if the first access was at
least this many milliseconds ago. Not protected by any mutex or latch. */
UNIV_INTERN uint buf_LRU_old_threshold_ms;
/* @} */
/******************************************************************//**
Takes a block out of the LRU list and page hash table.
If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
@ -428,42 +441,6 @@ next_page:
}
}
/******************************************************************//**
Gets the minimum LRU_position field for the blocks in an initial segment
(determined by BUF_LRU_INITIAL_RATIO) of the LRU list. The limit is not
guaranteed to be precise, because the ulint_clock may wrap around.
@return the limit; zero if could not determine it */
UNIV_INTERN
ulint
buf_LRU_get_recent_limit(void)
/*==========================*/
{
const buf_page_t* bpage;
ulint len;
ulint limit;
buf_pool_mutex_enter();
len = UT_LIST_GET_LEN(buf_pool->LRU);
if (len < BUF_LRU_OLD_MIN_LEN) {
/* The LRU list is too short to do read-ahead */
buf_pool_mutex_exit();
return(0);
}
bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
limit = buf_page_get_LRU_position(bpage);
len /= BUF_LRU_INITIAL_RATIO;
buf_pool_mutex_exit();
return(limit > len ? (limit - len) : 0);
}
/********************************************************************//**
Insert a compressed block into buf_pool->zip_clean in the LRU order. */
UNIV_INTERN
@ -594,6 +571,7 @@ buf_LRU_free_from_common_LRU_list(
bpage = UT_LIST_GET_PREV(LRU, bpage), distance--) {
enum buf_lru_free_block_status freed;
unsigned accessed;
mutex_t* block_mutex
= buf_page_get_mutex(bpage);
@ -601,11 +579,18 @@ buf_LRU_free_from_common_LRU_list(
ut_ad(bpage->in_LRU_list);
mutex_enter(block_mutex);
accessed = buf_page_is_accessed(bpage);
freed = buf_LRU_free_block(bpage, TRUE, NULL);
mutex_exit(block_mutex);
switch (freed) {
case BUF_LRU_FREED:
/* Keep track of pages that are evicted without
ever being accessed. This gives us a measure of
the effectiveness of readahead */
if (!accessed) {
++buf_pool->stat.n_ra_pages_evicted;
}
return(TRUE);
case BUF_LRU_NOT_FREED:
@ -953,8 +938,10 @@ buf_LRU_old_adjust_len(void)
ut_a(buf_pool->LRU_old);
ut_ad(buf_pool_mutex_own());
#if 3 * (BUF_LRU_OLD_MIN_LEN / 8) <= BUF_LRU_OLD_TOLERANCE + 5
# error "3 * (BUF_LRU_OLD_MIN_LEN / 8) <= BUF_LRU_OLD_TOLERANCE + 5"
ut_ad(buf_LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
ut_ad(buf_LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
#if BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)
# error "BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)"
#endif
#ifdef UNIV_LRU_DEBUG
/* buf_pool->LRU_old must be the first item in the LRU list
@ -966,34 +953,39 @@ buf_LRU_old_adjust_len(void)
|| UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
#endif /* UNIV_LRU_DEBUG */
for (;;) {
old_len = buf_pool->LRU_old_len;
new_len = 3 * (UT_LIST_GET_LEN(buf_pool->LRU) / 8);
old_len = buf_pool->LRU_old_len;
new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
* buf_LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
UT_LIST_GET_LEN(buf_pool->LRU)
- (BUF_LRU_OLD_TOLERANCE
+ BUF_LRU_NON_OLD_MIN_LEN));
ut_ad(buf_pool->LRU_old->in_LRU_list);
ut_a(buf_pool->LRU_old);
for (;;) {
buf_page_t* LRU_old = buf_pool->LRU_old;
ut_a(LRU_old);
ut_ad(LRU_old->in_LRU_list);
#ifdef UNIV_LRU_DEBUG
ut_a(buf_pool->LRU_old->old);
ut_a(LRU_old->old);
#endif /* UNIV_LRU_DEBUG */
/* Update the LRU_old pointer if necessary */
if (old_len < new_len - BUF_LRU_OLD_TOLERANCE) {
if (old_len + BUF_LRU_OLD_TOLERANCE < new_len) {
buf_pool->LRU_old = UT_LIST_GET_PREV(
LRU, buf_pool->LRU_old);
buf_pool->LRU_old = LRU_old = UT_LIST_GET_PREV(
LRU, LRU_old);
#ifdef UNIV_LRU_DEBUG
ut_a(!buf_pool->LRU_old->old);
ut_a(!LRU_old->old);
#endif /* UNIV_LRU_DEBUG */
buf_page_set_old(buf_pool->LRU_old, TRUE);
buf_pool->LRU_old_len++;
buf_page_set_old(LRU_old, TRUE);
old_len = ++buf_pool->LRU_old_len;
} else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) {
buf_page_set_old(buf_pool->LRU_old, FALSE);
buf_pool->LRU_old = UT_LIST_GET_NEXT(
LRU, buf_pool->LRU_old);
buf_pool->LRU_old_len--;
buf_page_set_old(LRU_old, FALSE);
buf_pool->LRU_old = UT_LIST_GET_NEXT(LRU, LRU_old);
old_len = --buf_pool->LRU_old_len;
} else {
return;
}
@ -1021,6 +1013,7 @@ buf_LRU_old_init(void)
while (bpage != NULL) {
ut_ad(bpage->in_LRU_list);
ut_ad(buf_page_in_file(bpage));
buf_page_set_old(bpage, TRUE);
bpage = UT_LIST_GET_NEXT(LRU, bpage);
}
@ -1075,16 +1068,19 @@ buf_LRU_remove_block(
if (UNIV_UNLIKELY(bpage == buf_pool->LRU_old)) {
/* Below: the previous block is guaranteed to exist, because
the LRU_old pointer is only allowed to differ by the
tolerance value from strict 3/8 of the LRU list length. */
/* Below: the previous block is guaranteed to exist,
because the LRU_old pointer is only allowed to differ
by BUF_LRU_OLD_TOLERANCE from strict
buf_LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
list length. */
buf_page_t* prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
buf_pool->LRU_old = UT_LIST_GET_PREV(LRU, bpage);
ut_a(buf_pool->LRU_old);
ut_a(prev_bpage);
#ifdef UNIV_LRU_DEBUG
ut_a(!buf_pool->LRU_old->old);
ut_a(!prev_bpage->old);
#endif /* UNIV_LRU_DEBUG */
buf_page_set_old(buf_pool->LRU_old, TRUE);
buf_pool->LRU_old = prev_bpage;
buf_page_set_old(prev_bpage, TRUE);
buf_pool->LRU_old_len++;
}
@ -1149,39 +1145,25 @@ buf_LRU_add_block_to_end_low(
/*=========================*/
buf_page_t* bpage) /*!< in: control block */
{
buf_page_t* last_bpage;
ut_ad(buf_pool);
ut_ad(bpage);
ut_ad(buf_pool_mutex_own());
ut_a(buf_page_in_file(bpage));
last_bpage = UT_LIST_GET_LAST(buf_pool->LRU);
if (last_bpage) {
bpage->LRU_position = last_bpage->LRU_position;
} else {
bpage->LRU_position = buf_pool_clock_tic();
}
ut_ad(!bpage->in_LRU_list);
UT_LIST_ADD_LAST(LRU, buf_pool->LRU, bpage);
ut_d(bpage->in_LRU_list = TRUE);
buf_page_set_old(bpage, TRUE);
if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
buf_pool->LRU_old_len++;
}
if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {
ut_ad(buf_pool->LRU_old);
/* Adjust the length of the old block list if necessary */
buf_pool->LRU_old_len++;
buf_LRU_old_adjust_len();
} else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {
@ -1189,6 +1171,7 @@ buf_LRU_add_block_to_end_low(
/* The LRU list is now long enough for LRU_old to become
defined: init it */
buf_pool->LRU_old_len++;
buf_LRU_old_init();
}
@ -1222,7 +1205,6 @@ buf_LRU_add_block_low(
UT_LIST_ADD_FIRST(LRU, buf_pool->LRU, bpage);
bpage->LRU_position = buf_pool_clock_tic();
bpage->freed_page_clock = buf_pool->freed_page_clock;
} else {
#ifdef UNIV_LRU_DEBUG
@ -1237,11 +1219,6 @@ buf_LRU_add_block_low(
UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, buf_pool->LRU_old,
bpage);
buf_pool->LRU_old_len++;
/* We copy the LRU position field of the previous block
to the new block */
bpage->LRU_position = (buf_pool->LRU_old)->LRU_position;
}
ut_d(bpage->in_LRU_list = TRUE);
@ -1295,6 +1272,12 @@ buf_LRU_make_block_young(
/*=====================*/
buf_page_t* bpage) /*!< in: control block */
{
ut_ad(buf_pool_mutex_own());
if (bpage->old) {
buf_pool->stat.n_pages_made_young++;
}
buf_LRU_remove_block(bpage);
buf_LRU_add_block_low(bpage, FALSE);
}
@ -1847,6 +1830,50 @@ buf_LRU_block_free_hashed_page(
buf_LRU_block_free_non_file_page(block);
}
/**********************************************************************//**
Updates buf_LRU_old_ratio.
@return updated old_pct */
UNIV_INTERN
uint
buf_LRU_old_ratio_update(
/*=====================*/
uint old_pct,/*!< in: Reserve this percentage of
the buffer pool for "old" blocks. */
ibool adjust) /*!< in: TRUE=adjust the LRU list;
FALSE=just assign buf_LRU_old_ratio
during the initialization of InnoDB */
{
uint ratio;
ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100;
if (ratio < BUF_LRU_OLD_RATIO_MIN) {
ratio = BUF_LRU_OLD_RATIO_MIN;
} else if (ratio > BUF_LRU_OLD_RATIO_MAX) {
ratio = BUF_LRU_OLD_RATIO_MAX;
}
if (adjust) {
buf_pool_mutex_enter();
if (ratio != buf_LRU_old_ratio) {
buf_LRU_old_ratio = ratio;
if (UT_LIST_GET_LEN(buf_pool->LRU)
>= BUF_LRU_OLD_MIN_LEN) {
buf_LRU_old_adjust_len();
}
}
buf_pool_mutex_exit();
} else {
buf_LRU_old_ratio = ratio;
}
/* the reverse of
ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100 */
return((uint) (ratio * 100 / (double) BUF_LRU_OLD_RATIO_DIV + 0.5));
}
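
(The conversion above and its inverse round-trip cleanly. The constants below, a divisor of 1024 and a 5..95 percent clamp, are assumptions used only for this illustration.)

// Round-trip of the conversion above: a percentage is scaled to
// ratio = pct * DIV / 100, clamped, and converted back with rounding.
#include <cstdio>

enum { DIV = 1024, MIN = 5 * 1024 / 100, MAX = 95 * 1024 / 100 };

static unsigned ratio_update(unsigned old_pct, unsigned *ratio_out)
{
  unsigned ratio = old_pct * DIV / 100;
  if (ratio < MIN) ratio = MIN;
  else if (ratio > MAX) ratio = MAX;
  *ratio_out = ratio;
  return (unsigned) (ratio * 100 / (double) DIV + 0.5);  // back to percent
}

int main()
{
  unsigned ratio;
  unsigned pct = ratio_update(37, &ratio);        // the default old-blocks pct
  std::printf("ratio=%u pct=%u\n", ratio, pct);   // ratio=378, pct=37
  return 0;
}
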
/********************************************************************//**
Update the historical stats that we are collecting for LRU eviction
policy at the end of each interval. */
@ -1896,7 +1923,6 @@ buf_LRU_validate(void)
buf_block_t* block;
ulint old_len;
ulint new_len;
ulint LRU_pos;
ut_ad(buf_pool);
buf_pool_mutex_enter();
@ -1905,7 +1931,11 @@ buf_LRU_validate(void)
ut_a(buf_pool->LRU_old);
old_len = buf_pool->LRU_old_len;
new_len = 3 * (UT_LIST_GET_LEN(buf_pool->LRU) / 8);
new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
* buf_LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
UT_LIST_GET_LEN(buf_pool->LRU)
- (BUF_LRU_OLD_TOLERANCE
+ BUF_LRU_NON_OLD_MIN_LEN));
ut_a(old_len >= new_len - BUF_LRU_OLD_TOLERANCE);
ut_a(old_len <= new_len + BUF_LRU_OLD_TOLERANCE);
}
@ -1943,16 +1973,7 @@ buf_LRU_validate(void)
ut_a(buf_pool->LRU_old == bpage);
}
LRU_pos = buf_page_get_LRU_position(bpage);
bpage = UT_LIST_GET_NEXT(LRU, bpage);
if (bpage) {
/* If the following assert fails, it may
not be an error: just the buf_pool clock
has wrapped around */
ut_a(LRU_pos >= buf_page_get_LRU_position(bpage));
}
}
if (buf_pool->LRU_old) {
@ -2000,9 +2021,6 @@ buf_LRU_print(void)
ut_ad(buf_pool);
buf_pool_mutex_enter();
fprintf(stderr, "Pool ulint clock %lu\n",
(ulong) buf_pool->ulint_clock);
bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
while (bpage != NULL) {
@ -2033,18 +2051,16 @@ buf_LRU_print(void)
const byte* frame;
case BUF_BLOCK_FILE_PAGE:
frame = buf_block_get_frame((buf_block_t*) bpage);
fprintf(stderr, "\nLRU pos %lu type %lu"
fprintf(stderr, "\ntype %lu"
" index id %lu\n",
(ulong) buf_page_get_LRU_position(bpage),
(ulong) fil_page_get_type(frame),
(ulong) ut_dulint_get_low(
btr_page_get_index_id(frame)));
break;
case BUF_BLOCK_ZIP_PAGE:
frame = bpage->zip.data;
fprintf(stderr, "\nLRU pos %lu type %lu size %lu"
fprintf(stderr, "\ntype %lu size %lu"
" index id %lu\n",
(ulong) buf_page_get_LRU_position(bpage),
(ulong) fil_page_get_type(frame),
(ulong) buf_page_get_zip_size(bpage),
(ulong) ut_dulint_get_low(
@ -2052,8 +2068,7 @@ buf_LRU_print(void)
break;
default:
fprintf(stderr, "\nLRU pos %lu !state %lu!\n",
(ulong) buf_page_get_LRU_position(bpage),
fprintf(stderr, "\n!state %lu!\n",
(ulong) buf_page_get_state(bpage));
break;
}

View File

@ -38,14 +38,6 @@ Created 11/5/1995 Heikki Tuuri
#include "srv0start.h"
#include "srv0srv.h"
/** The size in blocks of the area where the random read-ahead algorithm counts
the accessed pages when deciding whether to read-ahead */
#define BUF_READ_AHEAD_RANDOM_AREA BUF_READ_AHEAD_AREA
/** There must be at least this many pages in buf_pool in the area to start
a random read-ahead */
#define BUF_READ_AHEAD_RANDOM_THRESHOLD (1 + BUF_READ_AHEAD_RANDOM_AREA / 2)
/** The linear read-ahead area size */
#define BUF_READ_AHEAD_LINEAR_AREA BUF_READ_AHEAD_AREA
@ -62,7 +54,8 @@ flag is cleared and the x-lock released by an i/o-handler thread.
@return 1 if a read request was queued, 0 if the page already resided
in buf_pool, or if the page is in the doublewrite buffer blocks in
which case it is never read into the pool, or if the tablespace does
not exist or is being dropped */
not exist or is being dropped
@return 1 if read request is issued. 0 if it is not */
static
ulint
buf_read_page_low(
@ -164,175 +157,14 @@ buf_read_page_low(
return(1);
}
/********************************************************************//**
Applies a random read-ahead in buf_pool if there are at least a threshold
value of accessed pages from the random read-ahead area. Does not read any
page, not even the one at the position (space, offset), if the read-ahead
mechanism is not activated. NOTE 1: the calling thread may own latches on
pages: to avoid deadlocks this function must be written such that it cannot
end up waiting for these latches! NOTE 2: the calling thread must want
access to the page given: this rule is set to prevent unintended read-aheads
performed by ibuf routines, a situation which could result in a deadlock if
the OS does not support asynchronous i/o.
@return number of page read requests issued; NOTE that if we read ibuf
pages, it may happen that the page at the given page number does not
get read even if we return a positive value! */
static
ulint
buf_read_ahead_random(
/*==================*/
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size in bytes, or 0 */
ulint offset) /*!< in: page number of a page which the current thread
wants to access */
{
ib_int64_t tablespace_version;
ulint recent_blocks = 0;
ulint count;
ulint LRU_recent_limit;
ulint ibuf_mode;
ulint low, high;
ulint err;
ulint i;
ulint buf_read_ahead_random_area;
/* We have currently disabled random readahead */
return(0);
if (srv_startup_is_before_trx_rollback_phase) {
/* No read-ahead to avoid thread deadlocks */
return(0);
}
if (ibuf_bitmap_page(zip_size, offset)
|| trx_sys_hdr_page(space, offset)) {
/* If it is an ibuf bitmap page or trx sys hdr, we do
no read-ahead, as that could break the ibuf page access
order */
return(0);
}
/* Remember the tablespace version before we ask te tablespace size
(typo in the removed comment: "ask te tablespace size" should read "ask the tablespace size")
below: if DISCARD + IMPORT changes the actual .ibd file meanwhile, we
do not try to read outside the bounds of the tablespace! */
tablespace_version = fil_space_get_version(space);
buf_read_ahead_random_area = BUF_READ_AHEAD_RANDOM_AREA;
low = (offset / buf_read_ahead_random_area)
* buf_read_ahead_random_area;
high = (offset / buf_read_ahead_random_area + 1)
* buf_read_ahead_random_area;
if (high > fil_space_get_size(space)) {
high = fil_space_get_size(space);
}
/* Get the minimum LRU_position field value for an initial segment
of the LRU list, to determine which blocks have recently been added
to the start of the list. */
LRU_recent_limit = buf_LRU_get_recent_limit();
buf_pool_mutex_enter();
if (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
buf_pool_mutex_exit();
return(0);
}
/* Count how many blocks in the area have been recently accessed,
that is, reside near the start of the LRU list. */
for (i = low; i < high; i++) {
const buf_page_t* bpage = buf_page_hash_get(space, i);
if (bpage
&& buf_page_is_accessed(bpage)
&& (buf_page_get_LRU_position(bpage) > LRU_recent_limit)) {
recent_blocks++;
if (recent_blocks >= BUF_READ_AHEAD_RANDOM_THRESHOLD) {
buf_pool_mutex_exit();
goto read_ahead;
}
}
}
buf_pool_mutex_exit();
/* Do nothing */
return(0);
read_ahead:
/* Read all the suitable blocks within the area */
if (ibuf_inside()) {
ibuf_mode = BUF_READ_IBUF_PAGES_ONLY;
} else {
ibuf_mode = BUF_READ_ANY_PAGE;
}
count = 0;
for (i = low; i < high; i++) {
/* It is only sensible to do read-ahead in the non-sync aio
mode: hence FALSE as the first parameter */
if (!ibuf_bitmap_page(zip_size, i)) {
count += buf_read_page_low(
&err, FALSE,
ibuf_mode | OS_AIO_SIMULATED_WAKE_LATER,
space, zip_size, FALSE,
tablespace_version, i);
if (err == DB_TABLESPACE_DELETED) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Warning: in random"
" readahead trying to access\n"
"InnoDB: tablespace %lu page %lu,\n"
"InnoDB: but the tablespace does not"
" exist or is just being dropped.\n",
(ulong) space, (ulong) i);
}
}
}
/* In simulated aio we wake the aio handler threads only after
queuing all aio requests, in native aio the following call does
nothing: */
os_aio_simulated_wake_handler_threads();
#ifdef UNIV_DEBUG
if (buf_debug_prints && (count > 0)) {
fprintf(stderr,
"Random read-ahead space %lu offset %lu pages %lu\n",
(ulong) space, (ulong) offset,
(ulong) count);
}
#endif /* UNIV_DEBUG */
++srv_read_ahead_rnd;
return(count);
}
/********************************************************************//**
High-level function which reads a page asynchronously from a file to the
buffer buf_pool if it is not already there. Sets the io_fix flag and sets
an exclusive lock on the buffer frame. The flag is cleared and the x-lock
released by the i/o-handler thread. Does a random read-ahead if it seems
sensible.
@return number of page read requests issued: this can be greater than
1 if read-ahead occurred */
released by the i/o-handler thread.
@return TRUE if page has been read in, FALSE in case of failure */
UNIV_INTERN
ulint
ibool
buf_read_page(
/*==========*/
ulint space, /*!< in: space id */
@ -341,20 +173,17 @@ buf_read_page(
{
ib_int64_t tablespace_version;
ulint count;
ulint count2;
ulint err;
tablespace_version = fil_space_get_version(space);
count = buf_read_ahead_random(space, zip_size, offset);
/* We do the i/o in the synchronous aio mode to save thread
switches: hence TRUE */
count2 = buf_read_page_low(&err, TRUE, BUF_READ_ANY_PAGE, space,
zip_size, FALSE,
tablespace_version, offset);
srv_buf_pool_reads+= count2;
count = buf_read_page_low(&err, TRUE, BUF_READ_ANY_PAGE, space,
zip_size, FALSE,
tablespace_version, offset);
srv_buf_pool_reads += count;
if (err == DB_TABLESPACE_DELETED) {
ut_print_timestamp(stderr);
fprintf(stderr,
@ -371,14 +200,14 @@ buf_read_page(
/* Increment number of I/O operations used for LRU policy. */
buf_LRU_stat_inc_io();
return(count + count2);
return(count > 0);
}
/********************************************************************//**
Applies linear read-ahead if in the buf_pool the page is a border page of
a linear read-ahead area and all the pages in the area have been accessed.
Does not read any page if the read-ahead mechanism is not activated. Note
that the the algorithm looks at the 'natural' adjacent successor and
that the algorithm looks at the 'natural' adjacent successor and
predecessor of the page, which on the leaf level of a B-tree are the next
and previous page in the chain of leaves. To know these, the page specified
in (space, offset) must already be present in the buf_pool. Thus, the
@ -498,9 +327,17 @@ buf_read_ahead_linear(
fail_count++;
} else if (pred_bpage) {
int res = (ut_ulint_cmp(
buf_page_get_LRU_position(bpage),
buf_page_get_LRU_position(pred_bpage)));
/* Note that buf_page_is_accessed() returns
the time of the first access. If some blocks
of the extent existed in the buffer pool at
the time of a linear access pattern, the first
access times may be nonmonotonic, even though
the latest access times were linear. The
threshold (srv_read_ahead_factor) should help
a little against this. */
int res = ut_ulint_cmp(
buf_page_is_accessed(bpage),
buf_page_is_accessed(pred_bpage));
/* Accesses not in the right order */
if (res != 0 && res != asc_or_desc) {
fail_count++;
@ -643,7 +480,7 @@ buf_read_ahead_linear(
LRU policy decision. */
buf_LRU_stat_inc_io();
++srv_read_ahead_seq;
buf_pool->stat.n_ra_pages_read += count;
return(count);
}

View File

@ -1379,7 +1379,7 @@ dict_create_add_foreign_field_to_dictionary(
Add a single foreign key definition to the data dictionary tables in the
database. We also generate names to constraints that were not named by the
user. A generated constraint has a name of the format
databasename/tablename_ibfk_<number>, where the numbers start from 1, and
databasename/tablename_ibfk_NUMBER, where the numbers start from 1, and
are given locally for this table, that is, the number is not global, as in
the old format constraints < 4.0.18 it used to be.
@return error code or DB_SUCCESS */
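To make the naming scheme concrete, here is a standalone sketch (not part of the changeset) that composes a generated constraint name in the databasename/tablename_ibfk_NUMBER format described above; the helper name and buffer size are illustrative assumptions.
#include <stdio.h>
/* Illustrative only: build "databasename/tablename_ibfk_N" for the N-th
   unnamed foreign key constraint of a table. */
static void make_generated_fk_name(char *buf, size_t len,
				   const char *db_and_table, unsigned number)
{
	snprintf(buf, len, "%s_ibfk_%u", db_and_table, number);
}
int main(void)
{
	char name[128];
	make_generated_fk_name(name, sizeof(name), "test/child", 1);
	printf("%s\n", name);	/* prints: test/child_ibfk_1 */
	return(0);
}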

View File

@ -82,9 +82,10 @@ static char dict_ibfk[] = "_ibfk_";
/*******************************************************************//**
Tries to find column names for the index and sets the col field of the
index. */
index.
@return TRUE if the column names were found */
static
void
ibool
dict_index_find_cols(
/*=================*/
dict_table_t* table, /*!< in: table */
@ -1169,7 +1170,7 @@ dict_col_name_is_reserved(
ulint i;
for (i = 0; i < UT_ARR_SIZE(reserved_names); i++) {
if (strcmp(name, reserved_names[i]) == 0) {
if (innobase_strcasecmp(name, reserved_names[i]) == 0) {
return(TRUE);
}
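A standalone sketch of the effect of switching from strcmp() to a case-insensitive comparison (not part of the changeset): the reserved-name list shown here and the use of strcasecmp() as a stand-in for innobase_strcasecmp() are assumptions for illustration.
#include <stdio.h>
#include <strings.h>	/* strcasecmp(), stand-in for innobase_strcasecmp() */
/* Illustrative reserved-name list; the real one lives in dict0dict.c. */
static const char *reserved_names[] = {"DB_ROW_ID", "DB_TRX_ID", "DB_ROLL_PTR"};
static int col_name_is_reserved(const char *name)
{
	size_t i;
	for (i = 0; i < sizeof(reserved_names) / sizeof(reserved_names[0]); i++) {
		if (strcasecmp(name, reserved_names[i]) == 0) {
			return(1);
		}
	}
	return(0);
}
int main(void)
{
	/* strcmp() caught only the exact upper-case spelling; the
	   case-insensitive check also rejects spellings like "db_row_id". */
	printf("%d %d\n", col_name_is_reserved("DB_ROW_ID"),
	       col_name_is_reserved("db_row_id"));
	return(0);
}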
@ -1431,7 +1432,7 @@ add_field_size:
/**********************************************************************//**
Adds an index to the dictionary cache.
@return DB_SUCCESS or DB_TOO_BIG_RECORD */
@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */
UNIV_INTERN
ulint
dict_index_add_to_cache(
@ -1457,7 +1458,10 @@ dict_index_add_to_cache(
ut_a(!dict_index_is_clust(index)
|| UT_LIST_GET_LEN(table->indexes) == 0);
dict_index_find_cols(table, index);
if (!dict_index_find_cols(table, index)) {
return(DB_CORRUPTION);
}
/* Build the cache internal representation of the index,
containing also the added system fields */
@ -1665,9 +1669,10 @@ dict_index_remove_from_cache(
/*******************************************************************//**
Tries to find column names for the index and sets the col field of the
index. */
index.
@return TRUE if the column names were found */
static
void
ibool
dict_index_find_cols(
/*=================*/
dict_table_t* table, /*!< in: table */
@ -1692,17 +1697,21 @@ dict_index_find_cols(
}
}
#ifdef UNIV_DEBUG
/* It is an error not to find a matching column. */
fputs("InnoDB: Error: no matching column for ", stderr);
ut_print_name(stderr, NULL, FALSE, field->name);
fputs(" in ", stderr);
dict_index_name_print(stderr, NULL, index);
fputs("!\n", stderr);
ut_error;
#endif /* UNIV_DEBUG */
return(FALSE);
found:
;
}
return(TRUE);
}
#endif /* !UNIV_HOTBACKUP */

View File

@ -594,6 +594,11 @@ fil_node_create(
UT_LIST_ADD_LAST(chain, space->chain, node);
if (id < SRV_LOG_SPACE_FIRST_ID && fil_system->max_assigned_id < id) {
fil_system->max_assigned_id = id;
}
mutex_exit(&fil_system->mutex);
}
@ -613,12 +618,10 @@ fil_node_open_file(
ulint size_high;
ibool ret;
ibool success;
#ifndef UNIV_HOTBACKUP
byte* buf2;
byte* page;
ulint space_id;
ulint flags;
#endif /* !UNIV_HOTBACKUP */
ut_ad(mutex_own(&(system->mutex)));
ut_a(node->n_pending == 0);
@ -654,9 +657,11 @@ fil_node_open_file(
size_bytes = (((ib_int64_t)size_high) << 32)
+ (ib_int64_t)size_low;
#ifdef UNIV_HOTBACKUP
node->size = (ulint) (size_bytes / UNIV_PAGE_SIZE);
/* TODO: adjust to zip_size, like below? */
#else
if (space->id == 0) {
node->size = (ulint) (size_bytes / UNIV_PAGE_SIZE);
goto add_size;
}
#endif /* UNIV_HOTBACKUP */
ut_a(space->purpose != FIL_LOG);
ut_a(space->id != 0);
@ -735,7 +740,10 @@ fil_node_open_file(
(size_bytes
/ dict_table_flags_to_zip_size(flags));
}
#endif
#ifdef UNIV_HOTBACKUP
add_size:
#endif /* UNIV_HOTBACKUP */
space->size += node->size;
}
@ -955,7 +963,7 @@ close_more:
" while the maximum\n"
"InnoDB: allowed value would be %lu.\n"
"InnoDB: You may need to raise the value of"
" innodb_max_files_open in\n"
" innodb_open_files in\n"
"InnoDB: my.cnf.\n",
(ulong) fil_system->n_open,
(ulong) fil_system->max_n_open);
@ -1535,7 +1543,7 @@ fil_open_log_and_system_tablespace_files(void)
fprintf(stderr,
"InnoDB: Warning: you must"
" raise the value of"
" innodb_max_open_files in\n"
" innodb_open_files in\n"
"InnoDB: my.cnf! Remember that"
" InnoDB keeps all log files"
" and all system\n"
@ -2923,7 +2931,6 @@ fil_open_single_table_tablespace(
byte* page;
ulint space_id;
ulint space_flags;
ibool ret = TRUE;
filepath = fil_make_ibd_name(name, FALSE);
@ -3001,7 +3008,7 @@ fil_open_single_table_tablespace(
(ulong) space_id, (ulong) space_flags,
(ulong) id, (ulong) flags);
ret = FALSE;
success = FALSE;
goto func_exit;
}
@ -3021,7 +3028,7 @@ func_exit:
os_file_close(file);
mem_free(filepath);
return(ret);
return(success);
}
#endif /* !UNIV_HOTBACKUP */
@ -3299,7 +3306,17 @@ fil_load_single_table_tablespace(
if (!success) {
goto func_exit;
if (srv_force_recovery > 0) {
fprintf(stderr,
"InnoDB: innodb_force_recovery"
" was set to %lu. Continuing crash recovery\n"
"InnoDB: even though the tablespace creation"
" of this table failed.\n",
srv_force_recovery);
goto func_exit;
}
exit(1);
}
/* We do not use the size information we have about the file, because

View File

@ -232,6 +232,9 @@ the extent are free and which contain old tuple version to clean. */
#define XDES_ARR_OFFSET (FSP_HEADER_OFFSET + FSP_HEADER_SIZE)
#ifndef UNIV_HOTBACKUP
/* Flag to indicate if we have printed the tablespace full error. */
static ibool fsp_tbs_full_error_printed = FALSE;
/**********************************************************************//**
Returns an extent to the free list of a space. */
static
@ -1099,7 +1102,7 @@ fsp_header_inc_size(
/**********************************************************************//**
Gets the current free limit of the system tablespace. The free limit
means the place of the first page which has never been put to the the
means the place of the first page which has never been put to the
free list for allocation. The space above that address is initialized
to zero. Sets also the global variable log_fsp_current_free_limit.
@return free limit in megabytes */
@ -1218,6 +1221,19 @@ fsp_try_extend_data_file(
if (space == 0 && !srv_auto_extend_last_data_file) {
/* We print the error message only once to avoid
spamming the error log. Note that we don't need
to reset the flag to FALSE as dealing with this
error requires server restart. */
if (fsp_tbs_full_error_printed == FALSE) {
fprintf(stderr,
"InnoDB: Error: Data file(s) ran"
" out of space.\n"
"Please add another data file or"
" use \'autoextend\' for the last"
" data file.\n");
fsp_tbs_full_error_printed = TRUE;
}
return(FALSE);
}
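The print-once idiom used here reduces to the following standalone sketch (illustrative, not part of the changeset): the flag is set on the first failure and, as the comment above notes, never reset, since recovering from a full tablespace requires a server restart anyway.
#include <stdio.h>
static int	tbs_full_error_printed = 0;	/* mirrors fsp_tbs_full_error_printed */
static int try_extend_data_file(int can_auto_extend)
{
	if (!can_auto_extend) {
		/* Print the error only once to avoid spamming the log. */
		if (!tbs_full_error_printed) {
			fprintf(stderr, "Error: data file(s) ran out of space.\n");
			tbs_full_error_printed = 1;
		}
		return(0);	/* extension failed */
	}
	return(1);
}
int main(void)
{
	try_extend_data_file(0);	/* prints the error */
	try_extend_data_file(0);	/* silent */
	return(0);
}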
@ -1832,6 +1848,8 @@ fsp_seg_inode_page_find_used(
if (!ut_dulint_is_zero(mach_read_from_8(inode + FSEG_ID))) {
/* This is used */
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
return(i);
}
}
@ -1863,6 +1881,9 @@ fsp_seg_inode_page_find_free(
return(i);
}
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
}
return(ULINT_UNDEFINED);
@ -1981,6 +2002,8 @@ fsp_alloc_seg_inode(
page + FSEG_INODE_PAGE_NODE, mtr);
}
ut_ad(ut_dulint_is_zero(mach_read_from_8(inode + FSEG_ID))
|| mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
return(inode);
}
@ -2018,7 +2041,7 @@ fsp_free_seg_inode(
}
mlog_write_dulint(inode + FSEG_ID, ut_dulint_zero, mtr);
mlog_write_ulint(inode + FSEG_MAGIC_N, 0, MLOG_4BYTES, mtr);
mlog_write_ulint(inode + FSEG_MAGIC_N, 0xfa051ce3, MLOG_4BYTES, mtr);
if (ULINT_UNDEFINED
== fsp_seg_inode_page_find_used(page, zip_size, mtr)) {
@ -2034,11 +2057,11 @@ fsp_free_seg_inode(
/**********************************************************************//**
Returns the file segment inode, page x-latched.
@return segment inode, page x-latched */
@return segment inode, page x-latched; NULL if the inode is free */
static
fseg_inode_t*
fseg_inode_get(
/*===========*/
fseg_inode_try_get(
/*===============*/
fseg_header_t* header, /*!< in: segment header */
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size in bytes
@ -2054,8 +2077,34 @@ fseg_inode_get(
inode = fut_get_ptr(space, zip_size, inode_addr, RW_X_LATCH, mtr);
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
if (UNIV_UNLIKELY
(ut_dulint_is_zero(mach_read_from_8(inode + FSEG_ID)))) {
inode = NULL;
} else {
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
}
return(inode);
}
/**********************************************************************//**
Returns the file segment inode, page x-latched.
@return segment inode, page x-latched */
static
fseg_inode_t*
fseg_inode_get(
/*===========*/
fseg_header_t* header, /*!< in: segment header */
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size in bytes
or 0 for uncompressed pages */
mtr_t* mtr) /*!< in: mtr handle */
{
fseg_inode_t* inode
= fseg_inode_try_get(header, space, zip_size, mtr);
ut_a(inode);
return(inode);
}
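A standalone sketch of the pattern introduced here (types and names are illustrative, not the real InnoDB ones): the try variant signals a freed inode by returning NULL, while the plain getter keeps the old contract of asserting, so only callers prepared for a double free, such as fseg_free_step() further down, change behaviour.
#include <assert.h>
#include <stdio.h>
struct seg_inode { unsigned long seg_id; };	/* stand-in for fseg_inode_t */
/* The "try" variant reports a freed inode (segment id 0) by returning NULL. */
static struct seg_inode *inode_try_get(struct seg_inode *candidate)
{
	return(candidate->seg_id == 0 ? NULL : candidate);
}
/* The plain getter keeps the old contract: the inode must be in use. */
static struct seg_inode *inode_get(struct seg_inode *candidate)
{
	struct seg_inode *inode = inode_try_get(candidate);
	assert(inode != NULL);	/* mirrors ut_a(inode) */
	return(inode);
}
int main(void)
{
	struct seg_inode freed = {0}, used = {42};
	if (inode_try_get(&freed) == NULL) {
		fprintf(stderr, "double free of inode detected\n");
	}
	printf("segment id %lu\n", inode_get(&used)->seg_id);
	return(0);
}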
@ -2073,6 +2122,7 @@ fseg_get_nth_frag_page_no(
ut_ad(inode && mtr);
ut_ad(n < FSEG_FRAG_ARR_N_SLOTS);
ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_X_FIX));
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
return(mach_read_from_4(inode + FSEG_FRAG_ARR
+ n * FSEG_FRAG_SLOT_SIZE));
}
@ -2091,6 +2141,7 @@ fseg_set_nth_frag_page_no(
ut_ad(inode && mtr);
ut_ad(n < FSEG_FRAG_ARR_N_SLOTS);
ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_X_FIX));
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
mlog_write_ulint(inode + FSEG_FRAG_ARR + n * FSEG_FRAG_SLOT_SIZE,
page_no, MLOG_4BYTES, mtr);
@ -2451,6 +2502,8 @@ fseg_fill_free_list(
xdes_set_state(descr, XDES_FSEG, mtr);
seg_id = mtr_read_dulint(inode + FSEG_ID, mtr);
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
mlog_write_dulint(descr + XDES_ID, seg_id, mtr);
flst_add_last(inode + FSEG_FREE, descr + XDES_FLST_NODE, mtr);
@ -2479,6 +2532,7 @@ fseg_alloc_free_extent(
fil_addr_t first;
ut_ad(!((page_offset(inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
if (flst_get_len(inode + FSEG_FREE, mtr) > 0) {
/* Segment free list is not empty, allocate from it */
@ -3136,6 +3190,8 @@ fseg_mark_page_used(
ut_ad(seg_inode && mtr);
ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
descr = xdes_get_descriptor(space, zip_size, page, mtr);
@ -3373,6 +3429,8 @@ fseg_free_extent(
ut_a(xdes_get_state(descr, mtr) == XDES_FSEG);
ut_a(0 == ut_dulint_cmp(mtr_read_dulint(descr + XDES_ID, mtr),
mtr_read_dulint(seg_inode + FSEG_ID, mtr)));
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
first_page_in_extent = page - (page % FSP_EXTENT_SIZE);
@ -3463,7 +3521,13 @@ fseg_free_step(
ut_a(descr);
ut_a(xdes_get_bit(descr, XDES_FREE_BIT,
header_page % FSP_EXTENT_SIZE, mtr) == FALSE);
inode = fseg_inode_get(header, space, zip_size, mtr);
inode = fseg_inode_try_get(header, space, zip_size, mtr);
if (UNIV_UNLIKELY(inode == NULL)) {
fprintf(stderr, "double free of inode from %u:%u\n",
(unsigned) space, (unsigned) header_page);
return(TRUE);
}
descr = fseg_get_first_extent(inode, space, zip_size, mtr);
@ -3587,6 +3651,7 @@ fseg_get_first_extent(
ut_ad(inode && mtr);
ut_ad(space == page_get_space_id(page_align(inode)));
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
first = fil_addr_null;
@ -3801,6 +3866,7 @@ fseg_print_low(
(ulong) reserved, (ulong) used, (ulong) n_full,
(ulong) n_frag, (ulong) n_free, (ulong) n_not_full,
(ulong) n_used);
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
}
#ifdef UNIV_BTR_PRINT

View File

@ -72,6 +72,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
/* Include necessary InnoDB headers */
extern "C" {
#include "univ.i"
#include "buf0lru.h"
#include "btr0sea.h"
#include "os0file.h"
#include "os0thread.h"
@ -106,6 +107,9 @@ extern "C" {
#include "i_s.h"
#ifndef MYSQL_SERVER
# ifndef MYSQL_PLUGIN_IMPORT
# define MYSQL_PLUGIN_IMPORT /* nothing */
# endif /* MYSQL_PLUGIN_IMPORT */
/* This is needed because of Bug #3596. Let us hope that pthread_mutex_t
is defined the same in both builds: the MySQL server and the InnoDB plugin. */
extern MYSQL_PLUGIN_IMPORT pthread_mutex_t LOCK_thread_count;
@ -152,6 +156,10 @@ static ulong innobase_write_io_threads;
static long long innobase_buffer_pool_size, innobase_log_file_size;
/** Percentage of the buffer pool to reserve for 'old' blocks.
Connected to buf_LRU_old_ratio. */
static uint innobase_old_blocks_pct;
/* The default values for the following char* start-up parameters
are determined in innobase_init below: */
@ -166,9 +174,7 @@ file formats in the configuration file, but can only be set to any
of the supported file formats during runtime. */
static char* innobase_file_format_check = NULL;
/* The following has a misleading name: starting from 4.0.5, this also
affects Windows: */
static char* innobase_unix_file_flush_method = NULL;
static char* innobase_file_flush_method = NULL;
/* Below we have boolean-valued start-up parameters, and their default
values */
@ -214,15 +220,34 @@ static void free_share(INNOBASE_SHARE *share);
static int innobase_close_connection(handlerton *hton, THD* thd);
static int innobase_commit(handlerton *hton, THD* thd, bool all);
static int innobase_rollback(handlerton *hton, THD* thd, bool all);
static int innobase_rollback_to_savepoint(handlerton *hton, THD* thd,
static int innobase_rollback_to_savepoint(handlerton *hton, THD* thd,
void *savepoint);
static int innobase_savepoint(handlerton *hton, THD* thd, void *savepoint);
static int innobase_release_savepoint(handlerton *hton, THD* thd,
static int innobase_release_savepoint(handlerton *hton, THD* thd,
void *savepoint);
static handler *innobase_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root);
/***********************************************************************
This function checks each index name for a table against reserved
system default primary index name 'GEN_CLUST_INDEX'. If a name matches,
this function pushes an error message to the client, and returns true. */
static
bool
innobase_index_name_is_reserved(
/*============================*/
/* out: true if index name matches a
reserved name */
const trx_t* trx, /* in: InnoDB transaction handle */
const TABLE* form, /* in: information on table
columns and indexes */
const char* norm_name); /* in: table name */
/* "GEN_CLUST_INDEX" is the name reserved for Innodb default
system primary index. */
static const char innobase_index_reserve_name[]= "GEN_CLUST_INDEX";
/** @brief Initialize the default value of innodb_commit_concurrency.
Once InnoDB is running, the innodb_commit_concurrency must not change
@ -492,10 +517,10 @@ static SHOW_VAR innodb_status_variables[]= {
(char*) &export_vars.innodb_buffer_pool_pages_misc, SHOW_LONG},
{"buffer_pool_pages_total",
(char*) &export_vars.innodb_buffer_pool_pages_total, SHOW_LONG},
{"buffer_pool_read_ahead_rnd",
(char*) &export_vars.innodb_buffer_pool_read_ahead_rnd, SHOW_LONG},
{"buffer_pool_read_ahead_seq",
(char*) &export_vars.innodb_buffer_pool_read_ahead_seq, SHOW_LONG},
{"buffer_pool_read_ahead",
(char*) &export_vars.innodb_buffer_pool_read_ahead, SHOW_LONG},
{"buffer_pool_read_ahead_evicted",
(char*) &export_vars.innodb_buffer_pool_read_ahead_evicted, SHOW_LONG},
{"buffer_pool_read_requests",
(char*) &export_vars.innodb_buffer_pool_read_requests, SHOW_LONG},
{"buffer_pool_reads",
@ -865,17 +890,14 @@ convert_error_code_to_mysql(
return(ER_PRIMARY_CANT_HAVE_NULL);
case DB_TOO_MANY_CONCURRENT_TRXS:
/* Once MySQL add the appropriate code to errmsg.txt then
we can get rid of this #ifdef. NOTE: The code checked by
the #ifdef is the suggested name for the error condition
and the actual error code name could very well be different.
This will require some monitoring, ie. the status
of this request on our part.*/
#ifdef ER_TOO_MANY_CONCURRENT_TRXS
return(ER_TOO_MANY_CONCURRENT_TRXS);
#else
/* New error code HA_ERR_TOO_MANY_CONCURRENT_TRXS is only
available in 5.1.38 and later, but the plugin should still
work with previous versions of MySQL. */
#ifdef HA_ERR_TOO_MANY_CONCURRENT_TRXS
return(HA_ERR_TOO_MANY_CONCURRENT_TRXS);
#else /* HA_ERR_TOO_MANY_CONCURRENT_TRXS */
return(HA_ERR_RECORD_FILE_FULL);
#endif
#endif /* HA_ERR_TOO_MANY_CONCURRENT_TRXS */
case DB_UNSUPPORTED:
return(HA_ERR_UNSUPPORTED);
}
@ -949,7 +971,23 @@ innobase_get_cset_width(
*mbminlen = cs->mbminlen;
*mbmaxlen = cs->mbmaxlen;
} else {
ut_a(cset == 0);
THD* thd = current_thd;
if (thd && thd_sql_command(thd) == SQLCOM_DROP_TABLE) {
/* Fix bug#46256: allow tables to be dropped if the
collation is not found, but issue a warning. */
if ((global_system_variables.log_warnings)
&& (cset != 0)){
sql_print_warning(
"Unknown collation #%lu.", cset);
}
} else {
ut_a(cset == 0);
}
*mbminlen = *mbmaxlen = 0;
}
}
@ -2173,7 +2211,7 @@ innobase_change_buffering_inited_ok:
/* --------------------------------------------------*/
srv_file_flush_method_str = innobase_unix_file_flush_method;
srv_file_flush_method_str = innobase_file_flush_method;
srv_n_log_groups = (ulint) innobase_mirrored_log_groups;
srv_n_log_files = (ulint) innobase_log_files_in_group;
@ -2228,6 +2266,9 @@ innobase_change_buffering_inited_ok:
ut_a(0 == strcmp(my_charset_latin1.name, "latin1_swedish_ci"));
srv_latin1_ordering = my_charset_latin1.sort_order;
innobase_old_blocks_pct = buf_LRU_old_ratio_update(
innobase_old_blocks_pct, FALSE);
innobase_commit_concurrency_init_default();
/* Since we in this module access directly the fields of a trx
@ -2481,6 +2522,19 @@ retry:
}
}
/* The following calls to read the MySQL binary log
file name and the position return consistent results:
1) Other InnoDB transactions cannot intervene between
these calls as we are holding prepare_commit_mutex.
2) Binary logging of other engines is not relevant
to InnoDB as all InnoDB requires is that committing
InnoDB transactions appear in the same order in the
MySQL binary log as they appear in InnoDB logs.
3) A MySQL log file rotation cannot happen because
MySQL protects against this by having a counter of
transactions in prepared state and it only allows
a rotation when the counter drops to zero. See
LOCK_prep_xids and COND_prep_xids in log.cc. */
trx->mysql_log_file_name = mysql_bin_log_file_name();
trx->mysql_log_offset = (ib_int64_t) mysql_bin_log_file_pos();
@ -3127,7 +3181,7 @@ retry:
if (is_part) {
sql_print_error("Failed to open table %s after "
"%lu attemtps.\n", norm_name,
"%lu attempts.\n", norm_name,
retries);
}
@ -5028,6 +5082,11 @@ ha_innobase::index_read(
index = prebuilt->index;
if (UNIV_UNLIKELY(index == NULL)) {
prebuilt->index_usable = FALSE;
DBUG_RETURN(HA_ERR_CRASHED);
}
/* Note that if the index for which the search template is built is not
necessarily prebuilt->index, but can also be the clustered index */
@ -5187,6 +5246,7 @@ ha_innobase::change_active_index(
if (UNIV_UNLIKELY(!prebuilt->index)) {
sql_print_warning("InnoDB: change_active_index(%u) failed",
keynr);
prebuilt->index_usable = FALSE;
DBUG_RETURN(1);
}
@ -5679,6 +5739,28 @@ create_table_def(
}
}
/* First check whether the column to be added has a
system reserved name. */
if (dict_col_name_is_reserved(field->field_name)){
push_warning_printf(
(THD*) trx->mysql_thd,
MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_CANT_CREATE_TABLE,
"Error creating table '%s' with "
"column name '%s'. '%s' is a "
"reserved name. Please try to "
"re-create the table with a "
"different column name.",
table->name, (char*) field->field_name,
(char*) field->field_name);
dict_mem_table_free(table);
trx_commit_for_mysql(trx);
error = DB_ERROR;
goto error_ret;
}
dict_mem_table_add_col(table, table->heap,
(char*) field->field_name,
col_type,
@ -5692,6 +5774,7 @@ create_table_def(
error = row_create_table_for_mysql(table, trx);
error_ret:
error = convert_error_code_to_mysql(error, flags, NULL);
DBUG_RETURN(error);
@ -5730,6 +5813,9 @@ create_index(
n_fields = key->key_parts;
/* Assert that "GEN_CLUST_INDEX" cannot be used as non-primary index */
ut_a(innobase_strcasecmp(key->name, innobase_index_reserve_name) != 0);
ind_type = 0;
if (key_num == form->s->primary_key) {
@ -5838,8 +5924,8 @@ create_clustered_index_when_no_primary(
/* We pass 0 as the space id, and determine at a lower level the space
id where to store the table */
index = dict_mem_index_create(table_name, "GEN_CLUST_INDEX",
index = dict_mem_index_create(table_name,
innobase_index_reserve_name,
0, DICT_CLUSTERED, 0);
error = row_create_index_for_mysql(index, trx, NULL);
@ -6265,14 +6351,6 @@ ha_innobase::create(
flags = DICT_TF_COMPACT;
}
error = create_table_def(trx, form, norm_name,
create_info->options & HA_LEX_CREATE_TMP_TABLE ? name2 : NULL,
flags);
if (error) {
goto cleanup;
}
/* Look for a primary key */
primary_key_no= (form->s->primary_key != MAX_KEY ?
@ -6284,6 +6362,22 @@ ha_innobase::create(
ut_a(primary_key_no == -1 || primary_key_no == 0);
/* Check for name conflicts (with reserved name) for
any user indices to be created. */
if (innobase_index_name_is_reserved(trx, form, norm_name)) {
error = -1;
goto cleanup;
}
error = create_table_def(trx, form, norm_name,
create_info->options & HA_LEX_CREATE_TMP_TABLE ? name2 : NULL,
flags);
if (error) {
goto cleanup;
}
/* Create the keys */
if (form->s->keys == 0 || primary_key_no == -1) {
@ -8468,6 +8562,7 @@ ha_innobase::store_lock(
&& isolation_level != TRX_ISO_SERIALIZABLE
&& (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT)
&& (sql_command == SQLCOM_INSERT_SELECT
|| sql_command == SQLCOM_REPLACE_SELECT
|| sql_command == SQLCOM_UPDATE
|| sql_command == SQLCOM_CREATE_TABLE)) {
@ -8475,10 +8570,11 @@ ha_innobase::store_lock(
option set or this session is using READ COMMITTED
isolation level and isolation level of the transaction
is not set to serializable and MySQL is doing
INSERT INTO...SELECT or UPDATE ... = (SELECT ...) or
CREATE ... SELECT... without FOR UPDATE or
IN SHARE MODE in select, then we use consistent
read for select. */
INSERT INTO...SELECT or REPLACE INTO...SELECT
or UPDATE ... = (SELECT ...) or CREATE ...
SELECT... without FOR UPDATE or IN SHARE
MODE in select, then we use consistent read
for select. */
prebuilt->select_lock_type = LOCK_NONE;
prebuilt->stored_select_lock_type = LOCK_NONE;
@ -9634,6 +9730,25 @@ innodb_adaptive_hash_index_update(
}
}
/****************************************************************//**
Update the system variable innodb_old_blocks_pct using the "saved"
value. This function is registered as a callback with MySQL. */
static
void
innodb_old_blocks_pct_update(
/*=========================*/
THD* thd, /*!< in: thread handle */
struct st_mysql_sys_var* var, /*!< in: pointer to
system variable */
void* var_ptr,/*!< out: where the
formal string goes */
const void* save) /*!< in: immediate result
from check function */
{
innobase_old_blocks_pct = buf_LRU_old_ratio_update(
*static_cast<const uint*>(save), TRUE);
}
/*************************************************************//**
Check if it is a valid value of innodb_change_buffering. This function is
registered as a callback with MySQL.
@ -9707,6 +9822,46 @@ static int show_innodb_vars(THD *thd, SHOW_VAR *var, char *buff)
return 0;
}
/***********************************************************************
This function checks each index name for a table against reserved
system default primary index name 'GEN_CLUST_INDEX'. If a name matches,
this function pushes an error message to the client, and returns true. */
static
bool
innobase_index_name_is_reserved(
/*============================*/
/* out: true if an index name
matches the reserved name */
const trx_t* trx, /* in: InnoDB transaction handle */
const TABLE* form, /* in: information on table
columns and indexes */
const char* norm_name) /* in: table name */
{
KEY* key;
uint key_num; /* index number */
for (key_num = 0; key_num < form->s->keys; key_num++) {
key = form->key_info + key_num;
if (innobase_strcasecmp(key->name,
innobase_index_reserve_name) == 0) {
/* Push warning to mysql */
push_warning_printf((THD*) trx->mysql_thd,
MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_CANT_CREATE_TABLE,
"Cannot Create Index with name "
"'%s'. The name is reserved "
"for the system default primary "
"index.",
innobase_index_reserve_name);
return(true);
}
}
return(false);
}
static SHOW_VAR innodb_status_variables_export[]= {
{"Innodb", (char*) &show_innodb_vars, SHOW_FUNC},
{NullS, NullS, SHOW_LONG}
@ -9775,7 +9930,7 @@ static MYSQL_SYSVAR_ULONG(flush_log_at_trx_commit, srv_flush_log_at_trx_commit,
" or 2 (write at commit, flush once per second).",
NULL, NULL, 1, 0, 2, 0);
static MYSQL_SYSVAR_STR(flush_method, innobase_unix_file_flush_method,
static MYSQL_SYSVAR_STR(flush_method, innobase_file_flush_method,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"With which method to flush data.", NULL, NULL, NULL);
@ -9871,7 +10026,7 @@ static MYSQL_SYSVAR_ULONG(concurrency_tickets, srv_n_free_tickets_to_enter,
NULL, NULL, 500L, 1L, ~0L, 0);
static MYSQL_SYSVAR_LONG(file_io_threads, innobase_file_io_threads,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY | PLUGIN_VAR_NOSYSVAR,
"Number of file I/O threads in InnoDB.",
NULL, NULL, 4, 4, 64, 0);
@ -9910,6 +10065,18 @@ static MYSQL_SYSVAR_LONG(mirrored_log_groups, innobase_mirrored_log_groups,
"Number of identical copies of log groups we keep for the database. Currently this should be set to 1.",
NULL, NULL, 1, 1, 10, 0);
static MYSQL_SYSVAR_UINT(old_blocks_pct, innobase_old_blocks_pct,
PLUGIN_VAR_RQCMDARG,
"Percentage of the buffer pool to reserve for 'old' blocks.",
NULL, innodb_old_blocks_pct_update, 100 * 3 / 8, 5, 95, 0);
static MYSQL_SYSVAR_UINT(old_blocks_time, buf_LRU_old_threshold_ms,
PLUGIN_VAR_RQCMDARG,
"Move blocks to the 'new' end of the buffer pool if the first access"
" was at least this many milliseconds ago."
" The timeout is disabled if 0 (the default).",
NULL, NULL, 0, 0, UINT_MAX32, 0);
static MYSQL_SYSVAR_LONG(open_files, innobase_open_files,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"How many files at the maximum InnoDB keeps open at the same time.",
@ -10008,6 +10175,8 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(adaptive_flushing),
MYSQL_SYSVAR(max_purge_lag),
MYSQL_SYSVAR(mirrored_log_groups),
MYSQL_SYSVAR(old_blocks_pct),
MYSQL_SYSVAR(old_blocks_time),
MYSQL_SYSVAR(open_files),
MYSQL_SYSVAR(rollback_on_timeout),
MYSQL_SYSVAR(stats_on_metadata),

View File

@ -663,7 +663,7 @@ ha_innobase::add_index(
if (UNIV_UNLIKELY(error)) {
err_exit:
mem_heap_free(heap);
trx_general_rollback_for_mysql(trx, FALSE, NULL);
trx_general_rollback_for_mysql(trx, NULL);
trx_free_for_mysql(trx);
trx_commit_for_mysql(prebuilt->trx);
DBUG_RETURN(error);
@ -801,7 +801,7 @@ error_handling:
alter table t drop index b, add index (b);
The fix will have to parse the SQL and note that the index
being added has the same name as the the one being dropped and
being added has the same name as the one being dropped and
ignore that in the dup index check.*/
//dict_table_check_for_dup_indexes(prebuilt->table);
#endif

View File

@ -1,69 +0,0 @@
/*****************************************************************************
Copyright (c) 2008, 2009, Innobase Oy. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA
*****************************************************************************/
/*******************************************************************//**
@file handler/handler0vars.h
This file contains accessor functions for dynamic plugin on Windows.
***********************************************************************/
#if defined __WIN__ && defined MYSQL_DYNAMIC_PLUGIN
/*******************************************************************//**
This is a list of externals that can not be resolved by delay loading.
They have to be resolved indirectly via their addresses in the .map file.
All of them are external variables. */
extern CHARSET_INFO* wdl_my_charset_bin;
extern CHARSET_INFO* wdl_my_charset_latin1;
extern CHARSET_INFO* wdl_my_charset_filename;
extern CHARSET_INFO** wdl_system_charset_info;
extern CHARSET_INFO** wdl_default_charset_info;
extern CHARSET_INFO** wdl_all_charsets;
extern system_variables* wdl_global_system_variables;
extern char* wdl_mysql_real_data_home;
extern char** wdl_mysql_data_home;
extern char** wdl_tx_isolation_names;
extern char** wdl_binlog_format_names;
extern char* wdl_reg_ext;
extern pthread_mutex_t* wdl_LOCK_thread_count;
extern key_map* wdl_key_map_full;
extern MY_TMPDIR* wdl_mysql_tmpdir_list;
extern bool* wdl_mysqld_embedded;
extern uint* wdl_lower_case_table_names;
extern ulong* wdl_specialflag;
extern int* wdl_my_umask;
#define my_charset_bin (*wdl_my_charset_bin)
#define my_charset_latin1 (*wdl_my_charset_latin1)
#define my_charset_filename (*wdl_my_charset_filename)
#define system_charset_info (*wdl_system_charset_info)
#define default_charset_info (*wdl_default_charset_info)
#define all_charsets (wdl_all_charsets)
#define global_system_variables (*wdl_global_system_variables)
#define mysql_real_data_home (wdl_mysql_real_data_home)
#define mysql_data_home (*wdl_mysql_data_home)
#define tx_isolation_names (wdl_tx_isolation_names)
#define binlog_format_names (wdl_binlog_format_names)
#define reg_ext (wdl_reg_ext)
#define LOCK_thread_count (*wdl_LOCK_thread_count)
#define key_map_full (*wdl_key_map_full)
#define mysql_tmpdir_list (*wdl_mysql_tmpdir_list)
#define mysqld_embedded (*wdl_mysqld_embedded)
#define lower_case_table_names (*wdl_lower_case_table_names)
#define specialflag (*wdl_specialflag)
#define my_umask (*wdl_my_umask)
#endif

Some files were not shown because too many files have changed in this diff.