commit a041c79ec9
autocommit
@@ -1,4 +1,4 @@
 [MYSQL]
 post_commit_to = "commits@lists.mysql.com"
 post_push_to = "commits@lists.mysql.com"
-tree_name = "mysql-5.1"
+tree_name = "mysql-5.1-bugteam"
@@ -1165,6 +1165,7 @@ void free_used_memory()
   mysql_server_end();

   /* Don't use DBUG after mysql_server_end() */
+  DBUG_VIOLATION_HELPER_LEAVE;
   return;
 }

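Note on the hunk above: the function keeps its plain `return` instead of switching to DBUG_VOID_RETURN because, as the comment says, the dbug subsystem must not be called after mysql_server_end(); DBUG_VIOLATION_HELPER_LEAVE only marks the new guard object as released and makes no dbug call. A minimal sketch of the same pattern, assuming the updated my_dbug.h is on the include path (the function and the stub are hypothetical, not from the patch):

    #include <my_dbug.h>                      /* assumed MySQL build environment */

    static void shutdown_libraries(void) { }  /* hypothetical stand-in for mysql_server_end() */

    void cleanup_like_free_used_memory(void)  /* hypothetical function mirroring the hunk */
    {
      DBUG_ENTER("cleanup_like_free_used_memory");

      shutdown_libraries();                   /* after this, calling into dbug is not allowed */

      /* DBUG_VOID_RETURN would call _db_return_(), so it cannot be used here. */
      DBUG_VIOLATION_HELPER_LEAVE;            /* only flips the guard's flag */
      return;
    }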
@@ -2487,7 +2488,7 @@ void do_source(struct st_command *command)
   }

   dynstr_free(&ds_filename);
-  return;
+  DBUG_VOID_RETURN;
 }


@@ -7507,6 +7508,8 @@ static void init_signal_handling(void)
 #endif
   sigaction(SIGILL, &sa, NULL);
   sigaction(SIGFPE, &sa, NULL);
+
+  DBUG_VOID_RETURN;
 }

 #endif /* !__WIN__ */
@@ -8121,6 +8124,8 @@ void do_get_replace_column(struct st_command *command)
   }
   my_free(start, MYF(0));
   command->last_argument= command->end;
+
+  DBUG_VOID_RETURN;
 }


@@ -16,6 +16,29 @@
 #ifndef _dbug_h
 #define _dbug_h

+#if defined(__cplusplus) && !defined(DBUG_OFF)
+class Dbug_violation_helper
+{
+public:
+  inline Dbug_violation_helper() :
+    _entered(TRUE)
+  { }
+
+  inline ~Dbug_violation_helper()
+  {
+    assert(!_entered);
+  }
+
+  inline void leave()
+  {
+    _entered= FALSE;
+  }
+
+private:
+  bool _entered;
+};
+#endif /* C++ */
+
 #ifdef __cplusplus
 extern "C" {
 #endif
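The class added above is an RAII guard: DBUG_ENTER declares an instance on the stack, the DBUG return macros call leave(), and a debug build aborts in the destructor if a function instrumented with DBUG_ENTER exits without going through DBUG_RETURN / DBUG_VOID_RETURN. A self-contained mock of the idea (none of these names are from the patch; it is only to illustrate the mechanism):

    #include <cassert>

    class Return_guard                       // mock of Dbug_violation_helper
    {
    public:
      Return_guard() : entered(true) { }
      ~Return_guard() { assert(!entered); }  // fires in a debug build on violation
      void leave() { entered= false; }
    private:
      bool entered;
    };

    int well_behaved()
    {
      Return_guard guard;   // what DBUG_ENTER declares
      guard.leave();        // what DBUG_RETURN does before returning
      return 0;
    }

    int violator()
    {
      Return_guard guard;
      return 1;             // plain return: the destructor asserts in debug builds
    }

    int main()
    {
      well_behaved();
      violator();           // assertion failure expected here
    }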
@@ -47,11 +70,31 @@ extern void _db_lock_file_(void);
 extern void _db_unlock_file_(void);
 extern FILE *_db_fp_(void);

-#define DBUG_ENTER(a) const char *_db_func_, *_db_file_; uint _db_level_; \
-        char **_db_framep_; \
-        _db_enter_ (a,__FILE__,__LINE__,&_db_func_,&_db_file_,&_db_level_, \
-                    &_db_framep_)
+#ifdef __cplusplus
+
+#define DBUG_ENTER(a) \
+        const char *_db_func_, *_db_file_; \
+        uint _db_level_; \
+        char **_db_framep_; \
+        Dbug_violation_helper dbug_violation_helper; \
+        _db_enter_ (a, __FILE__, __LINE__, &_db_func_, &_db_file_, \
+                    &_db_level_, &_db_framep_)
+#define DBUG_VIOLATION_HELPER_LEAVE dbug_violation_helper.leave()
+
+#else /* C */
+
+#define DBUG_ENTER(a) \
+        const char *_db_func_, *_db_file_; \
+        uint _db_level_; \
+        char **_db_framep_; \
+        _db_enter_ (a, __FILE__, __LINE__, &_db_func_, &_db_file_, \
+                    &_db_level_, &_db_framep_)
+#define DBUG_VIOLATION_HELPER_LEAVE do { } while(0)
+
+#endif /* C++ */
+
 #define DBUG_LEAVE \
+        DBUG_VIOLATION_HELPER_LEAVE; \
         _db_return_ (__LINE__, &_db_func_, &_db_file_, &_db_level_)
 #define DBUG_RETURN(a1) do {DBUG_LEAVE; return(a1);} while(0)
 #define DBUG_VOID_RETURN do {DBUG_LEAVE; return;} while(0)
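For a C++ translation unit built with debugging enabled, the macros above expand roughly as follows for a small function. This hand expansion is illustrative only; uint, _db_enter_() and _db_return_() come from the dbug headers, and the function is hypothetical:

    #include <my_dbug.h>   /* assumed MySQL build environment */

    void do_something()
    {
      /* DBUG_ENTER("do_something") expands to roughly: */
      const char *_db_func_, *_db_file_;
      uint _db_level_;
      char **_db_framep_;
      Dbug_violation_helper dbug_violation_helper;        /* the new guard object */
      _db_enter_ ("do_something", __FILE__, __LINE__,
                  &_db_func_, &_db_file_, &_db_level_, &_db_framep_);

      /* ... function body ... */

      /* DBUG_VOID_RETURN expands to { DBUG_LEAVE; return; }, i.e.: */
      dbug_violation_helper.leave();                      /* DBUG_VIOLATION_HELPER_LEAVE */
      _db_return_ (__LINE__, &_db_func_, &_db_file_, &_db_level_);
      return;
    }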
@@ -85,6 +128,7 @@ extern FILE *_db_fp_(void);

 #define DBUG_ENTER(a1)
 #define DBUG_LEAVE
+#define DBUG_VIOLATION_HELPER_LEAVE
 #define DBUG_RETURN(a1) do { return(a1); } while(0)
 #define DBUG_VOID_RETURN do { return; } while(0)
 #define DBUG_EXECUTE(keyword,a1) do { } while(0)
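Under DBUG_OFF all of these macros, including the new DBUG_VIOLATION_HELPER_LEAVE, expand to nothing (or to a bare return), so release builds compile the instrumented code unchanged. Schematically, under assumptions noted in the comments:

    #define DBUG_OFF                 /* normally set by the build system, not in source */
    #include <my_dbug.h>             /* assumed include path */

    void instrumented(void)
    {
      DBUG_ENTER("instrumented");    /* expands to nothing under DBUG_OFF */
      DBUG_VIOLATION_HELPER_LEAVE;   /* expands to nothing under DBUG_OFF */
      DBUG_VOID_RETURN;              /* expands to do { return; } while(0) */
    }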
@@ -67,6 +67,7 @@ extern int NEAR my_errno; /* Last error in mysys */
 #define MY_HOLD_ON_ERROR 256 /* my_realloc() ; Return old ptr on error */
 #define MY_DONT_OVERWRITE_FILE 1024 /* my_copy: Don't overwrite file */
 #define MY_THREADSAFE 2048 /* my_seek(): lock fd mutex */
+#define MY_SYNC 4096 /* my_copy(): sync dst file */

 #define MY_CHECK_ERROR 1 /* Params to my_end; Check open-close */
 #define MY_GIVE_INFO 2 /* Give time info about process*/
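MY_SYNC is a new MyFlags bit telling my_copy() to sync the destination file before returning. A hedged usage sketch, not taken from the patch (the paths are invented and the usual mysys headers are assumed):

    #include <my_global.h>
    #include <my_sys.h>

    int copy_file_durably(void)
    {
      /* Copy, refuse to overwrite an existing target, report errors,
         and fsync the destination before returning. */
      return my_copy("/tmp/source.cnf", "/tmp/target.cnf",
                     MYF(MY_DONT_OVERWRITE_FILE | MY_SYNC | MY_WME));
    }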
@@ -163,5 +163,81 @@ show create table t1;
 connection master;
 drop table t1;

-# End cleanup
+#
+# BUG#45999 Row based replication fails when auto_increment field = 0.
+# Store engine of Slaves auto-generates new sequence numbers for
+# auto_increment fields if the values of them are 0. There is an inconsistency
+# between slave and master. When MODE_NO_AUTO_VALUE_ON_ZERO are masters treat
+#
+source include/master-slave-reset.inc;
+
+connection master;
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+--enable_warnings
+
+eval CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=$engine_type;
+eval CREATE TABLE t2 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=$engine_type2;
+SET SQL_MODE='';
+# Value of the id will be 1;
+INSERT INTO t1 VALUES(NULL);
+INSERT INTO t2 VALUES(NULL);
+SELECT * FROM t1;
+SELECT * FROM t2;
+# Value of the id will be 2;
+INSERT INTO t1 VALUES();
+INSERT INTO t2 VALUES();
+SELECT * FROM t1;
+SELECT * FROM t2;
+# Value of the id will be 3. The master treats 0 as NULL or empty because
+# NO_AUTO_VALUE_ON_ZERO is not assign to SQL_MODE.
+INSERT INTO t1 VALUES(0);
+INSERT INTO t2 VALUES(0);
+SELECT * FROM t1;
+SELECT * FROM t2;
+
+SET SQL_MODE=NO_AUTO_VALUE_ON_ZERO;
+# Value of the id will be 0. The master does not treat 0 as NULL or empty
+# because NO_AUTO_VALUE_ON_ZERO has assigned to SQL_MODE.
+INSERT INTO t1 VALUES(0);
+INSERT INTO t2 VALUES(0);
+SELECT * FROM t1;
+SELECT * FROM t2;
+
+INSERT INTO t1 VALUES(4);
+INSERT INTO t2 VALUES(4);
+FLUSH LOGS;
+sync_slave_with_master;
+
+let $diff_table_1= master:test.t1;
+let $diff_table_2= slave:test.t1;
+source include/diff_tables.inc;
+
+let $diff_table_1= master:test.t2;
+let $diff_table_2= slave:test.t2;
+source include/diff_tables.inc;
+
+connection master;
+DROP TABLE t1;
+DROP TABLE t2;
+sync_slave_with_master;
+
+connection master;
+let $MYSQLD_DATADIR= `SELECT @@DATADIR`;
+--exec $MYSQL_BINLOG $MYSQLD_DATADIR/master-bin.000001 | $MYSQL test
+sync_slave_with_master;
+
+let $diff_table_1= master:test.t1;
+let $diff_table_2= slave:test.t1;
+source include/diff_tables.inc;
+
+let $diff_table_1= master:test.t2;
+let $diff_table_2= slave:test.t2;
+source include/diff_tables.inc;
+
+# End cleanup
+DROP TABLE t1;
+DROP TABLE t2;
+SET SQL_MODE='';
 sync_slave_with_master;
@@ -9,29 +9,27 @@
 #############################################################################

 # Begin clean up test section
-connection master;
 --disable_warnings
-create database if not exists mysqltest1;
-DROP PROCEDURE IF EXISTS mysqltest1.p1;
-DROP PROCEDURE IF EXISTS mysqltest1.p2;
-DROP TABLE IF EXISTS mysqltest1.t2;
-DROP TABLE IF EXISTS mysqltest1.t1;
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP PROCEDURE IF EXISTS p1;
+DROP PROCEDURE IF EXISTS p2;
 --enable_warnings
 # End of cleanup

 # Begin test section 1
-eval CREATE TABLE IF NOT EXISTS mysqltest1.t1(name CHAR(16), birth DATE,PRIMARY KEY(name))ENGINE=$engine_type;
-eval CREATE TABLE IF NOT EXISTS mysqltest1.t2(name CHAR(16), age INT ,PRIMARY KEY(name))ENGINE=$engine_type;
+eval CREATE TABLE IF NOT EXISTS t1(name CHAR(16), birth DATE,PRIMARY KEY(name))ENGINE=$engine_type;
+eval CREATE TABLE IF NOT EXISTS t2(name CHAR(16), age INT ,PRIMARY KEY(name))ENGINE=$engine_type;

 delimiter |;
-CREATE PROCEDURE mysqltest1.p1()
+CREATE PROCEDURE p1()
 BEGIN
 DECLARE done INT DEFAULT 0;
 DECLARE spa CHAR(16);
 DECLARE spb INT;
 DECLARE cur1 CURSOR FOR SELECT name,
 (YEAR(CURDATE())-YEAR(birth))-(RIGHT(CURDATE(),5)<RIGHT(birth,5))
-FROM mysqltest1.t1;
+FROM t1;
 DECLARE CONTINUE HANDLER FOR SQLSTATE '02000' SET done = 1;

 OPEN cur1;
@@ -41,7 +39,7 @@ BEGIN
 FETCH cur1 INTO spa, spb;
 IF NOT done THEN
 START TRANSACTION;
-INSERT INTO mysqltest1.t2 VALUES (spa,spb);
+INSERT INTO t2 VALUES (spa,spb);
 COMMIT;
 END IF;
 UNTIL done END REPEAT;
@@ -49,30 +47,29 @@ BEGIN
 SET AUTOCOMMIT=1;
 CLOSE cur1;
 END|
-CREATE PROCEDURE mysqltest1.p2()
+CREATE PROCEDURE p2()
 BEGIN
-INSERT INTO mysqltest1.t1 VALUES ('MySQL','1993-02-04'),('ROCKS', '1990-08-27'),('Texas', '1999-03-30'),('kyle','2005-1-1');
+INSERT INTO t1 VALUES ('MySQL','1993-02-04'),('ROCKS', '1990-08-27'),('Texas', '1999-03-30'),('kyle','2005-1-1');
 END|
 delimiter ;|

-CALL mysqltest1.p2();
+CALL p2();
 sync_slave_with_master;

 connection master;
-CALL mysqltest1.p1();
+CALL p1();
 sync_slave_with_master;

 connection master;

---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/sp006_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/sp006_slave.sql
+--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp006_master.sql
+--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp006_slave.sql


-DROP PROCEDURE IF EXISTS mysqltest1.p1;
-DROP PROCEDURE IF EXISTS mysqltest1.p2;
-DROP TABLE IF EXISTS mysqltest1.t1;
-DROP TABLE IF EXISTS mysqltest1.t2;
-DROP DATABASE mysqltest1;
+DROP TABLE t1;
+DROP TABLE t2;
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;

 # Lets compare. Note: If they match test will pass, if they do not match
 # the test will show that the diff statement failed and not reject file
mysql-test/include/have_dynamic_loading.inc (new file)
@@ -0,0 +1,7 @@
+#
+# Whether server supports dynamic loading.
+#
+--require r/have_dynamic_loading.require
+disable_query_log;
+show variables like 'have_dynamic_loading';
+enable_query_log;
@@ -2,10 +2,7 @@
 # Check if server has support for loading udf's
 # i.e it will support dlopen
 #
---require r/have_dynamic_loading.require
-disable_query_log;
-show variables like 'have_dynamic_loading';
-enable_query_log;
+--source include/have_dynamic_loading.inc

 #
 # Check if the variable EXAMPLE_PLUGIN is set
@@ -2,10 +2,7 @@
 # Check if server has support for loading udf's
 # i.e it will support dlopen
 #
---require r/have_dynamic_loading.require
-disable_query_log;
-show variables like 'have_dynamic_loading';
-enable_query_log;
+--source include/have_dynamic_loading.inc

 #
 # Check if the variable SIMPLE_PARSER is set
@@ -2,10 +2,7 @@
 # Check if server has support for loading udf's
 # i.e it will support dlopen
 #
---require r/have_dynamic_loading.require
-disable_query_log;
-show variables like 'have_dynamic_loading';
-enable_query_log;
+--source include/have_dynamic_loading.inc

 #
 # Check if the variable UDF_EXAMPLE_LIB is set
@@ -12695,3 +12695,25 @@ a b
 1 NULL
 2 NULL
 DROP TABLE t1;
+CREATE TABLE t1(a INT, b BLOB) ENGINE=archive;
+SELECT DATA_LENGTH, AVG_ROW_LENGTH FROM
+INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test';
+DATA_LENGTH AVG_ROW_LENGTH
+8666 15
+INSERT INTO t1 VALUES(1, 'sampleblob1'),(2, 'sampleblob2');
+SELECT DATA_LENGTH, AVG_ROW_LENGTH FROM
+INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test';
+DATA_LENGTH AVG_ROW_LENGTH
+8700 4350
+DROP TABLE t1;
+SET @save_join_buffer_size= @@join_buffer_size;
+SET @@join_buffer_size= 8228;
+CREATE TABLE t1(a CHAR(255)) ENGINE=archive;
+INSERT INTO t1 VALUES('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),
+('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),
+('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa');
+SELECT COUNT(t1.a) FROM t1, t1 a, t1 b, t1 c, t1 d, t1 e;
+COUNT(t1.a)
+729
+DROP TABLE t1;
+SET @@join_buffer_size= @save_join_buffer_size;
@@ -1572,6 +1572,19 @@ CREATE TABLE IF NOT EXISTS t2 (a INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY)
 SELECT a FROM t1;
 ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
 DROP TABLE t1, t2;
+#
+# BUG#46384 - mysqld segfault when trying to create table with same
+# name as existing view
+#
+CREATE TABLE t1 (a INT);
+CREATE TABLE t2 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+INSERT INTO t2 VALUES (1),(2),(3);
+CREATE VIEW v1 AS SELECT t1.a FROM t1, t2;
+CREATE TABLE v1 AS SELECT * FROM t1;
+ERROR 42S01: Table 'v1' already exists
+DROP VIEW v1;
+DROP TABLE t1,t2;
 End of 5.0 tests
 CREATE TABLE t1 (a int, b int);
 insert into t1 values (1,1),(1,2);
@@ -1,3 +1,4 @@
+RESET MASTER;
 SET NAMES gbk;
 CREATE TABLE t1 (
 f1 BLOB
@@ -763,4 +763,34 @@ a b d c
 1 2 0 2
 1 2 0 3
 DROP TABLE t1;
+#
+# Bug #46159: simple query that never returns
+#
+SET @old_max_heap_table_size = @@max_heap_table_size;
+SET @@max_heap_table_size = 16384;
+SET @old_sort_buffer_size = @@sort_buffer_size;
+SET @@sort_buffer_size = 32804;
+CREATE TABLE t1(c1 int, c2 VARCHAR(20));
+INSERT INTO t1 VALUES (1, '1'), (1, '1'), (2, '2'), (3, '1'), (3, '1'), (4, '4');
+INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
+INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
+INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
+INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
+INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
+INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
+INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
+INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
+SELECT c1, c2, COUNT(*) FROM t1 GROUP BY c1 LIMIT 4;
+c1 c2 COUNT(*)
+1 1 2
+2 2 1
+3 1 2
+4 4 1
+SELECT DISTINCT c2 FROM t1 GROUP BY c1 HAVING COUNT(*) > 1;
+c2
+1
+5
+DROP TABLE t1;
+SET @@sort_buffer_size = @old_sort_buffer_size;
+SET @@max_heap_table_size = @old_max_heap_table_size;
 End of 5.1 tests
@@ -159,6 +159,14 @@ CREATE TABLE t1 (a INT PRIMARY KEY);
 EXPLAIN EXTENDED SELECT COUNT(a) FROM t1 USE KEY(a);
 ERROR 42000: Key 'a' doesn't exist in table 't1'
 DROP TABLE t1;
+CREATE TABLE t1(a LONGTEXT);
+INSERT INTO t1 VALUES (repeat('a',@@global.max_allowed_packet));
+INSERT INTO t1 VALUES (repeat('b',@@global.max_allowed_packet));
+EXPLAIN SELECT DISTINCT 1 FROM t1,
+(SELECT DISTINCTROW a AS away FROM t1 GROUP BY a WITH ROLLUP) as d1
+WHERE t1.a = d1.a;
+ERROR 42S22: Unknown column 'd1.a' in 'where clause'
+DROP TABLE t1;
 #
 # Bug#37870: Usage of uninitialized value caused failed assertion.
 #
@@ -2534,6 +2534,15 @@ SELECT LOAD_FILE(a) FROM t1;
 LOAD_FILE(a)
 NULL
 DROP TABLE t1;
+CREATE TABLE t1 (f2 VARCHAR(20));
+CREATE TABLE t2 (f2 VARCHAR(20));
+INSERT INTO t1 VALUES ('MIN'),('MAX');
+INSERT INTO t2 VALUES ('LOAD');
+SELECT CONCAT_WS('_', (SELECT t2.f2 FROM t2), t1.f2) AS concat_name FROM t1;
+concat_name
+LOAD_MIN
+LOAD_MAX
+DROP TABLE t1, t2;
 End of 5.0 tests
 drop table if exists t1;
 create table t1(f1 tinyint default null)engine=myisam;
mysql-test/r/lowercase_mixed_tmpdir_innodb.result (new file, executable)
@@ -0,0 +1,6 @@
+drop table if exists t1;
+create table t1 (id int) engine=InnoDB;
+insert into t1 values (1);
+create temporary table t2 engine=InnoDB select * from t1;
+drop temporary table t2;
+drop table t1;
@@ -1,4 +1,4 @@
-flush logs;
+RESET MASTER;
 create table t3 (f text character set utf8);
 create table t4 (f text character set cp932);
 flush logs;
@@ -1,4 +1,18 @@
 drop table if exists t1;
+create table t1 (a int not null,
+b datetime not null,
+primary key (a,b))
+engine=innodb
+partition by range (to_days(b))
+subpartition by hash (a)
+subpartitions 2
+( partition p0 values less than (to_days('2009-01-01')),
+partition p1 values less than (to_days('2009-02-01')),
+partition p2 values less than (to_days('2009-03-01')),
+partition p3 values less than maxvalue);
+alter table t1 reorganize partition p1,p2 into
+( partition p2 values less than (to_days('2009-03-01')));
+drop table t1;
 CREATE TABLE t1 (id INT PRIMARY KEY, data INT) ENGINE = InnoDB
 PARTITION BY RANGE(id) (
 PARTITION p0 VALUES LESS THAN (5),
@@ -256,3 +270,7 @@ SUBPARTITION BY KEY (char_column)
 SUBPARTITIONS 2
 (PARTITION p1 VALUES LESS THAN (5) ENGINE = MyISAM) */
 drop table t1;
+CREATE TABLE t1 (a INT) ENGINE=InnoDB
+PARTITION BY list(a) (PARTITION p1 VALUES IN (1));
+CREATE INDEX i1 ON t1 (a);
+DROP TABLE t1;
@@ -6963,6 +6963,22 @@ CALL p1();
 CALL p1();
 DROP PROCEDURE p1;
 DROP TABLE t1;
+#
+# Bug #46629: Item_in_subselect::val_int(): Assertion `0'
+# on subquery inside a SP
+#
+CREATE TABLE t1(a INT);
+CREATE TABLE t2(a INT, b INT PRIMARY KEY);
+CREATE PROCEDURE p1 ()
+BEGIN
+SELECT a FROM t1 A WHERE A.b IN (SELECT b FROM t2 AS B);
+END|
+CALL p1;
+ERROR 42S22: Unknown column 'A.b' in 'IN/ALL/ANY subquery'
+CALL p1;
+ERROR 42S22: Unknown column 'A.b' in 'IN/ALL/ANY subquery'
+DROP PROCEDURE p1;
+DROP TABLE t1, t2;
 # ------------------------------------------------------------------
 # -- End of 5.1 tests
 # ------------------------------------------------------------------
mysql-test/r/subselect4.result (new file)
@@ -0,0 +1,30 @@
+#
+# Bug #46791: Assertion failed:(table->key_read==0),function unknown
+# function,file sql_base.cc
+#
+CREATE TABLE t1 (a INT, b INT, KEY(a));
+INSERT INTO t1 VALUES (1,1),(2,2);
+CREATE TABLE t2 LIKE t1;
+INSERT INTO t2 VALUES (1,1),(2,2);
+CREATE TABLE t3 LIKE t1;
+# should have 1 impossible where and 2 dependent subqueries
+EXPLAIN
+SELECT 1 FROM t1
+WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3))
+ORDER BY count(*);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 index NULL a 5 NULL 2 Using index; Using temporary
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where
+3 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table
+# should not crash the next statement
+SELECT 1 FROM t1
+WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3))
+ORDER BY count(*);
+1
+1
+# should not crash: the crash is caused by the previous statement
+SELECT 1;
+1
+1
+DROP TABLE t1,t2,t3;
+End of 5.0 tests.
@@ -392,4 +392,20 @@ a
 4
 DROP FUNCTION sequence;
 DROP TABLE t1,t2;
+#
+# Bug#46259: 5.0.83 -> 5.1.36, query doesn't work
+#
+CREATE TABLE t1 ( a INT );
+INSERT INTO t1 VALUES (1), (2), (3);
+SELECT IF( a = 1, a, a ) AS `b` FROM t1 ORDER BY field( `b` + 1, 1 );
+b
+1
+2
+3
+SELECT IF( a = 1, a, a ) AS `b` FROM t1 ORDER BY field( `b`, 1 );
+b
+2
+3
+1
+DROP TABLE t1;
 End of 5.0 tests.
@@ -313,4 +313,9 @@ ERROR 22001: Data too long for column 'c_tinytext' at row 1
 insert into t2 values(@q);
 ERROR 22001: Data too long for column 'c_tinyblob' at row 1
 drop table t1, t2;
+DROP TABLE t1;
+ERROR 42S02: Unknown table 't1'
+SHOW ERRORS;
+Level Code Message
+Error 1051 Unknown table 't1'
 End of 5.0 tests
@@ -1,3 +1,4 @@
+RESET MASTER;
 CREATE TABLE t1 (a INT);
 INSERT INTO t1 VALUES (1),(2),(3);
 SELECT * FROM t1;
@@ -1,3 +1,4 @@
+RESET MASTER;
 create table foo (a int);
 flush logs;
 create temporary table tmp1_foo like foo;
@@ -6,6 +6,7 @@ source include/have_log_bin.inc;
 source include/have_debug.inc;

 let $MYSQLD_DATADIR= `select @@datadir`;
+RESET MASTER;

 CREATE TABLE t1 (a INT);

@@ -24,4 +25,4 @@ exec $MYSQL_BINLOG --start-position=106 $MYSQLD_DATADIR/master-bin.000001 >$MYSQ
 eval SELECT cont LIKE '%RELOAD DATABASE; # Shall generate syntax error%' AS `Contain RELOAD DATABASE` FROM (SELECT load_file('$MYSQLTEST_VARDIR/tmp/binlog_incident-bug44442.sql') AS cont) AS tbl;
 --enable_query_log

 remove_file $MYSQLTEST_VARDIR/tmp/binlog_incident-bug44442.sql;
@@ -30,6 +30,7 @@ source include/have_binlog_format_mixed_or_statement.inc;

 connect (master,127.0.0.1,root,,test,$MASTER_MYPORT,);
 connect (master1,127.0.0.1,root,,test,$MASTER_MYPORT,);
+RESET MASTER;

 create table foo (a int);

@ -623,3 +623,195 @@ SHOW CREATE TABLE t1;
|
|||||||
SELECT * FROM t1 ORDER BY c1;
|
SELECT * FROM t1 ORDER BY c1;
|
||||||
DROP TABLE t1;
|
DROP TABLE t1;
|
||||||
|
|
||||||
|
if (!$skip_negative_auto_inc)
|
||||||
|
{
|
||||||
|
--echo #############################################################################
|
||||||
|
--echo # Bug #45823 - Assertion failure in file row/row0mysql.c line 1386
|
||||||
|
--echo # Bug #43988 - AUTO_INCREMENT errors with partitioned InnoDB tables in 5.1.31
|
||||||
|
--echo ##############################################################################
|
||||||
|
|
||||||
|
--echo # Inserting negative autoincrement values into a partition table (partitions >= 4)
|
||||||
|
|
||||||
|
eval CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE=$engine PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
DROP TABLE t;
|
||||||
|
|
||||||
|
--echo # Reading from a partition table (partitions >= 2 ) after inserting a negative
|
||||||
|
--echo # value into the auto increment column
|
||||||
|
|
||||||
|
|
||||||
|
eval CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE=$engine PARTITION BY HASH(c1) PARTITIONS 2;
|
||||||
|
|
||||||
|
INSERT INTO t VALUES (-2,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
DROP TABLE t;
|
||||||
|
|
||||||
|
--echo # Inserting negative auto increment value into a partition table (partitions >= 2)
|
||||||
|
--echo # auto increment value > 2.
|
||||||
|
|
||||||
|
eval CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE=$engine PARTITION BY HASH(c1) PARTITIONS 2;
|
||||||
|
|
||||||
|
INSERT INTO t VALUES (-4,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
DROP TABLE t;
|
||||||
|
|
||||||
|
--echo # Inserting -1 into autoincrement column of a partition table (partition >= 4)
|
||||||
|
|
||||||
|
eval CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE=$engine PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
DROP TABLE t;
|
||||||
|
|
||||||
|
--echo # Deleting from an auto increment table after inserting negative values
|
||||||
|
|
||||||
|
eval CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE=$engine PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t VALUES (-3,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
if (!$skip_delete)
|
||||||
|
{
|
||||||
|
DELETE FROM t WHERE c1 > 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
DROP TABLE t;
|
||||||
|
|
||||||
|
--echo # Inserting a positive value that exceeds maximum allowed value for an
|
||||||
|
--echo # Auto Increment column (positive maximum)
|
||||||
|
|
||||||
|
eval CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE=$engine PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (126,30);
|
||||||
|
INSERT INTO t VALUES (127,40);
|
||||||
|
|
||||||
|
--error ER_DUP_ENTRY
|
||||||
|
INSERT INTO t VALUES (128,50);
|
||||||
|
--error ER_DUP_ENTRY
|
||||||
|
INSERT INTO t VALUES (129,60);
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
DROP TABLE t;
|
||||||
|
|
||||||
|
--echo # Inserting a negative value that goes below minimum allowed value for an
|
||||||
|
--echo # Auto Increment column (negative minimum)
|
||||||
|
|
||||||
|
eval CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE=$engine PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-127,30);
|
||||||
|
INSERT INTO t VALUES (-128,40);
|
||||||
|
|
||||||
|
--error ER_DUP_ENTRY
|
||||||
|
INSERT INTO t VALUES (-129,50);
|
||||||
|
--error ER_DUP_ENTRY
|
||||||
|
INSERT INTO t VALUES (-130,60);
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
DROP TABLE t;
|
||||||
|
|
||||||
|
--echo # Updating the partition table with a negative Auto Increment value
|
||||||
|
|
||||||
|
eval CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE=$engine PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
if (!$skip_update)
|
||||||
|
{
|
||||||
|
UPDATE t SET c1 = -6 WHERE c1 = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
INSERT INTO t(c2) VALUES (50);
|
||||||
|
|
||||||
|
if (!$skip_update)
|
||||||
|
{
|
||||||
|
UPDATE t SET c1 = -6 WHERE c1 = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
DROP TABLE t;
|
||||||
|
|
||||||
|
--echo # Updating the partition table with a value that crosses the upper limits
|
||||||
|
--echo # on both the positive and the negative side.
|
||||||
|
|
||||||
|
eval CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE=$engine PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (126,30);
|
||||||
|
INSERT INTO t VALUES (127,40);
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
if (!$skip_update)
|
||||||
|
{
|
||||||
|
UPDATE t SET c1 = 130 where c1 = 127;
|
||||||
|
}
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
if (!$skip_update)
|
||||||
|
{
|
||||||
|
UPDATE t SET c1 = -140 where c1 = 126;
|
||||||
|
}
|
||||||
|
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
|
||||||
|
DROP TABLE t;
|
||||||
|
|
||||||
|
--echo ##############################################################################
|
||||||
|
}
|
||||||
|
@ -825,3 +825,194 @@ c1
|
|||||||
4
|
4
|
||||||
5
|
5
|
||||||
DROP TABLE t1;
|
DROP TABLE t1;
|
||||||
|
#############################################################################
|
||||||
|
# Bug #45823 - Assertion failure in file row/row0mysql.c line 1386
|
||||||
|
# Bug #43988 - AUTO_INCREMENT errors with partitioned InnoDB tables in 5.1.31
|
||||||
|
##############################################################################
|
||||||
|
# Inserting negative autoincrement values into a partition table (partitions >= 4)
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='InnoDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
4 40
|
||||||
|
DROP TABLE t;
|
||||||
|
# Reading from a partition table (partitions >= 2 ) after inserting a negative
|
||||||
|
# value into the auto increment column
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='InnoDB' PARTITION BY HASH(c1) PARTITIONS 2;
|
||||||
|
INSERT INTO t VALUES (-2,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-2 -20
|
||||||
|
1 30
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting negative auto increment value into a partition table (partitions >= 2)
|
||||||
|
# auto increment value > 2.
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='InnoDB' PARTITION BY HASH(c1) PARTITIONS 2;
|
||||||
|
INSERT INTO t VALUES (-4,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-4 -20
|
||||||
|
1 30
|
||||||
|
2 40
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting -1 into autoincrement column of a partition table (partition >= 4)
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='InnoDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
DROP TABLE t;
|
||||||
|
# Deleting from an auto increment table after inserting negative values
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='InnoDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t VALUES (-3,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-3 -20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
4 40
|
||||||
|
DELETE FROM t WHERE c1 > 1;
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-3 -20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting a positive value that exceeds maximum allowed value for an
|
||||||
|
# Auto Increment column (positive maximum)
|
||||||
|
CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='InnoDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (126,30);
|
||||||
|
INSERT INTO t VALUES (127,40);
|
||||||
|
INSERT INTO t VALUES (128,50);
|
||||||
|
ERROR 23000: Duplicate entry '127' for key 'PRIMARY'
|
||||||
|
INSERT INTO t VALUES (129,60);
|
||||||
|
ERROR 23000: Duplicate entry '127' for key 'PRIMARY'
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
126 30
|
||||||
|
127 40
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting a negative value that goes below minimum allowed value for an
|
||||||
|
# Auto Increment column (negative minimum)
|
||||||
|
CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='InnoDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-127,30);
|
||||||
|
INSERT INTO t VALUES (-128,40);
|
||||||
|
INSERT INTO t VALUES (-129,50);
|
||||||
|
ERROR 23000: Duplicate entry '-128' for key 'PRIMARY'
|
||||||
|
INSERT INTO t VALUES (-130,60);
|
||||||
|
ERROR 23000: Duplicate entry '-128' for key 'PRIMARY'
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-128 40
|
||||||
|
-127 30
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
DROP TABLE t;
|
||||||
|
# Updating the partition table with a negative Auto Increment value
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='InnoDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
UPDATE t SET c1 = -6 WHERE c1 = 2;
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-6 20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
3 30
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
INSERT INTO t(c2) VALUES (50);
|
||||||
|
UPDATE t SET c1 = -6 WHERE c1 = 2;
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-6 20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
3 30
|
||||||
|
4 40
|
||||||
|
5 50
|
||||||
|
DROP TABLE t;
|
||||||
|
# Updating the partition table with a value that crosses the upper limits
|
||||||
|
# on both the positive and the negative side.
|
||||||
|
CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='InnoDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (126,30);
|
||||||
|
INSERT INTO t VALUES (127,40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
126 30
|
||||||
|
127 40
|
||||||
|
UPDATE t SET c1 = 130 where c1 = 127;
|
||||||
|
Warnings:
|
||||||
|
Warning 1264 Out of range value for column 'c1' at row 1
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
126 30
|
||||||
|
127 40
|
||||||
|
UPDATE t SET c1 = -140 where c1 = 126;
|
||||||
|
Warnings:
|
||||||
|
Warning 1264 Out of range value for column 'c1' at row 1
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-128 30
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
127 40
|
||||||
|
DROP TABLE t;
|
||||||
|
##############################################################################
|
||||||
|
@ -851,3 +851,194 @@ c1
|
|||||||
4
|
4
|
||||||
5
|
5
|
||||||
DROP TABLE t1;
|
DROP TABLE t1;
|
||||||
|
#############################################################################
|
||||||
|
# Bug #45823 - Assertion failure in file row/row0mysql.c line 1386
|
||||||
|
# Bug #43988 - AUTO_INCREMENT errors with partitioned InnoDB tables in 5.1.31
|
||||||
|
##############################################################################
|
||||||
|
# Inserting negative autoincrement values into a partition table (partitions >= 4)
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='Memory' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
4 40
|
||||||
|
DROP TABLE t;
|
||||||
|
# Reading from a partition table (partitions >= 2 ) after inserting a negative
|
||||||
|
# value into the auto increment column
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='Memory' PARTITION BY HASH(c1) PARTITIONS 2;
|
||||||
|
INSERT INTO t VALUES (-2,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-2 -20
|
||||||
|
1 30
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting negative auto increment value into a partition table (partitions >= 2)
|
||||||
|
# auto increment value > 2.
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='Memory' PARTITION BY HASH(c1) PARTITIONS 2;
|
||||||
|
INSERT INTO t VALUES (-4,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-4 -20
|
||||||
|
1 30
|
||||||
|
2 40
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting -1 into autoincrement column of a partition table (partition >= 4)
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='Memory' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
DROP TABLE t;
|
||||||
|
# Deleting from an auto increment table after inserting negative values
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='Memory' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t VALUES (-3,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-3 -20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
4 40
|
||||||
|
DELETE FROM t WHERE c1 > 1;
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-3 -20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting a positive value that exceeds maximum allowed value for an
|
||||||
|
# Auto Increment column (positive maximum)
|
||||||
|
CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='Memory' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (126,30);
|
||||||
|
INSERT INTO t VALUES (127,40);
|
||||||
|
INSERT INTO t VALUES (128,50);
|
||||||
|
ERROR 23000: Duplicate entry '127' for key 'PRIMARY'
|
||||||
|
INSERT INTO t VALUES (129,60);
|
||||||
|
ERROR 23000: Duplicate entry '127' for key 'PRIMARY'
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
126 30
|
||||||
|
127 40
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting a negative value that goes below minimum allowed value for an
|
||||||
|
# Auto Increment column (negative minimum)
|
||||||
|
CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='Memory' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-127,30);
|
||||||
|
INSERT INTO t VALUES (-128,40);
|
||||||
|
INSERT INTO t VALUES (-129,50);
|
||||||
|
ERROR 23000: Duplicate entry '-128' for key 'PRIMARY'
|
||||||
|
INSERT INTO t VALUES (-130,60);
|
||||||
|
ERROR 23000: Duplicate entry '-128' for key 'PRIMARY'
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-128 40
|
||||||
|
-127 30
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
DROP TABLE t;
|
||||||
|
# Updating the partition table with a negative Auto Increment value
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='Memory' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
UPDATE t SET c1 = -6 WHERE c1 = 2;
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-6 20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
3 30
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
INSERT INTO t(c2) VALUES (50);
|
||||||
|
UPDATE t SET c1 = -6 WHERE c1 = 2;
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-6 20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
3 30
|
||||||
|
4 40
|
||||||
|
5 50
|
||||||
|
DROP TABLE t;
|
||||||
|
# Updating the partition table with a value that crosses the upper limits
|
||||||
|
# on both the positive and the negative side.
|
||||||
|
CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='Memory' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (126,30);
|
||||||
|
INSERT INTO t VALUES (127,40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
126 30
|
||||||
|
127 40
|
||||||
|
UPDATE t SET c1 = 130 where c1 = 127;
|
||||||
|
Warnings:
|
||||||
|
Warning 1264 Out of range value for column 'c1' at row 1
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
126 30
|
||||||
|
127 40
|
||||||
|
UPDATE t SET c1 = -140 where c1 = 126;
|
||||||
|
Warnings:
|
||||||
|
Warning 1264 Out of range value for column 'c1' at row 1
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-128 30
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
127 40
|
||||||
|
DROP TABLE t;
|
||||||
|
##############################################################################
|
||||||
|
@ -870,3 +870,194 @@ c1
|
|||||||
4
|
4
|
||||||
5
|
5
|
||||||
DROP TABLE t1;
|
DROP TABLE t1;
|
||||||
|
#############################################################################
|
||||||
|
# Bug #45823 - Assertion failure in file row/row0mysql.c line 1386
|
||||||
|
# Bug #43988 - AUTO_INCREMENT errors with partitioned InnoDB tables in 5.1.31
|
||||||
|
##############################################################################
|
||||||
|
# Inserting negative autoincrement values into a partition table (partitions >= 4)
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='MyISAM' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
4 40
|
||||||
|
DROP TABLE t;
|
||||||
|
# Reading from a partition table (partitions >= 2 ) after inserting a negative
|
||||||
|
# value into the auto increment column
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='MyISAM' PARTITION BY HASH(c1) PARTITIONS 2;
|
||||||
|
INSERT INTO t VALUES (-2,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-2 -20
|
||||||
|
1 30
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting negative auto increment value into a partition table (partitions >= 2)
|
||||||
|
# auto increment value > 2.
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='MyISAM' PARTITION BY HASH(c1) PARTITIONS 2;
|
||||||
|
INSERT INTO t VALUES (-4,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-4 -20
|
||||||
|
1 30
|
||||||
|
2 40
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting -1 into autoincrement column of a partition table (partition >= 4)
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='MyISAM' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
DROP TABLE t;
|
||||||
|
# Deleting from an auto increment table after inserting negative values
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='MyISAM' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t VALUES (-3,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-3 -20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
4 40
|
||||||
|
DELETE FROM t WHERE c1 > 1;
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-3 -20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting a positive value that exceeds maximum allowed value for an
|
||||||
|
# Auto Increment column (positive maximum)
|
||||||
|
CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='MyISAM' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (126,30);
|
||||||
|
INSERT INTO t VALUES (127,40);
|
||||||
|
INSERT INTO t VALUES (128,50);
|
||||||
|
ERROR 23000: Duplicate entry '127' for key 'PRIMARY'
|
||||||
|
INSERT INTO t VALUES (129,60);
|
||||||
|
ERROR 23000: Duplicate entry '127' for key 'PRIMARY'
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
126 30
|
||||||
|
127 40
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting a negative value that goes below minimum allowed value for an
|
||||||
|
# Auto Increment column (negative minimum)
|
||||||
|
CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='MyISAM' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-127,30);
|
||||||
|
INSERT INTO t VALUES (-128,40);
|
||||||
|
INSERT INTO t VALUES (-129,50);
|
||||||
|
ERROR 23000: Duplicate entry '-128' for key 'PRIMARY'
|
||||||
|
INSERT INTO t VALUES (-130,60);
|
||||||
|
ERROR 23000: Duplicate entry '-128' for key 'PRIMARY'
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-128 40
|
||||||
|
-127 30
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
DROP TABLE t;
|
||||||
|
# Updating the partition table with a negative Auto Increment value
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='MyISAM' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
UPDATE t SET c1 = -6 WHERE c1 = 2;
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-6 20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
3 30
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
INSERT INTO t(c2) VALUES (50);
|
||||||
|
UPDATE t SET c1 = -6 WHERE c1 = 2;
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-6 20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
3 30
|
||||||
|
4 40
|
||||||
|
5 50
|
||||||
|
DROP TABLE t;
|
||||||
|
# Updating the partition table with a value that crosses the upper limits
|
||||||
|
# on both the positive and the negative side.
|
||||||
|
CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='MyISAM' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (126,30);
|
||||||
|
INSERT INTO t VALUES (127,40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
126 30
|
||||||
|
127 40
|
||||||
|
UPDATE t SET c1 = 130 where c1 = 127;
|
||||||
|
Warnings:
|
||||||
|
Warning 1264 Out of range value for column 'c1' at row 1
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
126 30
|
||||||
|
127 40
|
||||||
|
UPDATE t SET c1 = -140 where c1 = 126;
|
||||||
|
Warnings:
|
||||||
|
Warning 1264 Out of range value for column 'c1' at row 1
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-128 30
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
127 40
|
||||||
|
DROP TABLE t;
|
||||||
|
##############################################################################
|
||||||
|
@ -846,3 +846,194 @@ c1
|
|||||||
4
|
4
|
||||||
5
|
5
|
||||||
DROP TABLE t1;
|
DROP TABLE t1;
|
||||||
|
#############################################################################
|
||||||
|
# Bug #45823 - Assertion failure in file row/row0mysql.c line 1386
|
||||||
|
# Bug #43988 - AUTO_INCREMENT errors with partitioned InnoDB tables in 5.1.31
|
||||||
|
##############################################################################
|
||||||
|
# Inserting negative autoincrement values into a partition table (partitions >= 4)
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='NDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
4 40
|
||||||
|
DROP TABLE t;
|
||||||
|
# Reading from a partition table (partitions >= 2 ) after inserting a negative
|
||||||
|
# value into the auto increment column
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='NDB' PARTITION BY HASH(c1) PARTITIONS 2;
|
||||||
|
INSERT INTO t VALUES (-2,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-2 -20
|
||||||
|
1 30
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting negative auto increment value into a partition table (partitions >= 2)
|
||||||
|
# auto increment value > 2.
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='NDB' PARTITION BY HASH(c1) PARTITIONS 2;
|
||||||
|
INSERT INTO t VALUES (-4,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-4 -20
|
||||||
|
1 30
|
||||||
|
2 40
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting -1 into autoincrement column of a partition table (partition >= 4)
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='NDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
DROP TABLE t;
|
||||||
|
# Deleting from an auto increment table after inserting negative values
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='NDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
INSERT INTO t VALUES (-3,-20);
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-3 -20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
4 40
|
||||||
|
DELETE FROM t WHERE c1 > 1;
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-3 -20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting a positive value that exceeds maximum allowed value for an
|
||||||
|
# Auto Increment column (positive maximum)
|
||||||
|
CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='NDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (126,30);
|
||||||
|
INSERT INTO t VALUES (127,40);
|
||||||
|
INSERT INTO t VALUES (128,50);
|
||||||
|
ERROR 23000: Duplicate entry '127' for key 'PRIMARY'
|
||||||
|
INSERT INTO t VALUES (129,60);
|
||||||
|
ERROR 23000: Duplicate entry '127' for key 'PRIMARY'
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
126 30
|
||||||
|
127 40
|
||||||
|
DROP TABLE t;
|
||||||
|
# Inserting a negative value that goes below minimum allowed value for an
|
||||||
|
# Auto Increment column (negative minimum)
|
||||||
|
CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='NDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-127,30);
|
||||||
|
INSERT INTO t VALUES (-128,40);
|
||||||
|
INSERT INTO t VALUES (-129,50);
|
||||||
|
ERROR 23000: Duplicate entry '-128' for key 'PRIMARY'
|
||||||
|
INSERT INTO t VALUES (-130,60);
|
||||||
|
ERROR 23000: Duplicate entry '-128' for key 'PRIMARY'
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-128 40
|
||||||
|
-127 30
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
DROP TABLE t;
|
||||||
|
# Updating the partition table with a negative Auto Increment value
|
||||||
|
CREATE TABLE t (c1 INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='NDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (-1,-10);
|
||||||
|
INSERT INTO t(c2) VALUES (30);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
3 30
|
||||||
|
UPDATE t SET c1 = -6 WHERE c1 = 2;
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-6 20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
3 30
|
||||||
|
INSERT INTO t(c2) VALUES (40);
|
||||||
|
INSERT INTO t(c2) VALUES (50);
|
||||||
|
UPDATE t SET c1 = -6 WHERE c1 = 2;
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-6 20
|
||||||
|
-1 -10
|
||||||
|
1 10
|
||||||
|
3 30
|
||||||
|
4 40
|
||||||
|
5 50
|
||||||
|
DROP TABLE t;
|
||||||
|
# Updating the partition table with a value that crosses the upper limits
|
||||||
|
# on both the positive and the negative side.
|
||||||
|
CREATE TABLE t (c1 TINYINT NOT NULL AUTO_INCREMENT, PRIMARY KEY(c1),
|
||||||
|
c2 INT) ENGINE='NDB' PARTITION BY HASH(c1) PARTITIONS 4;
|
||||||
|
INSERT INTO t(c2) VALUES (10);
|
||||||
|
INSERT INTO t(c2) VALUES (20);
|
||||||
|
INSERT INTO t VALUES (126,30);
|
||||||
|
INSERT INTO t VALUES (127,40);
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
126 30
|
||||||
|
127 40
|
||||||
|
UPDATE t SET c1 = 130 where c1 = 127;
|
||||||
|
Warnings:
|
||||||
|
Warning 1264 Out of range value for column 'c1' at row 1
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
126 30
|
||||||
|
127 40
|
||||||
|
UPDATE t SET c1 = -140 where c1 = 126;
|
||||||
|
Warnings:
|
||||||
|
Warning 1264 Out of range value for column 'c1' at row 1
|
||||||
|
SELECT * FROM t ORDER BY c1 ASC;
|
||||||
|
c1 c2
|
||||||
|
-128 30
|
||||||
|
1 10
|
||||||
|
2 20
|
||||||
|
127 40
|
||||||
|
DROP TABLE t;
|
||||||
|
##############################################################################
|
||||||
|
@ -30,6 +30,9 @@ let $skip_delete= 1;
 let $skip_truncate= 1;
 let $skip_update= 1;
 let $only_ai_pk= 1;
+# Bug#45823 Assertion failure in file row/row0mysql.c line 1386
+# Archive does not handle negative autoincrement values correctly
+let $skip_negative_auto_inc= 1;
 
 ##### Storage engine to be tested
 let $engine= 'Archive';
@ -25,6 +25,9 @@
 #------------------------------------------------------------------------------#
 # Engine specific settings and requirements
 --source include/have_blackhole.inc
+# Bug#45823 Assertion failure in file row/row0mysql.c line 1386
+# Blackhole does not handle negative autoincrement values correctly
+let $skip_negative_auto_inc= 1;
 
 ##### Storage engine to be tested
 let $engine= 'Blackhole';
@ -244,3 +244,71 @@ t1 CREATE TABLE `t1` (
|
|||||||
PRIMARY KEY (`id`)
|
PRIMARY KEY (`id`)
|
||||||
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
|
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
|
||||||
drop table t1;
|
drop table t1;
|
||||||
|
stop slave;
|
||||||
|
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||||
|
reset master;
|
||||||
|
reset slave;
|
||||||
|
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||||
|
start slave;
|
||||||
|
DROP TABLE IF EXISTS t1;
|
||||||
|
DROP TABLE IF EXISTS t2;
|
||||||
|
CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=innodb;
|
||||||
|
CREATE TABLE t2 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=myisam;
|
||||||
|
SET SQL_MODE='';
|
||||||
|
INSERT INTO t1 VALUES(NULL);
|
||||||
|
INSERT INTO t2 VALUES(NULL);
|
||||||
|
SELECT * FROM t1;
|
||||||
|
id
|
||||||
|
1
|
||||||
|
SELECT * FROM t2;
|
||||||
|
id
|
||||||
|
1
|
||||||
|
INSERT INTO t1 VALUES();
|
||||||
|
INSERT INTO t2 VALUES();
|
||||||
|
SELECT * FROM t1;
|
||||||
|
id
|
||||||
|
1
|
||||||
|
2
|
||||||
|
SELECT * FROM t2;
|
||||||
|
id
|
||||||
|
1
|
||||||
|
2
|
||||||
|
INSERT INTO t1 VALUES(0);
|
||||||
|
INSERT INTO t2 VALUES(0);
|
||||||
|
SELECT * FROM t1;
|
||||||
|
id
|
||||||
|
1
|
||||||
|
2
|
||||||
|
3
|
||||||
|
SELECT * FROM t2;
|
||||||
|
id
|
||||||
|
1
|
||||||
|
2
|
||||||
|
3
|
||||||
|
SET SQL_MODE=NO_AUTO_VALUE_ON_ZERO;
|
||||||
|
INSERT INTO t1 VALUES(0);
|
||||||
|
INSERT INTO t2 VALUES(0);
|
||||||
|
SELECT * FROM t1;
|
||||||
|
id
|
||||||
|
0
|
||||||
|
1
|
||||||
|
2
|
||||||
|
3
|
||||||
|
SELECT * FROM t2;
|
||||||
|
id
|
||||||
|
0
|
||||||
|
1
|
||||||
|
2
|
||||||
|
3
|
||||||
|
INSERT INTO t1 VALUES(4);
|
||||||
|
INSERT INTO t2 VALUES(4);
|
||||||
|
FLUSH LOGS;
|
||||||
|
Comparing tables master:test.t1 and slave:test.t1
|
||||||
|
Comparing tables master:test.t2 and slave:test.t2
|
||||||
|
DROP TABLE t1;
|
||||||
|
DROP TABLE t2;
|
||||||
|
Comparing tables master:test.t1 and slave:test.t1
|
||||||
|
Comparing tables master:test.t2 and slave:test.t2
|
||||||
|
DROP TABLE t1;
|
||||||
|
DROP TABLE t2;
|
||||||
|
SET SQL_MODE='';
|
||||||
|
@ -4,21 +4,20 @@ reset master;
|
|||||||
reset slave;
|
reset slave;
|
||||||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||||
start slave;
|
start slave;
|
||||||
create database if not exists mysqltest1;
|
DROP TABLE IF EXISTS t1;
|
||||||
DROP PROCEDURE IF EXISTS mysqltest1.p1;
|
DROP TABLE IF EXISTS t2;
|
||||||
DROP PROCEDURE IF EXISTS mysqltest1.p2;
|
DROP PROCEDURE IF EXISTS p1;
|
||||||
DROP TABLE IF EXISTS mysqltest1.t2;
|
DROP PROCEDURE IF EXISTS p2;
|
||||||
DROP TABLE IF EXISTS mysqltest1.t1;
|
CREATE TABLE IF NOT EXISTS t1(name CHAR(16), birth DATE,PRIMARY KEY(name))ENGINE=InnoDB;
|
||||||
CREATE TABLE IF NOT EXISTS mysqltest1.t1(name CHAR(16), birth DATE,PRIMARY KEY(name))ENGINE=InnoDB;
|
CREATE TABLE IF NOT EXISTS t2(name CHAR(16), age INT ,PRIMARY KEY(name))ENGINE=InnoDB;
|
||||||
CREATE TABLE IF NOT EXISTS mysqltest1.t2(name CHAR(16), age INT ,PRIMARY KEY(name))ENGINE=InnoDB;
|
CREATE PROCEDURE p1()
|
||||||
CREATE PROCEDURE mysqltest1.p1()
|
|
||||||
BEGIN
|
BEGIN
|
||||||
DECLARE done INT DEFAULT 0;
|
DECLARE done INT DEFAULT 0;
|
||||||
DECLARE spa CHAR(16);
|
DECLARE spa CHAR(16);
|
||||||
DECLARE spb INT;
|
DECLARE spb INT;
|
||||||
DECLARE cur1 CURSOR FOR SELECT name,
|
DECLARE cur1 CURSOR FOR SELECT name,
|
||||||
(YEAR(CURDATE())-YEAR(birth))-(RIGHT(CURDATE(),5)<RIGHT(birth,5))
|
(YEAR(CURDATE())-YEAR(birth))-(RIGHT(CURDATE(),5)<RIGHT(birth,5))
|
||||||
FROM mysqltest1.t1;
|
FROM t1;
|
||||||
DECLARE CONTINUE HANDLER FOR SQLSTATE '02000' SET done = 1;
|
DECLARE CONTINUE HANDLER FOR SQLSTATE '02000' SET done = 1;
|
||||||
OPEN cur1;
|
OPEN cur1;
|
||||||
SET AUTOCOMMIT=0;
|
SET AUTOCOMMIT=0;
|
||||||
@ -26,21 +25,20 @@ REPEAT
|
|||||||
FETCH cur1 INTO spa, spb;
|
FETCH cur1 INTO spa, spb;
|
||||||
IF NOT done THEN
|
IF NOT done THEN
|
||||||
START TRANSACTION;
|
START TRANSACTION;
|
||||||
INSERT INTO mysqltest1.t2 VALUES (spa,spb);
|
INSERT INTO t2 VALUES (spa,spb);
|
||||||
COMMIT;
|
COMMIT;
|
||||||
END IF;
|
END IF;
|
||||||
UNTIL done END REPEAT;
|
UNTIL done END REPEAT;
|
||||||
SET AUTOCOMMIT=1;
|
SET AUTOCOMMIT=1;
|
||||||
CLOSE cur1;
|
CLOSE cur1;
|
||||||
END|
|
END|
|
||||||
CREATE PROCEDURE mysqltest1.p2()
|
CREATE PROCEDURE p2()
|
||||||
BEGIN
|
BEGIN
|
||||||
INSERT INTO mysqltest1.t1 VALUES ('MySQL','1993-02-04'),('ROCKS', '1990-08-27'),('Texas', '1999-03-30'),('kyle','2005-1-1');
|
INSERT INTO t1 VALUES ('MySQL','1993-02-04'),('ROCKS', '1990-08-27'),('Texas', '1999-03-30'),('kyle','2005-1-1');
|
||||||
END|
|
END|
|
||||||
CALL mysqltest1.p2();
|
CALL p2();
|
||||||
CALL mysqltest1.p1();
|
CALL p1();
|
||||||
DROP PROCEDURE IF EXISTS mysqltest1.p1;
|
DROP TABLE t1;
|
||||||
DROP PROCEDURE IF EXISTS mysqltest1.p2;
|
DROP TABLE t2;
|
||||||
DROP TABLE IF EXISTS mysqltest1.t1;
|
DROP PROCEDURE p1;
|
||||||
DROP TABLE IF EXISTS mysqltest1.t2;
|
DROP PROCEDURE p2;
|
||||||
DROP DATABASE mysqltest1;
|
|
||||||
|
@ -129,6 +129,9 @@ CREATE DATABASE bug42217_db;
|
|||||||
GRANT CREATE ROUTINE ON bug42217_db.* TO 'create_rout_db'@'localhost'
|
GRANT CREATE ROUTINE ON bug42217_db.* TO 'create_rout_db'@'localhost'
|
||||||
IDENTIFIED BY 'create_rout_db' WITH GRANT OPTION;
|
IDENTIFIED BY 'create_rout_db' WITH GRANT OPTION;
|
||||||
|
|
||||||
|
-- sync_slave_with_master
|
||||||
|
-- connection master
|
||||||
|
|
||||||
connect (create_rout_db_master, localhost, create_rout_db, create_rout_db, bug42217_db,$MASTER_MYPORT,);
|
connect (create_rout_db_master, localhost, create_rout_db, create_rout_db, bug42217_db,$MASTER_MYPORT,);
|
||||||
connect (create_rout_db_slave, localhost, create_rout_db, create_rout_db, bug42217_db, $SLAVE_MYPORT,);
|
connect (create_rout_db_slave, localhost, create_rout_db, create_rout_db, bug42217_db, $SLAVE_MYPORT,);
|
||||||
|
|
||||||
|
@ -23,6 +23,8 @@ disconnect con_temp;
|
|||||||
--source include/wait_until_disconnected.inc
|
--source include/wait_until_disconnected.inc
|
||||||
|
|
||||||
connection master;
|
connection master;
|
||||||
|
-- let $wait_binlog_event= DROP
|
||||||
|
-- source include/wait_for_binlog_event.inc
|
||||||
sync_slave_with_master;
|
sync_slave_with_master;
|
||||||
|
|
||||||
connection slave;
|
connection slave;
|
||||||
|
@ -4,21 +4,20 @@ reset master;
|
|||||||
reset slave;
|
reset slave;
|
||||||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||||
start slave;
|
start slave;
|
||||||
create database if not exists mysqltest1;
|
DROP TABLE IF EXISTS t1;
|
||||||
DROP PROCEDURE IF EXISTS mysqltest1.p1;
|
DROP TABLE IF EXISTS t2;
|
||||||
DROP PROCEDURE IF EXISTS mysqltest1.p2;
|
DROP PROCEDURE IF EXISTS p1;
|
||||||
DROP TABLE IF EXISTS mysqltest1.t2;
|
DROP PROCEDURE IF EXISTS p2;
|
||||||
DROP TABLE IF EXISTS mysqltest1.t1;
|
CREATE TABLE IF NOT EXISTS t1(name CHAR(16), birth DATE,PRIMARY KEY(name))ENGINE=NDBCLUSTER;
|
||||||
CREATE TABLE IF NOT EXISTS mysqltest1.t1(name CHAR(16), birth DATE,PRIMARY KEY(name))ENGINE=NDBCLUSTER;
|
CREATE TABLE IF NOT EXISTS t2(name CHAR(16), age INT ,PRIMARY KEY(name))ENGINE=NDBCLUSTER;
|
||||||
CREATE TABLE IF NOT EXISTS mysqltest1.t2(name CHAR(16), age INT ,PRIMARY KEY(name))ENGINE=NDBCLUSTER;
|
CREATE PROCEDURE p1()
|
||||||
CREATE PROCEDURE mysqltest1.p1()
|
|
||||||
BEGIN
|
BEGIN
|
||||||
DECLARE done INT DEFAULT 0;
|
DECLARE done INT DEFAULT 0;
|
||||||
DECLARE spa CHAR(16);
|
DECLARE spa CHAR(16);
|
||||||
DECLARE spb INT;
|
DECLARE spb INT;
|
||||||
DECLARE cur1 CURSOR FOR SELECT name,
|
DECLARE cur1 CURSOR FOR SELECT name,
|
||||||
(YEAR(CURDATE())-YEAR(birth))-(RIGHT(CURDATE(),5)<RIGHT(birth,5))
|
(YEAR(CURDATE())-YEAR(birth))-(RIGHT(CURDATE(),5)<RIGHT(birth,5))
|
||||||
FROM mysqltest1.t1;
|
FROM t1;
|
||||||
DECLARE CONTINUE HANDLER FOR SQLSTATE '02000' SET done = 1;
|
DECLARE CONTINUE HANDLER FOR SQLSTATE '02000' SET done = 1;
|
||||||
OPEN cur1;
|
OPEN cur1;
|
||||||
SET AUTOCOMMIT=0;
|
SET AUTOCOMMIT=0;
|
||||||
@ -26,21 +25,20 @@ REPEAT
|
|||||||
FETCH cur1 INTO spa, spb;
|
FETCH cur1 INTO spa, spb;
|
||||||
IF NOT done THEN
|
IF NOT done THEN
|
||||||
START TRANSACTION;
|
START TRANSACTION;
|
||||||
INSERT INTO mysqltest1.t2 VALUES (spa,spb);
|
INSERT INTO t2 VALUES (spa,spb);
|
||||||
COMMIT;
|
COMMIT;
|
||||||
END IF;
|
END IF;
|
||||||
UNTIL done END REPEAT;
|
UNTIL done END REPEAT;
|
||||||
SET AUTOCOMMIT=1;
|
SET AUTOCOMMIT=1;
|
||||||
CLOSE cur1;
|
CLOSE cur1;
|
||||||
END|
|
END|
|
||||||
CREATE PROCEDURE mysqltest1.p2()
|
CREATE PROCEDURE p2()
|
||||||
BEGIN
|
BEGIN
|
||||||
INSERT INTO mysqltest1.t1 VALUES ('MySQL','1993-02-04'),('ROCKS', '1990-08-27'),('Texas', '1999-03-30'),('kyle','2005-1-1');
|
INSERT INTO t1 VALUES ('MySQL','1993-02-04'),('ROCKS', '1990-08-27'),('Texas', '1999-03-30'),('kyle','2005-1-1');
|
||||||
END|
|
END|
|
||||||
CALL mysqltest1.p2();
|
CALL p2();
|
||||||
CALL mysqltest1.p1();
|
CALL p1();
|
||||||
DROP PROCEDURE IF EXISTS mysqltest1.p1;
|
DROP TABLE t1;
|
||||||
DROP PROCEDURE IF EXISTS mysqltest1.p2;
|
DROP TABLE t2;
|
||||||
DROP TABLE IF EXISTS mysqltest1.t1;
|
DROP PROCEDURE p1;
|
||||||
DROP TABLE IF EXISTS mysqltest1.t2;
|
DROP PROCEDURE p2;
|
||||||
DROP DATABASE mysqltest1;
|
|
||||||
|
@ -1599,3 +1599,27 @@ INSERT INTO t1 VALUES (NULL, NULL),(NULL, NULL);
|
|||||||
FLUSH TABLE t1;
|
FLUSH TABLE t1;
|
||||||
SELECT * FROM t1 ORDER BY a;
|
SELECT * FROM t1 ORDER BY a;
|
||||||
DROP TABLE t1;
|
DROP TABLE t1;
|
||||||
|
|
||||||
|
#
|
||||||
|
# BUG#29203 - archive tables have weird values in show table status
|
||||||
|
#
|
||||||
|
CREATE TABLE t1(a INT, b BLOB) ENGINE=archive;
|
||||||
|
SELECT DATA_LENGTH, AVG_ROW_LENGTH FROM
|
||||||
|
INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test';
|
||||||
|
INSERT INTO t1 VALUES(1, 'sampleblob1'),(2, 'sampleblob2');
|
||||||
|
SELECT DATA_LENGTH, AVG_ROW_LENGTH FROM
|
||||||
|
INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test';
|
||||||
|
DROP TABLE t1;
|
||||||
|
|
||||||
|
#
|
||||||
|
# BUG#46961 - archive engine loses rows during self joining select!
|
||||||
|
#
|
||||||
|
SET @save_join_buffer_size= @@join_buffer_size;
|
||||||
|
SET @@join_buffer_size= 8228;
|
||||||
|
CREATE TABLE t1(a CHAR(255)) ENGINE=archive;
|
||||||
|
INSERT INTO t1 VALUES('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),
|
||||||
|
('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),
|
||||||
|
('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa');
|
||||||
|
SELECT COUNT(t1.a) FROM t1, t1 a, t1 b, t1 c, t1 d, t1 e;
|
||||||
|
DROP TABLE t1;
|
||||||
|
SET @@join_buffer_size= @save_join_buffer_size;
|
||||||
|
@ -1198,6 +1198,23 @@ CREATE TABLE IF NOT EXISTS t2 (a INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY)
|
|||||||
|
|
||||||
DROP TABLE t1, t2;
|
DROP TABLE t1, t2;
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # BUG#46384 - mysqld segfault when trying to create table with same
|
||||||
|
--echo # name as existing view
|
||||||
|
--echo #
|
||||||
|
|
||||||
|
CREATE TABLE t1 (a INT);
|
||||||
|
CREATE TABLE t2 (a INT);
|
||||||
|
|
||||||
|
INSERT INTO t1 VALUES (1),(2),(3);
|
||||||
|
INSERT INTO t2 VALUES (1),(2),(3);
|
||||||
|
|
||||||
|
CREATE VIEW v1 AS SELECT t1.a FROM t1, t2;
|
||||||
|
--error ER_TABLE_EXISTS_ERROR
|
||||||
|
CREATE TABLE v1 AS SELECT * FROM t1;
|
||||||
|
|
||||||
|
DROP VIEW v1;
|
||||||
|
DROP TABLE t1,t2;
|
||||||
|
|
||||||
--echo End of 5.0 tests
|
--echo End of 5.0 tests
|
||||||
|
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
-- source include/have_binlog_format_mixed_or_statement.inc
|
-- source include/have_binlog_format_mixed_or_statement.inc
|
||||||
-- source include/have_gbk.inc
|
-- source include/have_gbk.inc
|
||||||
|
|
||||||
|
RESET MASTER;
|
||||||
SET NAMES gbk;
|
SET NAMES gbk;
|
||||||
--character_set gbk
|
--character_set gbk
|
||||||
|
|
||||||
|
@ -573,4 +573,44 @@ SELECT DISTINCT a, b, d, c FROM t1;
|
|||||||
|
|
||||||
DROP TABLE t1;
|
DROP TABLE t1;
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # Bug #46159: simple query that never returns
|
||||||
|
--echo #
|
||||||
|
|
||||||
|
# Set max_heap_table_size to the minimum value so that GROUP BY table in the
|
||||||
|
# SELECT query below gets converted to MyISAM
|
||||||
|
SET @old_max_heap_table_size = @@max_heap_table_size;
|
||||||
|
SET @@max_heap_table_size = 16384;
|
||||||
|
|
||||||
|
# Set sort_buffer_size to the mininum value so that remove_duplicates() calls
|
||||||
|
# remove_dup_with_compare()
|
||||||
|
SET @old_sort_buffer_size = @@sort_buffer_size;
|
||||||
|
SET @@sort_buffer_size = 32804;
|
||||||
|
|
||||||
|
CREATE TABLE t1(c1 int, c2 VARCHAR(20));
|
||||||
|
INSERT INTO t1 VALUES (1, '1'), (1, '1'), (2, '2'), (3, '1'), (3, '1'), (4, '4');
|
||||||
|
# Now we just need to pad the table with random data so we have enough unique
|
||||||
|
# values to force conversion of the GROUP BY table to MyISAM
|
||||||
|
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
|
||||||
|
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
|
||||||
|
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
|
||||||
|
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
|
||||||
|
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
|
||||||
|
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
|
||||||
|
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
|
||||||
|
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
|
||||||
|
|
||||||
|
# First rows of the GROUP BY table that will be processed by
|
||||||
|
# remove_dup_with_compare()
|
||||||
|
SELECT c1, c2, COUNT(*) FROM t1 GROUP BY c1 LIMIT 4;
|
||||||
|
|
||||||
|
# The actual test case
|
||||||
|
SELECT DISTINCT c2 FROM t1 GROUP BY c1 HAVING COUNT(*) > 1;
|
||||||
|
|
||||||
|
# Cleanup
|
||||||
|
|
||||||
|
DROP TABLE t1;
|
||||||
|
SET @@sort_buffer_size = @old_sort_buffer_size;
|
||||||
|
SET @@max_heap_table_size = @old_max_heap_table_size;
|
||||||
|
|
||||||
--echo End of 5.1 tests
|
--echo End of 5.1 tests
|
||||||
|
@ -135,6 +135,17 @@ EXPLAIN EXTENDED SELECT COUNT(a) FROM t1 USE KEY(a);
|
|||||||
|
|
||||||
DROP TABLE t1;
|
DROP TABLE t1;
|
||||||
|
|
||||||
|
#
|
||||||
|
# Bug#45989 memory leak after explain encounters an error in the query
|
||||||
|
#
|
||||||
|
CREATE TABLE t1(a LONGTEXT);
|
||||||
|
INSERT INTO t1 VALUES (repeat('a',@@global.max_allowed_packet));
|
||||||
|
INSERT INTO t1 VALUES (repeat('b',@@global.max_allowed_packet));
|
||||||
|
--error ER_BAD_FIELD_ERROR
|
||||||
|
EXPLAIN SELECT DISTINCT 1 FROM t1,
|
||||||
|
(SELECT DISTINCTROW a AS away FROM t1 GROUP BY a WITH ROLLUP) as d1
|
||||||
|
WHERE t1.a = d1.a;
|
||||||
|
DROP TABLE t1;
|
||||||
|
|
||||||
# End of 5.0 tests.
|
# End of 5.0 tests.
|
||||||
|
|
||||||
|
@ -1291,6 +1291,19 @@ INSERT INTO t1 VALUES ('aaaaaaaa');
|
|||||||
SELECT LOAD_FILE(a) FROM t1;
|
SELECT LOAD_FILE(a) FROM t1;
|
||||||
DROP TABLE t1;
|
DROP TABLE t1;
|
||||||
|
|
||||||
|
#
|
||||||
|
# Bug#46815 CONCAT_WS returning wrong data
|
||||||
|
#
|
||||||
|
CREATE TABLE t1 (f2 VARCHAR(20));
|
||||||
|
CREATE TABLE t2 (f2 VARCHAR(20));
|
||||||
|
|
||||||
|
INSERT INTO t1 VALUES ('MIN'),('MAX');
|
||||||
|
INSERT INTO t2 VALUES ('LOAD');
|
||||||
|
|
||||||
|
SELECT CONCAT_WS('_', (SELECT t2.f2 FROM t2), t1.f2) AS concat_name FROM t1;
|
||||||
|
|
||||||
|
DROP TABLE t1, t2;
|
||||||
|
|
||||||
|
|
||||||
--echo End of 5.0 tests
|
--echo End of 5.0 tests
|
||||||
|
|
||||||
|
2 mysql-test/t/lowercase_mixed_tmpdir_innodb-master.opt (new file)
@ -0,0 +1,2 @@
+--lower-case-table-names=2
+--tmpdir=$MYSQLTEST_VARDIR/tmp/MixedCase
6 mysql-test/t/lowercase_mixed_tmpdir_innodb-master.sh (new file)
@ -0,0 +1,6 @@
+# This test requires a non-lowercase tmpdir directory on a case-sensitive
+# filesystem.
+
+d="$MYSQLTEST_VARDIR/tmp/MixedCase"
+test -d "$d" || mkdir "$d"
+rm -f "$d"/*
12 mysql-test/t/lowercase_mixed_tmpdir_innodb.test (new file)
@ -0,0 +1,12 @@
+--source include/have_lowercase2.inc
+--source include/have_innodb.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+create table t1 (id int) engine=InnoDB;
+insert into t1 values (1);
+create temporary table t2 engine=InnoDB select * from t1;
+drop temporary table t2;
+drop table t1;
@ -5,8 +5,9 @@
|
|||||||
-- source include/have_cp932.inc
|
-- source include/have_cp932.inc
|
||||||
-- source include/have_log_bin.inc
|
-- source include/have_log_bin.inc
|
||||||
|
|
||||||
|
RESET MASTER;
|
||||||
|
|
||||||
# Bug#16217 (mysql client did not know how not switch its internal charset)
|
# Bug#16217 (mysql client did not know how not switch its internal charset)
|
||||||
flush logs;
|
|
||||||
create table t3 (f text character set utf8);
|
create table t3 (f text character set utf8);
|
||||||
create table t4 (f text character set cp932);
|
create table t4 (f text character set cp932);
|
||||||
--exec $MYSQL --default-character-set=utf8 test -e "insert into t3 values(_utf8'ソ')"
|
--exec $MYSQL --default-character-set=utf8 test -e "insert into t3 values(_utf8'ソ')"
|
||||||
@ -14,7 +15,7 @@ create table t4 (f text character set cp932);
|
|||||||
flush logs;
|
flush logs;
|
||||||
rename table t3 to t03, t4 to t04;
|
rename table t3 to t03, t4 to t04;
|
||||||
let $MYSQLD_DATADIR= `select @@datadir`;
|
let $MYSQLD_DATADIR= `select @@datadir`;
|
||||||
--exec $MYSQL_BINLOG --short-form $MYSQLD_DATADIR/master-bin.000002 | $MYSQL --default-character-set=utf8
|
--exec $MYSQL_BINLOG --short-form $MYSQLD_DATADIR/master-bin.000001 | $MYSQL --default-character-set=utf8
|
||||||
# original and recovered data must be equal
|
# original and recovered data must be equal
|
||||||
select HEX(f) from t03;
|
select HEX(f) from t03;
|
||||||
select HEX(f) from t3;
|
select HEX(f) from t3;
|
||||||
|
@ -5,6 +5,23 @@
|
|||||||
drop table if exists t1;
|
drop table if exists t1;
|
||||||
--enable_warnings
|
--enable_warnings
|
||||||
|
|
||||||
|
#
|
||||||
|
# Bug#47029: Crash when reorganize partition with subpartition
|
||||||
|
#
|
||||||
|
create table t1 (a int not null,
|
||||||
|
b datetime not null,
|
||||||
|
primary key (a,b))
|
||||||
|
engine=innodb
|
||||||
|
partition by range (to_days(b))
|
||||||
|
subpartition by hash (a)
|
||||||
|
subpartitions 2
|
||||||
|
( partition p0 values less than (to_days('2009-01-01')),
|
||||||
|
partition p1 values less than (to_days('2009-02-01')),
|
||||||
|
partition p2 values less than (to_days('2009-03-01')),
|
||||||
|
partition p3 values less than maxvalue);
|
||||||
|
alter table t1 reorganize partition p1,p2 into
|
||||||
|
( partition p2 values less than (to_days('2009-03-01')));
|
||||||
|
drop table t1;
|
||||||
#
|
#
|
||||||
# Bug#40595: Non-matching rows not released with READ-COMMITTED on tables
|
# Bug#40595: Non-matching rows not released with READ-COMMITTED on tables
|
||||||
# with partitions
|
# with partitions
|
||||||
@ -270,3 +287,15 @@ PARTITION BY RANGE (int_column)
|
|||||||
(PARTITION p1 VALUES LESS THAN (5));
|
(PARTITION p1 VALUES LESS THAN (5));
|
||||||
show create table t1;
|
show create table t1;
|
||||||
drop table t1;
|
drop table t1;
|
||||||
|
|
||||||
|
#
|
||||||
|
# BUG#46483 - drop table of partitioned table may leave extraneous file
|
||||||
|
# Note: was only repeatable with InnoDB plugin
|
||||||
|
#
|
||||||
|
CREATE TABLE t1 (a INT) ENGINE=InnoDB
|
||||||
|
PARTITION BY list(a) (PARTITION p1 VALUES IN (1));
|
||||||
|
CREATE INDEX i1 ON t1 (a);
|
||||||
|
DROP TABLE t1;
|
||||||
|
let $MYSQLD_DATADIR= `SELECT @@datadir`;
|
||||||
|
# Before the fix it should show extra file like #sql-2405_2.par
|
||||||
|
--list_files $MYSQLD_DATADIR/test/ *
|
||||||
|
@ -2,6 +2,8 @@
|
|||||||
--source include/not_embedded.inc
|
--source include/not_embedded.inc
|
||||||
# Non-windows specific ps tests.
|
# Non-windows specific ps tests.
|
||||||
--source include/not_windows.inc
|
--source include/not_windows.inc
|
||||||
|
# requires dynamic loading
|
||||||
|
--source include/have_dynamic_loading.inc
|
||||||
|
|
||||||
#
|
#
|
||||||
# Bug #20665: All commands supported in Stored Procedures should work in
|
# Bug #20665: All commands supported in Stored Procedures should work in
|
||||||
|
@ -8242,6 +8242,28 @@ while ($tab_count)
|
|||||||
DROP PROCEDURE p1;
|
DROP PROCEDURE p1;
|
||||||
DROP TABLE t1;
|
DROP TABLE t1;
|
||||||
|
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # Bug #46629: Item_in_subselect::val_int(): Assertion `0'
|
||||||
|
--echo # on subquery inside a SP
|
||||||
|
--echo #
|
||||||
|
CREATE TABLE t1(a INT);
|
||||||
|
CREATE TABLE t2(a INT, b INT PRIMARY KEY);
|
||||||
|
|
||||||
|
DELIMITER |;
|
||||||
|
CREATE PROCEDURE p1 ()
|
||||||
|
BEGIN
|
||||||
|
SELECT a FROM t1 A WHERE A.b IN (SELECT b FROM t2 AS B);
|
||||||
|
END|
|
||||||
|
DELIMITER ;|
|
||||||
|
--error ER_BAD_FIELD_ERROR
|
||||||
|
CALL p1;
|
||||||
|
--error ER_BAD_FIELD_ERROR
|
||||||
|
CALL p1;
|
||||||
|
DROP PROCEDURE p1;
|
||||||
|
DROP TABLE t1, t2;
|
||||||
|
|
||||||
|
|
||||||
--echo # ------------------------------------------------------------------
|
--echo # ------------------------------------------------------------------
|
||||||
--echo # -- End of 5.1 tests
|
--echo # -- End of 5.1 tests
|
||||||
--echo # ------------------------------------------------------------------
|
--echo # ------------------------------------------------------------------
|
||||||
|
32
mysql-test/t/subselect4.test
Normal file
32
mysql-test/t/subselect4.test
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
# General purpose bug fix tests go here : subselect.test too large
|
||||||
|
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # Bug #46791: Assertion failed:(table->key_read==0),function unknown
|
||||||
|
--echo # function,file sql_base.cc
|
||||||
|
--echo #
|
||||||
|
|
||||||
|
CREATE TABLE t1 (a INT, b INT, KEY(a));
|
||||||
|
INSERT INTO t1 VALUES (1,1),(2,2);
|
||||||
|
CREATE TABLE t2 LIKE t1;
|
||||||
|
INSERT INTO t2 VALUES (1,1),(2,2);
|
||||||
|
CREATE TABLE t3 LIKE t1;
|
||||||
|
|
||||||
|
--echo # should have 1 impossible where and 2 dependent subqueries
|
||||||
|
EXPLAIN
|
||||||
|
SELECT 1 FROM t1
|
||||||
|
WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3))
|
||||||
|
ORDER BY count(*);
|
||||||
|
|
||||||
|
--echo # should not crash the next statement
|
||||||
|
SELECT 1 FROM t1
|
||||||
|
WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3))
|
||||||
|
ORDER BY count(*);
|
||||||
|
|
||||||
|
--echo # should not crash: the crash is caused by the previous statement
|
||||||
|
SELECT 1;
|
||||||
|
|
||||||
|
DROP TABLE t1,t2,t3;
|
||||||
|
|
||||||
|
|
||||||
|
--echo End of 5.0 tests.
|
@ -436,4 +436,16 @@ SELECT * FROM t2 WHERE a = sequence();
|
|||||||
DROP FUNCTION sequence;
|
DROP FUNCTION sequence;
|
||||||
DROP TABLE t1,t2;
|
DROP TABLE t1,t2;
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # Bug#46259: 5.0.83 -> 5.1.36, query doesn't work
|
||||||
|
--echo #
|
||||||
|
CREATE TABLE t1 ( a INT );
|
||||||
|
|
||||||
|
INSERT INTO t1 VALUES (1), (2), (3);
|
||||||
|
|
||||||
|
SELECT IF( a = 1, a, a ) AS `b` FROM t1 ORDER BY field( `b` + 1, 1 );
|
||||||
|
SELECT IF( a = 1, a, a ) AS `b` FROM t1 ORDER BY field( `b`, 1 );
|
||||||
|
|
||||||
|
DROP TABLE t1;
|
||||||
|
|
||||||
--echo End of 5.0 tests.
|
--echo End of 5.0 tests.
|
||||||
|
@ -225,4 +225,11 @@ insert into t2 values(@q);
|
|||||||
|
|
||||||
drop table t1, t2;
|
drop table t1, t2;
|
||||||
|
|
||||||
|
#
|
||||||
|
# Bug#42364 SHOW ERRORS returns empty resultset after dropping non existent table
|
||||||
|
#
|
||||||
|
--error ER_BAD_TABLE_ERROR
|
||||||
|
DROP TABLE t1;
|
||||||
|
SHOW ERRORS;
|
||||||
|
|
||||||
--echo End of 5.0 tests
|
--echo End of 5.0 tests
|
||||||
|
@ -88,6 +88,13 @@ int my_copy(const char *from, const char *to, myf MyFlags)
       goto err;
   }
 
+  /* sync the destination file */
+  if (MyFlags & MY_SYNC)
+  {
+    if (my_sync(to_file, MyFlags))
+      goto err;
+  }
+
   if (my_close(from_file,MyFlags) | my_close(to_file,MyFlags))
     DBUG_RETURN(-1);				/* Error on close */
 
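Side note on the MY_SYNC branch added above: the intent (flush the freshly written destination file to stable storage before reporting success) can be sketched with plain POSIX calls; the helper below is illustrative only and is not part of mysys.

#include <sys/types.h>
#include <unistd.h>   /* read, write, fsync */

/* Minimal sketch: copy the payload, then fsync() the destination descriptor
   so the copied bytes are durable before success is reported (the step the
   MY_SYNC flag enables in my_copy() above). */
static int copy_fd_durably(int from_fd, int to_fd)
{
  char buf[4096];
  ssize_t n;
  while ((n= read(from_fd, buf, sizeof(buf))) > 0)
  {
    if (write(to_fd, buf, (size_t) n) != n)
      return -1;                      /* short write or write error */
  }
  if (n < 0)
    return -1;                        /* read error */
  if (fsync(to_fd) != 0)              /* flush file data to stable storage */
    return -1;
  return 0;
}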
@ -239,6 +239,7 @@ void ha_partition::init_handler_variables()
|
|||||||
m_curr_key_info[0]= NULL;
|
m_curr_key_info[0]= NULL;
|
||||||
m_curr_key_info[1]= NULL;
|
m_curr_key_info[1]= NULL;
|
||||||
is_clone= FALSE,
|
is_clone= FALSE,
|
||||||
|
m_part_func_monotonicity_info= NON_MONOTONIC;
|
||||||
auto_increment_lock= FALSE;
|
auto_increment_lock= FALSE;
|
||||||
auto_increment_safe_stmt_log_lock= FALSE;
|
auto_increment_safe_stmt_log_lock= FALSE;
|
||||||
/*
|
/*
|
||||||
@ -705,6 +706,7 @@ int ha_partition::rename_partitions(const char *path)
|
|||||||
if (m_is_sub_partitioned)
|
if (m_is_sub_partitioned)
|
||||||
{
|
{
|
||||||
List_iterator<partition_element> sub_it(part_elem->subpartitions);
|
List_iterator<partition_element> sub_it(part_elem->subpartitions);
|
||||||
|
j= 0;
|
||||||
do
|
do
|
||||||
{
|
{
|
||||||
sub_elem= sub_it++;
|
sub_elem= sub_it++;
|
||||||
@ -2464,11 +2466,18 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */
|
||||||
|
if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE))
|
||||||
|
DBUG_RETURN(1);
|
||||||
|
bitmap_clear_all(&m_bulk_insert_started);
|
||||||
/* Initialize the bitmap we use to determine what partitions are used */
|
/* Initialize the bitmap we use to determine what partitions are used */
|
||||||
if (!is_clone)
|
if (!is_clone)
|
||||||
{
|
{
|
||||||
if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE))
|
if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE))
|
||||||
|
{
|
||||||
|
bitmap_free(&m_bulk_insert_started);
|
||||||
DBUG_RETURN(1);
|
DBUG_RETURN(1);
|
||||||
|
}
|
||||||
bitmap_set_all(&(m_part_info->used_partitions));
|
bitmap_set_all(&(m_part_info->used_partitions));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2552,12 +2561,18 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
|
|||||||
calling open on all individual handlers.
|
calling open on all individual handlers.
|
||||||
*/
|
*/
|
||||||
m_handler_status= handler_opened;
|
m_handler_status= handler_opened;
|
||||||
|
if (m_part_info->part_expr)
|
||||||
|
m_part_func_monotonicity_info=
|
||||||
|
m_part_info->part_expr->get_monotonicity_info();
|
||||||
|
else if (m_part_info->list_of_part_fields)
|
||||||
|
m_part_func_monotonicity_info= MONOTONIC_STRICT_INCREASING;
|
||||||
info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
|
info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
|
||||||
DBUG_RETURN(0);
|
DBUG_RETURN(0);
|
||||||
|
|
||||||
err_handler:
|
err_handler:
|
||||||
while (file-- != m_file)
|
while (file-- != m_file)
|
||||||
(*file)->close();
|
(*file)->close();
|
||||||
|
bitmap_free(&m_bulk_insert_started);
|
||||||
if (!is_clone)
|
if (!is_clone)
|
||||||
bitmap_free(&(m_part_info->used_partitions));
|
bitmap_free(&(m_part_info->used_partitions));
|
||||||
|
|
||||||
@ -2605,6 +2620,7 @@ int ha_partition::close(void)
|
|||||||
|
|
||||||
DBUG_ASSERT(table->s == table_share);
|
DBUG_ASSERT(table->s == table_share);
|
||||||
delete_queue(&m_queue);
|
delete_queue(&m_queue);
|
||||||
|
bitmap_free(&m_bulk_insert_started);
|
||||||
if (!is_clone)
|
if (!is_clone)
|
||||||
bitmap_free(&(m_part_info->used_partitions));
|
bitmap_free(&(m_part_info->used_partitions));
|
||||||
file= m_file;
|
file= m_file;
|
||||||
@ -3021,10 +3037,12 @@ int ha_partition::write_row(uchar * buf)
|
|||||||
}
|
}
|
||||||
m_last_part= part_id;
|
m_last_part= part_id;
|
||||||
DBUG_PRINT("info", ("Insert in partition %d", part_id));
|
DBUG_PRINT("info", ("Insert in partition %d", part_id));
|
||||||
|
start_part_bulk_insert(part_id);
|
||||||
|
|
||||||
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
|
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
|
||||||
error= m_file[part_id]->ha_write_row(buf);
|
error= m_file[part_id]->ha_write_row(buf);
|
||||||
if (have_auto_increment && !table->s->next_number_keypart)
|
if (have_auto_increment && !table->s->next_number_keypart)
|
||||||
set_auto_increment_if_higher(table->next_number_field->val_int());
|
set_auto_increment_if_higher(table->next_number_field);
|
||||||
reenable_binlog(thd);
|
reenable_binlog(thd);
|
||||||
exit:
|
exit:
|
||||||
table->timestamp_field_type= orig_timestamp_type;
|
table->timestamp_field_type= orig_timestamp_type;
|
||||||
@ -3083,6 +3101,7 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
|
|||||||
}
|
}
|
||||||
|
|
||||||
m_last_part= new_part_id;
|
m_last_part= new_part_id;
|
||||||
|
start_part_bulk_insert(new_part_id);
|
||||||
if (new_part_id == old_part_id)
|
if (new_part_id == old_part_id)
|
||||||
{
|
{
|
||||||
DBUG_PRINT("info", ("Update in partition %d", new_part_id));
|
DBUG_PRINT("info", ("Update in partition %d", new_part_id));
|
||||||
@ -3128,7 +3147,7 @@ exit:
|
|||||||
HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
|
HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
|
||||||
if (!ha_data->auto_inc_initialized)
|
if (!ha_data->auto_inc_initialized)
|
||||||
info(HA_STATUS_AUTO);
|
info(HA_STATUS_AUTO);
|
||||||
set_auto_increment_if_higher(table->found_next_number_field->val_int());
|
set_auto_increment_if_higher(table->found_next_number_field);
|
||||||
}
|
}
|
||||||
table->timestamp_field_type= orig_timestamp_type;
|
table->timestamp_field_type= orig_timestamp_type;
|
||||||
DBUG_RETURN(error);
|
DBUG_RETURN(error);
|
||||||
@ -3247,22 +3266,65 @@ int ha_partition::delete_all_rows()
   DESCRIPTION
     rows == 0 means we will probably insert many rows
 */
 
 void ha_partition::start_bulk_insert(ha_rows rows)
 {
-  handler **file;
   DBUG_ENTER("ha_partition::start_bulk_insert");
 
-  rows= rows ? rows/m_tot_parts + 1 : 0;
-  file= m_file;
-  do
-  {
-    (*file)->ha_start_bulk_insert(rows);
-  } while (*(++file));
+  m_bulk_inserted_rows= 0;
+  bitmap_clear_all(&m_bulk_insert_started);
+  /* use the last bit for marking if bulk_insert_started was called */
+  bitmap_set_bit(&m_bulk_insert_started, m_tot_parts);
   DBUG_VOID_RETURN;
 }
 
 
+/*
+  Check if start_bulk_insert has been called for this partition,
+  if not, call it and mark it called
+*/
+void ha_partition::start_part_bulk_insert(uint part_id)
+{
+  if (!bitmap_is_set(&m_bulk_insert_started, part_id) &&
+      bitmap_is_set(&m_bulk_insert_started, m_tot_parts))
+  {
+    m_file[part_id]->ha_start_bulk_insert(guess_bulk_insert_rows());
+    bitmap_set_bit(&m_bulk_insert_started, part_id);
+  }
+  m_bulk_inserted_rows++;
+}
+
+
+/*
+  Try to predict the number of inserts into this partition.
+
+  If less than 10 rows (including 0 which means Unknown)
+    just give that as a guess
+  If monotonic partitioning function was used
+    guess that 50 % of the inserts goes to the first partition
+  For all other cases, guess on equal distribution between the partitions
+*/
+ha_rows ha_partition::guess_bulk_insert_rows()
+{
+  DBUG_ENTER("guess_bulk_insert_rows");
+
+  if (estimation_rows_to_insert < 10)
+    DBUG_RETURN(estimation_rows_to_insert);
+
+  /* If first insert/partition and monotonic partition function, guess 50%. */
+  if (!m_bulk_inserted_rows &&
+      m_part_func_monotonicity_info != NON_MONOTONIC &&
+      m_tot_parts > 1)
+    DBUG_RETURN(estimation_rows_to_insert / 2);
+
+  /* Else guess on equal distribution (+1 is to avoid returning 0/Unknown) */
+  if (m_bulk_inserted_rows < estimation_rows_to_insert)
+    DBUG_RETURN(((estimation_rows_to_insert - m_bulk_inserted_rows)
+                / m_tot_parts) + 1);
+  /* The estimation was wrong, must say 'Unknown' */
+  DBUG_RETURN(0);
+}
+
+
 /*
   Finish a large batch of insert rows
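As a standalone illustration of the guessing rules in the comment above, the same logic can be written as a small free function; the names and integer types below are assumptions for the sketch, not part of the patch.

#include <cstdint>

/* Sketch of the rules described above: small or unknown estimates pass
   through unchanged, the first insert under a monotonic partitioning
   function is assumed to hit one partition with roughly half of the rows,
   and everything else is spread evenly (+1 so 0, meaning "unknown", is
   never returned by accident). */
static uint64_t guess_rows_per_partition(uint64_t estimated_total,   /* 0 = unknown */
                                         uint64_t rows_seen_so_far,
                                         uint32_t total_partitions,
                                         bool     monotonic_part_func)
{
  if (estimated_total < 10)
    return estimated_total;

  if (rows_seen_so_far == 0 && monotonic_part_func && total_partitions > 1)
    return estimated_total / 2;

  if (rows_seen_so_far < estimated_total)
    return (estimated_total - rows_seen_so_far) / total_partitions + 1;

  return 0;   /* estimate exhausted: report unknown */
}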
@ -3272,21 +3334,29 @@ void ha_partition::start_bulk_insert(ha_rows rows)
   RETURN VALUE
     >0                      Error code
     0                       Success
 
+  Note: end_bulk_insert can be called without start_bulk_insert
+        being called, see bug#44108.
+
 */
 
 int ha_partition::end_bulk_insert()
 {
   int error= 0;
-  handler **file;
+  uint i;
   DBUG_ENTER("ha_partition::end_bulk_insert");
 
-  file= m_file;
-  do
+  if (!bitmap_is_set(&m_bulk_insert_started, m_tot_parts))
+    DBUG_RETURN(error);
+
+  for (i= 0; i < m_tot_parts; i++)
   {
     int tmp;
-    if ((tmp= (*file)->ha_end_bulk_insert()))
+    if (bitmap_is_set(&m_bulk_insert_started, i) &&
+        (tmp= m_file[i]->ha_end_bulk_insert()))
       error= tmp;
-  } while (*(++file));
+  }
+  bitmap_clear_all(&m_bulk_insert_started);
   DBUG_RETURN(error);
 }
 
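The bookkeeping behind start_part_bulk_insert() and end_bulk_insert() above (start bulk mode lazily, only for partitions that actually receive rows, and end it only where it was started) follows a generic pattern. A hedged sketch, with a vector of flags standing in for the MY_BITMAP and all names invented for the illustration:

#include <algorithm>
#include <cstddef>
#include <vector>

/* Generic sketch of the lazy start/end bookkeeping shown above. */
struct BulkInsertTracker
{
  std::vector<bool> started;          /* one flag per partition        */
  bool bulk_mode= false;              /* set when a batch is in flight */

  explicit BulkInsertTracker(size_t partitions) : started(partitions, false) {}

  void begin_batch()                  /* analogue of start_bulk_insert() */
  {
    std::fill(started.begin(), started.end(), false);
    bulk_mode= true;
  }

  /* Returns true exactly once per partition and batch, i.e. when the
     per-partition bulk insert should actually be started. */
  bool note_row(size_t part_id)       /* analogue of start_part_bulk_insert() */
  {
    if (!bulk_mode || started[part_id])
      return false;
    started[part_id]= true;
    return true;
  }

  /* Calls end_partition(i) only for partitions whose bulk insert started. */
  template <typename F> void end_batch(F end_partition)
  {
    if (!bulk_mode)
      return;                         /* end without start: nothing to do */
    for (size_t i= 0; i < started.size(); i++)
      if (started[i])
        end_partition(i);
    bulk_mode= false;
  }
};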
@ -176,6 +176,11 @@ private:
     This to ensure it will work with statement based replication.
   */
   bool auto_increment_safe_stmt_log_lock;
+  /** For optimizing ha_start_bulk_insert calls */
+  MY_BITMAP m_bulk_insert_started;
+  ha_rows m_bulk_inserted_rows;
+  /** used for prediction of start_bulk_insert rows */
+  enum_monotonicity_info m_part_func_monotonicity_info;
 public:
   handler *clone(MEM_ROOT *mem_root);
   virtual void set_part_info(partition_info *part_info)
@ -353,7 +358,6 @@ public:
     Bulk inserts are supported if all underlying handlers support it.
     start_bulk_insert and end_bulk_insert is called before and after a
     number of calls to write_row.
-    Not yet though.
   */
   virtual int write_row(uchar * buf);
   virtual int update_row(const uchar * old_data, uchar * new_data);
@ -361,6 +365,10 @@ public:
   virtual int delete_all_rows(void);
   virtual void start_bulk_insert(ha_rows rows);
   virtual int end_bulk_insert();
+private:
+  ha_rows guess_bulk_insert_rows();
+  void start_part_bulk_insert(uint part_id);
+public:
 
   virtual bool is_fatal_error(int error, uint flags)
   {
@ -936,9 +944,11 @@ private:
       auto_increment_lock= FALSE;
     }
   }
-  virtual void set_auto_increment_if_higher(const ulonglong nr)
+  virtual void set_auto_increment_if_higher(Field *field)
   {
     HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
+    ulonglong nr= (((Field_num*) field)->unsigned_flag ||
+                   field->val_int() > 0) ? field->val_int() : 0;
     lock_auto_increment();
     DBUG_ASSERT(ha_data->auto_inc_initialized == TRUE);
     /* must check when the mutex is taken */
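A minimal sketch of the clamping added to set_auto_increment_if_higher() above; the watermark update at the end is an assumption about the surrounding code, which this hunk does not show.

#include <cstdint>

/* A signed value <= 0 inserted into an AUTO_INCREMENT column must not move
   the shared "next value" watermark backwards (or wrap it when cast to
   unsigned), so it is treated as 0 before the comparison. */
static void bump_next_auto_inc(uint64_t *next_value, long long inserted,
                               bool column_is_unsigned)
{
  uint64_t nr= (column_is_unsigned || inserted > 0)
                 ? (uint64_t) inserted : 0;
  if (nr >= *next_value)
    *next_value= nr + 1;   /* assumed: only ever move the watermark forward */
}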
@ -1885,12 +1885,42 @@ bool ha_flush_logs(handlerton *db_type)
|
|||||||
return FALSE;
|
return FALSE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
@brief make canonical filename
|
||||||
|
|
||||||
|
@param[in] file table handler
|
||||||
|
@param[in] path original path
|
||||||
|
@param[out] tmp_path buffer for canonized path
|
||||||
|
|
||||||
|
@details Lower case db name and table name path parts for
|
||||||
|
non file based tables when lower_case_table_names
|
||||||
|
is 2 (store as is, compare in lower case).
|
||||||
|
Filesystem path prefix (mysql_data_home or tmpdir)
|
||||||
|
is left intact.
|
||||||
|
|
||||||
|
@note tmp_path may be left intact if no conversion was
|
||||||
|
performed.
|
||||||
|
|
||||||
|
@retval canonized path
|
||||||
|
|
||||||
|
@todo This may be done more efficiently when table path
|
||||||
|
gets built. Convert this function to something like
|
||||||
|
ASSERT_CANONICAL_FILENAME.
|
||||||
|
*/
|
||||||
const char *get_canonical_filename(handler *file, const char *path,
|
const char *get_canonical_filename(handler *file, const char *path,
|
||||||
char *tmp_path)
|
char *tmp_path)
|
||||||
{
|
{
|
||||||
|
uint i;
|
||||||
if (lower_case_table_names != 2 || (file->ha_table_flags() & HA_FILE_BASED))
|
if (lower_case_table_names != 2 || (file->ha_table_flags() & HA_FILE_BASED))
|
||||||
return path;
|
return path;
|
||||||
|
|
||||||
|
for (i= 0; i <= mysql_tmpdir_list.max; i++)
|
||||||
|
{
|
||||||
|
if (is_prefix(path, mysql_tmpdir_list.list[i]))
|
||||||
|
return path;
|
||||||
|
}
|
||||||
|
|
||||||
/* Ensure that table handler get path in lower case */
|
/* Ensure that table handler get path in lower case */
|
||||||
if (tmp_path != path)
|
if (tmp_path != path)
|
||||||
strmov(tmp_path, path);
|
strmov(tmp_path, path);
|
||||||
|
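The behaviour documented in the new get_canonical_filename() comment above can be sketched with standard C++ string handling. This is a simplified illustration only (it lower-cases the whole remaining path instead of just the db/table components) and not the server implementation.

#include <cctype>
#include <string>
#include <vector>

/* Lower-case a table path when lower_case_table_names == 2, but leave paths
   under any of the temporary directories untouched, since their spelling on
   disk must be preserved. */
static std::string canonical_table_path(const std::string &path,
                                        const std::vector<std::string> &tmpdirs,
                                        bool lower_case_mode_2)
{
  if (!lower_case_mode_2)
    return path;                              /* nothing to do */

  for (const std::string &tmp : tmpdirs)
    if (path.compare(0, tmp.size(), tmp) == 0)
      return path;                            /* tmpdir prefix: leave as is */

  std::string result= path;
  for (char &c : result)
    c= (char) std::tolower((unsigned char) c); /* simplification, see note */
  return result;
}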
@ -631,6 +631,7 @@ String *Item_func_concat_ws::val_str(String *str)
|
|||||||
String tmp_sep_str(tmp_str_buff, sizeof(tmp_str_buff),default_charset_info),
|
String tmp_sep_str(tmp_str_buff, sizeof(tmp_str_buff),default_charset_info),
|
||||||
*sep_str, *res, *res2,*use_as_buff;
|
*sep_str, *res, *res2,*use_as_buff;
|
||||||
uint i;
|
uint i;
|
||||||
|
bool is_const= 0;
|
||||||
|
|
||||||
null_value=0;
|
null_value=0;
|
||||||
if (!(sep_str= args[0]->val_str(&tmp_sep_str)))
|
if (!(sep_str= args[0]->val_str(&tmp_sep_str)))
|
||||||
@ -644,7 +645,11 @@ String *Item_func_concat_ws::val_str(String *str)
|
|||||||
// If not, return the empty string
|
// If not, return the empty string
|
||||||
for (i=1; i < arg_count; i++)
|
for (i=1; i < arg_count; i++)
|
||||||
if ((res= args[i]->val_str(str)))
|
if ((res= args[i]->val_str(str)))
|
||||||
|
{
|
||||||
|
is_const= args[i]->const_item() || !args[i]->used_tables();
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
if (i == arg_count)
|
if (i == arg_count)
|
||||||
return &my_empty_string;
|
return &my_empty_string;
|
||||||
|
|
||||||
@ -662,7 +667,7 @@ String *Item_func_concat_ws::val_str(String *str)
|
|||||||
current_thd->variables.max_allowed_packet);
|
current_thd->variables.max_allowed_packet);
|
||||||
goto null;
|
goto null;
|
||||||
}
|
}
|
||||||
if (res->alloced_length() >=
|
if (!is_const && res->alloced_length() >=
|
||||||
res->length() + sep_str->length() + res2->length())
|
res->length() + sep_str->length() + res2->length())
|
||||||
{ // Use old buffer
|
{ // Use old buffer
|
||||||
res->append(*sep_str); // res->length() > 0 always
|
res->append(*sep_str); // res->length() > 0 always
|
||||||
|
@ -155,13 +155,11 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref)
|
|||||||
if (check_stack_overrun(thd, STACK_MIN_SIZE, (uchar*)&res))
|
if (check_stack_overrun(thd, STACK_MIN_SIZE, (uchar*)&res))
|
||||||
return TRUE;
|
return TRUE;
|
||||||
|
|
||||||
res= engine->prepare();
|
if (!(res= engine->prepare()))
|
||||||
|
|
||||||
// all transformation is done (used by prepared statements)
|
|
||||||
changed= 1;
|
|
||||||
|
|
||||||
if (!res)
|
|
||||||
{
|
{
|
||||||
|
// all transformation is done (used by prepared statements)
|
||||||
|
changed= 1;
|
||||||
|
|
||||||
if (substitution)
|
if (substitution)
|
||||||
{
|
{
|
||||||
int ret= 0;
|
int ret= 0;
|
||||||
|
@ -1024,14 +1024,10 @@ bool LOGGER::general_log_write(THD *thd, enum enum_server_command command,
|
|||||||
Log_event_handler **current_handler= general_log_handler_list;
|
Log_event_handler **current_handler= general_log_handler_list;
|
||||||
char user_host_buff[MAX_USER_HOST_SIZE + 1];
|
char user_host_buff[MAX_USER_HOST_SIZE + 1];
|
||||||
Security_context *sctx= thd->security_ctx;
|
Security_context *sctx= thd->security_ctx;
|
||||||
ulong id;
|
|
||||||
uint user_host_len= 0;
|
uint user_host_len= 0;
|
||||||
time_t current_time;
|
time_t current_time;
|
||||||
|
|
||||||
if (thd)
|
DBUG_ASSERT(thd);
|
||||||
id= thd->thread_id; /* Normal thread */
|
|
||||||
else
|
|
||||||
id= 0; /* Log from connect handler */
|
|
||||||
|
|
||||||
lock_shared();
|
lock_shared();
|
||||||
if (!opt_log)
|
if (!opt_log)
|
||||||
@ -1050,7 +1046,7 @@ bool LOGGER::general_log_write(THD *thd, enum enum_server_command command,
|
|||||||
while (*current_handler)
|
while (*current_handler)
|
||||||
error|= (*current_handler++)->
|
error|= (*current_handler++)->
|
||||||
log_general(thd, current_time, user_host_buff,
|
log_general(thd, current_time, user_host_buff,
|
||||||
user_host_len, id,
|
user_host_len, thd->thread_id,
|
||||||
command_name[(uint) command].str,
|
command_name[(uint) command].str,
|
||||||
command_name[(uint) command].length,
|
command_name[(uint) command].length,
|
||||||
query, query_length,
|
query, query_length,
|
||||||
|
@ -8312,6 +8312,16 @@ Write_rows_log_event::do_before_row_operations(const Slave_reporting_capability
|
|||||||
|
|
||||||
/* Honor next number column if present */
|
/* Honor next number column if present */
|
||||||
m_table->next_number_field= m_table->found_next_number_field;
|
m_table->next_number_field= m_table->found_next_number_field;
|
||||||
|
/*
|
||||||
|
* Fixed Bug#45999, In RBR, Store engine of Slave auto-generates new
|
||||||
|
* sequence numbers for auto_increment fields if the values of them are 0.
|
||||||
|
* If generateing a sequence number is decided by the values of
|
||||||
|
* table->auto_increment_field_not_null and SQL_MODE(if includes
|
||||||
|
* MODE_NO_AUTO_VALUE_ON_ZERO) in update_auto_increment function.
|
||||||
|
* SQL_MODE of slave sql thread is always consistency with master's.
|
||||||
|
* In RBR, auto_increment fields never are NULL.
|
||||||
|
*/
|
||||||
|
m_table->auto_increment_field_not_null= TRUE;
|
||||||
return error;
|
return error;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -8321,6 +8331,7 @@ Write_rows_log_event::do_after_row_operations(const Slave_reporting_capability *
|
|||||||
{
|
{
|
||||||
int local_error= 0;
|
int local_error= 0;
|
||||||
m_table->next_number_field=0;
|
m_table->next_number_field=0;
|
||||||
|
m_table->auto_increment_field_not_null= FALSE;
|
||||||
if (bit_is_set(slave_exec_mode, SLAVE_EXEC_MODE_IDEMPOTENT) == 1 ||
|
if (bit_is_set(slave_exec_mode, SLAVE_EXEC_MODE_IDEMPOTENT) == 1 ||
|
||||||
m_table->s->db_type()->db_type == DB_TYPE_NDBCLUSTER)
|
m_table->s->db_type()->db_type == DB_TYPE_NDBCLUSTER)
|
||||||
{
|
{
|
||||||
|
@ -4789,10 +4789,10 @@ static bool read_init_file(char *file_name)
   DBUG_ENTER("read_init_file");
   DBUG_PRINT("enter",("name: %s",file_name));
   if (!(file=my_fopen(file_name,O_RDONLY,MYF(MY_WME))))
-    return(1);
+    DBUG_RETURN(TRUE);
   bootstrap(file);
   (void) my_fclose(file,MYF(MY_WME));
-  return 0;
+  DBUG_RETURN(FALSE);
 }
 
@ -311,5 +311,6 @@ void init_all_partitions_iterator(partition_info *part_info,
 {
   part_iter->part_nums.start= part_iter->part_nums.cur= 0;
   part_iter->part_nums.end= part_info->no_parts;
+  part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
   part_iter->get_next= get_next_partition_id_range;
 }
@ -350,6 +350,7 @@ Rpl_filter::add_do_db(const char* table_spec)
   DBUG_ENTER("Rpl_filter::add_do_db");
   i_string *db = new i_string(table_spec);
   do_db.push_back(db);
+  DBUG_VOID_RETURN;
 }
 
 
@ -359,6 +360,7 @@ Rpl_filter::add_ignore_db(const char* table_spec)
   DBUG_ENTER("Rpl_filter::add_ignore_db");
   i_string *db = new i_string(table_spec);
   ignore_db.push_back(db);
+  DBUG_VOID_RETURN;
 }
 
 extern "C" uchar *get_table_key(const uchar *, size_t *, my_bool);
@ -1238,6 +1238,7 @@ void fix_slave_exec_mode(enum_var_type type)
|
|||||||
}
|
}
|
||||||
if (bit_is_set(slave_exec_mode_options, SLAVE_EXEC_MODE_IDEMPOTENT) == 0)
|
if (bit_is_set(slave_exec_mode_options, SLAVE_EXEC_MODE_IDEMPOTENT) == 0)
|
||||||
bit_do_set(slave_exec_mode_options, SLAVE_EXEC_MODE_STRICT);
|
bit_do_set(slave_exec_mode_options, SLAVE_EXEC_MODE_STRICT);
|
||||||
|
DBUG_VOID_RETURN;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
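The read_init_file(), Rpl_filter and fix_slave_exec_mode() hunks all apply the same rule: a function that opens a debug frame with DBUG_ENTER() must leave through DBUG_RETURN()/DBUG_VOID_RETURN() rather than a plain return, so the frame is popped on every exit path. A minimal sketch of the convention (generic function; the include path is an assumption, not taken from this commit):

#include "my_dbug.h"            /* assumed header providing the DBUG macros */

static void do_something(void)
{
  DBUG_ENTER("do_something");
  /* ... work ... */
  DBUG_VOID_RETURN;             /* never a bare "return;" after DBUG_ENTER */
}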
@@ -399,6 +399,31 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length,
   return buffer;
 }
 
 
+/**
+  Implementation of Drop_table_error_handler::handle_error().
+
+  The reason in having this implementation is to silence technical low-level
+  warnings during DROP TABLE operation. Currently we don't want to expose
+  the following warnings during DROP TABLE:
+    - Some of table files are missed or invalid (the table is going to be
+      deleted anyway, so why bother that something was missed);
+    - A trigger associated with the table does not have DEFINER (One of the
+      MySQL specifics now is that triggers are loaded for the table being
+      dropped. So, we may have a warning that trigger does not have DEFINER
+      attribute during DROP TABLE operation).
+
+  @return TRUE if the condition is handled.
+*/
+bool Drop_table_error_handler::handle_error(uint sql_errno,
+                                            const char *message,
+                                            MYSQL_ERROR::enum_warning_level level,
+                                            THD *thd)
+{
+  return ((sql_errno == EE_DELETE && my_errno == ENOENT) ||
+          sql_errno == ER_TRG_NO_DEFINER);
+}
+
+
 /**
   Clear this diagnostics area.

@@ -1091,6 +1091,31 @@ public:
 };
 
 
+/**
+  This class is an internal error handler implementation for
+  DROP TABLE statements. The thing is that there may be warnings during
+  execution of these statements, which should not be exposed to the user.
+  This class is intended to silence such warnings.
+*/
+
+class Drop_table_error_handler : public Internal_error_handler
+{
+public:
+  Drop_table_error_handler(Internal_error_handler *err_handler)
+    :m_err_handler(err_handler)
+  { }
+
+public:
+  bool handle_error(uint sql_errno,
+                    const char *message,
+                    MYSQL_ERROR::enum_warning_level level,
+                    THD *thd);
+
+private:
+  Internal_error_handler *m_err_handler;
+};
+
+
 /**
   Stores status of the currently executed statement.
   Cleared at the beginning of the statement, and then
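Taken together, the two hunks above add a handler whose only job is to swallow those two warning conditions while a table is being dropped; the mysql_rm_table() hunk further below installs it around mysql_rm_table_part2(). Condensed into one place, the intended usage looks roughly like this (an illustrative sketch; the wrapper function itself is hypothetical):

/* Illustrative only: how the handler added above is meant to be used. */
static bool drop_tables_silencing_warnings(THD *thd, TABLE_LIST *tables)
{
  Drop_table_error_handler err_handler(thd->get_internal_handler());
  int error;

  thd->push_internal_handler(&err_handler);   /* divert warnings to the handler */
  error= mysql_rm_table_part2(thd, tables, /* if_exists */ TRUE,
                              /* drop_temporary */ FALSE, 0, 0);
  thd->pop_internal_handler();                /* restore the previous handler */
  return error != 0;
}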
@@ -2274,44 +2274,9 @@ void kill_delayed_threads(void)
 }
 
 
-/*
- * Create a new delayed insert thread
-*/
-
-pthread_handler_t handle_delayed_insert(void *arg)
+static void handle_delayed_insert_impl(THD *thd, Delayed_insert *di)
 {
-  Delayed_insert *di=(Delayed_insert*) arg;
-  THD *thd= &di->thd;
-
-  pthread_detach_this_thread();
-  /* Add thread to THD list so that's it's visible in 'show processlist' */
-  pthread_mutex_lock(&LOCK_thread_count);
-  thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
-  thd->set_current_time();
-  threads.append(thd);
-  thd->killed=abort_loop ? THD::KILL_CONNECTION : THD::NOT_KILLED;
-  pthread_mutex_unlock(&LOCK_thread_count);
-
-  /*
-    Wait until the client runs into pthread_cond_wait(),
-    where we free it after the table is opened and di linked in the list.
-    If we did not wait here, the client might detect the opened table
-    before it is linked to the list. It would release LOCK_delayed_create
-    and allow another thread to create another handler for the same table,
-    since it does not find one in the list.
-  */
-  pthread_mutex_lock(&di->mutex);
-#if !defined( __WIN__) /* Win32 calls this in pthread_create */
-  if (my_thread_init())
-  {
-    /* Can't use my_error since store_globals has not yet been called */
-    thd->main_da.set_error_status(thd, ER_OUT_OF_RESOURCES,
-                                  ER(ER_OUT_OF_RESOURCES));
-    goto end;
-  }
-#endif
-
-  DBUG_ENTER("handle_delayed_insert");
+  DBUG_ENTER("handle_delayed_insert_impl");
   thd->thread_stack= (char*) &thd;
   if (init_thr_lock() || thd->store_globals())
   {

@@ -2500,6 +2465,49 @@ err:
   */
   ha_autocommit_or_rollback(thd, 1);
 
+  DBUG_VOID_RETURN;
+}
+
+
+/*
+ * Create a new delayed insert thread
+*/
+
+pthread_handler_t handle_delayed_insert(void *arg)
+{
+  Delayed_insert *di=(Delayed_insert*) arg;
+  THD *thd= &di->thd;
+
+  pthread_detach_this_thread();
+  /* Add thread to THD list so that's it's visible in 'show processlist' */
+  pthread_mutex_lock(&LOCK_thread_count);
+  thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
+  thd->set_current_time();
+  threads.append(thd);
+  thd->killed=abort_loop ? THD::KILL_CONNECTION : THD::NOT_KILLED;
+  pthread_mutex_unlock(&LOCK_thread_count);
+
+  /*
+    Wait until the client runs into pthread_cond_wait(),
+    where we free it after the table is opened and di linked in the list.
+    If we did not wait here, the client might detect the opened table
+    before it is linked to the list. It would release LOCK_delayed_create
+    and allow another thread to create another handler for the same table,
+    since it does not find one in the list.
+  */
+  pthread_mutex_lock(&di->mutex);
+#if !defined( __WIN__) /* Win32 calls this in pthread_create */
+  if (my_thread_init())
+  {
+    /* Can't use my_error since store_globals has not yet been called */
+    thd->main_da.set_error_status(thd, ER_OUT_OF_RESOURCES,
+                                  ER(ER_OUT_OF_RESOURCES));
+    goto end;
+  }
+#endif
+
+  handle_delayed_insert_impl(thd, di);
+
 #ifndef __WIN__
 end:
 #endif

@@ -2523,7 +2531,8 @@ end:
 
   my_thread_end();
   pthread_exit(0);
-  DBUG_RETURN(0);
+
+  return 0;
 }
 
 
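The three sql_insert.cc hunks above implement one refactoring: the pthread entry point keeps only the setup that may bail out with goto before any debug frame exists, and the new handle_delayed_insert_impl() owns the DBUG_ENTER()/DBUG_VOID_RETURN() pair, so every exit unwinds the debug stack. The handle_bootstrap() hunks below apply the same split. Reduced to its shape, the pattern looks roughly like this (generic names, a sketch rather than code from this tree):

/* Generic shape of the wrapper/impl split used above (names invented). */
static void worker_impl(THD *thd)
{
  DBUG_ENTER("worker_impl");
  /* ... all the real work; every return goes through DBUG_VOID_RETURN ... */
  DBUG_VOID_RETURN;
}

pthread_handler_t worker(void *arg)
{
  THD *thd= (THD*) arg;

  if (my_thread_init() || thd->store_globals())
    goto end;                  /* no DBUG frame opened yet, plain goto is fine */

  worker_impl(thd);            /* the DBUG pairing lives entirely in the impl */

end:
  my_thread_end();
  return 0;
}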
@@ -3596,7 +3605,7 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
   DBUG_EXECUTE_IF("sleep_create_select_before_check_if_exists", my_sleep(6000000););
 
   if (!(create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
-      create_table->table->db_stat)
+      (create_table->table && create_table->table->db_stat))
   {
     /* Table already exists and was open at open_and_lock_tables() stage. */
     if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
@@ -408,29 +408,12 @@ void execute_init_command(THD *thd, sys_var_str *init_command_var,
 }
 
 
-/**
-  Execute commands from bootstrap_file.
-
-  Used when creating the initial grant tables.
-*/
-
-pthread_handler_t handle_bootstrap(void *arg)
+static void handle_bootstrap_impl(THD *thd)
 {
-  THD *thd=(THD*) arg;
   FILE *file=bootstrap_file;
   char *buff;
   const char* found_semicolon= NULL;
 
-  /* The following must be called before DBUG_ENTER */
-  thd->thread_stack= (char*) &thd;
-  if (my_thread_init() || thd->store_globals())
-  {
-#ifndef EMBEDDED_LIBRARY
-    close_connection(thd, ER_OUT_OF_RESOURCES, 1);
-#endif
-    thd->fatal_error();
-    goto end;
-  }
   DBUG_ENTER("handle_bootstrap");
 
 #ifndef EMBEDDED_LIBRARY

@@ -525,6 +508,33 @@ pthread_handler_t handle_bootstrap(void *arg)
 #endif
   }
 
+  DBUG_VOID_RETURN;
+}
+
+
+/**
+  Execute commands from bootstrap_file.
+
+  Used when creating the initial grant tables.
+*/
+
+pthread_handler_t handle_bootstrap(void *arg)
+{
+  THD *thd=(THD*) arg;
+
+  /* The following must be called before DBUG_ENTER */
+  thd->thread_stack= (char*) &thd;
+  if (my_thread_init() || thd->store_globals())
+  {
+#ifndef EMBEDDED_LIBRARY
+    close_connection(thd, ER_OUT_OF_RESOURCES, 1);
+#endif
+    thd->fatal_error();
+    goto end;
+  }
+
+  handle_bootstrap_impl(thd);
+
 end:
   net_end(&thd->net);
   thd->cleanup();

@@ -539,7 +549,8 @@ end:
   my_thread_end();
   pthread_exit(0);
 #endif
-  DBUG_RETURN(0);
+
+  return 0;
 }
 
 
@@ -1523,12 +1523,8 @@ JOIN::optimize()
       }
     }
 
-    /*
-      If this join belongs to an uncacheable subquery save
-      the original join
-    */
-    if (select_lex->uncacheable && !is_top_level_join() &&
-        init_save_join_tab())
+    /* If this join belongs to an uncacheable query save the original join */
+    if (select_lex->uncacheable && init_save_join_tab())
       DBUG_RETURN(-1); /* purecov: inspected */
   }
 
@ -2252,7 +2248,7 @@ JOIN::destroy()
|
|||||||
tab->cleanup();
|
tab->cleanup();
|
||||||
}
|
}
|
||||||
tmp_join->tmp_join= 0;
|
tmp_join->tmp_join= 0;
|
||||||
tmp_table_param.copy_field=0;
|
tmp_table_param.cleanup();
|
||||||
DBUG_RETURN(tmp_join->destroy());
|
DBUG_RETURN(tmp_join->destroy());
|
||||||
}
|
}
|
||||||
cond_equal= 0;
|
cond_equal= 0;
|
||||||
@@ -13682,7 +13678,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
     if (error)
     {
       if (error == HA_ERR_RECORD_DELETED)
-        continue;
+      {
+        error= file->rnd_next(record);
+        continue;
+      }
       if (error == HA_ERR_END_OF_FILE)
         break;
       goto err;
@@ -1772,6 +1772,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
                     my_bool drop_temporary)
 {
   bool error= FALSE, need_start_waiters= FALSE;
+  Drop_table_error_handler err_handler(thd->get_internal_handler());
   DBUG_ENTER("mysql_rm_table");
 
   /* mark for close and remove all cached entries */

@@ -1792,7 +1793,10 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
     LOCK_open during wait_if_global_read_lock(), other threads could not
     close their tables. This would make a pretty deadlock.
   */
+  thd->push_internal_handler(&err_handler);
   error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 0, 0);
+  thd->pop_internal_handler();
+
 
   if (need_start_waiters)
     start_waiting_global_read_lock(thd);

@@ -1894,9 +1898,6 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
       DBUG_RETURN(1);
   }
 
-  /* Don't give warnings for not found errors, as we already generate notes */
-  thd->no_warnings_for_error= 1;
-
   for (table= tables; table; table= table->next_local)
   {
     char *db=table->db;

@@ -2145,7 +2146,6 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
 err_with_placeholders:
   unlock_table_names(thd, tables, (TABLE_LIST*) 0);
   pthread_mutex_unlock(&LOCK_open);
-  thd->no_warnings_for_error= 0;
   DBUG_RETURN(error);
 }
 
@@ -5217,6 +5217,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table,
   char tmp_path[FN_REFLEN];
 #endif
   char ts_name[FN_LEN + 1];
+  myf flags= MY_DONT_OVERWRITE_FILE;
   DBUG_ENTER("mysql_create_like_table");
 
 

@@ -5273,8 +5274,12 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table,
 
   DBUG_EXECUTE_IF("sleep_create_like_before_copy", my_sleep(6000000););
 
+  if (opt_sync_frm && !(create_info->options & HA_LEX_CREATE_TMP_TABLE))
+    flags|= MY_SYNC;
+
   /*
     Create a new table by copying from source table
+    and sync the new table if the flag MY_SYNC is set
 
     Altough exclusive name-lock on target table protects us from concurrent
     DML and DDL operations on it we still want to wrap .FRM creation and call

@@ -5295,7 +5300,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table,
       goto err;
     }
   }
-  else if (my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE)))
+  else if (my_copy(src_path, dst_path, flags))
   {
     if (my_errno == ENOENT)
       my_error(ER_BAD_DB_ERROR,MYF(0),db);
@@ -7921,7 +7921,13 @@ udf_expr:
               $2->is_autogenerated_name= FALSE;
               $2->set_name($4.str, $4.length, system_charset_info);
             }
-            else
+            /*
+               A field has to have its proper name in order for name
+               resolution to work, something we are only guaranteed if we
+               parse it out. If we hijack the input stream with
+               remember_name we may get quoted or escaped names.
+            */
+            else if ($2->type() != Item::FIELD_ITEM)
               $2->set_name($1, (uint) ($3 - $1), YYTHD->charset());
             $$= $2;
           }
@@ -412,10 +412,10 @@ int rea_create_table(THD *thd, const char *path,
   DBUG_ASSERT(*fn_rext(frm_name));
   if (thd->variables.keep_files_on_create)
     create_info->options|= HA_CREATE_KEEP_FILES;
-  if (file->ha_create_handler_files(path, NULL, CHF_CREATE_FLAG, create_info))
-    goto err_handler;
-  if (!create_info->frm_only && ha_create_table(thd, path, db, table_name,
-                                                create_info,0))
+  if (!create_info->frm_only &&
+      (file->ha_create_handler_files(path, NULL, CHF_CREATE_FLAG,
+                                     create_info) ||
+       ha_create_table(thd, path, db, table_name, create_info, 0)))
     goto err_handler;
   DBUG_RETURN(0);
 
@@ -993,6 +993,7 @@ int ha_archive::rnd_init(bool scan)
   /* We rewind the file so that we can read from the beginning if scan */
   if (scan)
   {
+    scan_rows= stats.records;
     DBUG_PRINT("info", ("archive will retrieve %llu rows",
                         (unsigned long long) scan_rows));
 

@@ -1461,7 +1462,6 @@ int ha_archive::info(uint flag)
   stats.records= share->rows_recorded;
   pthread_mutex_unlock(&share->mutex);
 
-  scan_rows= stats.records;
   stats.deleted= 0;
 
   DBUG_PRINT("ha_archive", ("Stats rows is %d\n", (int)stats.records));

@@ -1472,11 +1472,12 @@ int ha_archive::info(uint flag)
 
     VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME)));
 
-    stats.mean_rec_length= table->s->reclength + buffer.alloced_length();
     stats.data_file_length= file_stat.st_size;
     stats.create_time= (ulong) file_stat.st_ctime;
     stats.update_time= (ulong) file_stat.st_mtime;
-    stats.max_data_file_length= share->rows_recorded * stats.mean_rec_length;
+    stats.mean_rec_length= stats.records ?
+      stats.data_file_length / stats.records : table->s->reclength;
+    stats.max_data_file_length= MAX_FILE_SIZE;
   }
   stats.delete_length= 0;
   stats.index_file_length=0;
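In the last ha_archive::info() hunk the mean record length is now derived from the data file instead of the in-memory row buffer, with a guard for the empty-table case so the division cannot be by zero. Isolated from the handler, the guarded average looks like this (a standalone sketch with assumed parameter names):

/* Standalone sketch of the guarded average used above (names assumed). */
static unsigned long mean_record_length(unsigned long long data_file_length,
                                        unsigned long long records,
                                        unsigned long reclength)
{
  /* With no rows yet, fall back to the table's defined record length. */
  return records ? (unsigned long) (data_file_length / records)
                 : reclength;
}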
@@ -302,17 +302,17 @@ static struct my_option my_long_options[] =
    (uchar**) &check_param.read_buffer_length,
    (uchar**) &check_param.read_buffer_length, 0, GET_ULONG, REQUIRED_ARG,
    (long) READ_BUFFER_INIT, (long) MALLOC_OVERHEAD,
-   (long) ~0L, (long) MALLOC_OVERHEAD, (long) 1L, 0},
+   INT_MAX32, (long) MALLOC_OVERHEAD, (long) 1L, 0},
   { "write_buffer_size", OPT_WRITE_BUFFER_SIZE, "",
    (uchar**) &check_param.write_buffer_length,
    (uchar**) &check_param.write_buffer_length, 0, GET_ULONG, REQUIRED_ARG,
    (long) READ_BUFFER_INIT, (long) MALLOC_OVERHEAD,
-   (long) ~0L, (long) MALLOC_OVERHEAD, (long) 1L, 0},
+   INT_MAX32, (long) MALLOC_OVERHEAD, (long) 1L, 0},
   { "sort_buffer_size", OPT_SORT_BUFFER_SIZE, "",
    (uchar**) &check_param.sort_buffer_length,
    (uchar**) &check_param.sort_buffer_length, 0, GET_ULONG, REQUIRED_ARG,
    (long) SORT_BUFFER_INIT, (long) (MIN_SORT_BUFFER + MALLOC_OVERHEAD),
-   (long) ~0L, (long) MALLOC_OVERHEAD, (long) 1L, 0},
+   ULONG_MAX, (long) MALLOC_OVERHEAD, (long) 1L, 0},
   { "sort_key_blocks", OPT_SORT_KEY_BLOCKS, "",
    (uchar**) &check_param.sort_key_blocks,
    (uchar**) &check_param.sort_key_blocks, 0, GET_ULONG, REQUIRED_ARG,
@@ -274,7 +274,7 @@ Suma::execSTTOR(Signal* signal) {
       jam();
 
       send_start_me_req(signal);
-      return;
+      DBUG_VOID_RETURN;
     }
   }
 

@@ -322,7 +322,7 @@ Suma::execSTTOR(Signal* signal) {
     if (ERROR_INSERTED(13030))
     {
       ndbout_c("Dont start handover");
-      return;
+      DBUG_VOID_RETURN;
     }
   }//if
 

@@ -332,7 +332,7 @@ Suma::execSTTOR(Signal* signal) {
      * Allow API's to connect
      */
     sendSTTORRY(signal);
-    return;
+    DBUG_VOID_RETURN;
   }
 
   if(startphase == 101)

@@ -345,7 +345,7 @@ Suma::execSTTOR(Signal* signal) {
        */
       c_startup.m_wait_handover= true;
       check_start_handover(signal);
-      return;
+      DBUG_VOID_RETURN;
     }
   }
   sendSTTORRY(signal);

@@ -575,19 +575,19 @@ void Suma::execAPI_FAILREQ(Signal* signal)
     jam();
     sendSignalWithDelay(reference(), GSN_API_FAILREQ, signal,
                         200, signal->getLength());
-    return;
+    DBUG_VOID_RETURN;
   }
 
   if (c_failedApiNodes.get(failedApiNode))
   {
     jam();
-    return;
+    DBUG_VOID_RETURN;
   }
 
   if (!c_subscriber_nodes.get(failedApiNode))
   {
     jam();
-    return;
+    DBUG_VOID_RETURN;
   }
 
   c_failedApiNodes.set(failedApiNode);

@@ -2453,7 +2453,7 @@ Suma::execSUB_START_REQ(Signal* signal){
     jam();
     c_subscriberPool.release(subbPtr);
     sendSubStartRef(signal, SubStartRef::PartiallyConnected);
-    return;
+    DBUG_VOID_RETURN;
   }
 
   DBUG_PRINT("info",("c_subscriberPool size: %d free: %d",

@@ -4289,7 +4289,7 @@ Suma::Restart::runSUMA_START_ME_REQ(Signal* signal, Uint32 sumaRef)
     ref->errorCode = SumaStartMeRef::Busy;
     suma.sendSignal(sumaRef, GSN_SUMA_START_ME_REF, signal,
                     SumaStartMeRef::SignalLength, JBB);
-    return;
+    DBUG_VOID_RETURN;
   }
 
   nodeId = refToNode(sumaRef);