Merge whalegate.ndb.mysql.com:/home/tomas/cge-5.1
into whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-new-ndb-merge

mysql-test/suite/rpl/r/rpl_bug31076.result:
  Auto merged
sql/ha_ndbcluster.cc:
  Auto merged
sql/handler.cc:
  Auto merged
sql/log_event.cc:
  Auto merged
sql/sql_update.cc:
  Auto merged
storage/ndb/include/ndbapi/Ndb.hpp:
  Auto merged
mysql-test/suite/rpl/t/rpl_bug31076.test:
  manual merge
This commit is contained in: commit c84d4b2139
@@ -1,16 +1,22 @@
#############################################################
# Author: Chuck
#############################################################
# Purpose: To test having extra columns on the master WL#3915
# engine inspecific sourced part
#############################################################

# Change Author: Jeb
# Change: Cleanup and extend testing
#############################################################
# TODO: partition specific
# -- source include/have_partition.inc
# Note: Will be done in different test due to NDB using this
# test case.
############################################################

########### Clean up ################
--disable_warnings
--disable_query_log
DROP TABLE IF EXISTS t1,t2,t3,t4,t31;

DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t10,t11,t12,t13,t14,t15,t16,t17,t18,t31;
--enable_query_log
--enable_warnings

@@ -70,154 +76,87 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t31;
#VARCHAR(M)
#


let $binformat = `SHOW VARIABLES LIKE '%binlog_format%'`;
--echo
--echo ***********************************************************
--echo ***********************************************************
--echo ***************** Start of Testing ************************
--echo ***********************************************************
--echo ***********************************************************
--echo * This test format == $binformat and engine == $engine_type
--echo ***********************************************************
--echo ***********************************************************
--echo
--echo ***** Testing more columns on the Master *****
--echo
connection master;
eval CREATE TABLE t1 (f1 INT, f2 INT, f3 INT PRIMARY KEY, f4 CHAR(20),
f5 FLOAT DEFAULT '2.00')
ENGINE=$engine_type;

sync_slave_with_master;
alter table t1 drop f5;

connection master;
INSERT into t1 values (1, 1, 1, 'first', 1.0);

sync_slave_with_master;
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 #
--query_vertical show slave status;
select * from t1 order by f3;

connection master;
DROP TABLE t1;
eval CREATE TABLE t1 (f1 INT, f2 INT, f3 INT PRIMARY KEY, f4 CHAR(20),
f5 FLOAT DEFAULT '2.00',
f6 CHAR(4) DEFAULT 'TEST')
ENGINE=$engine_type;

sync_slave_with_master;
alter table t1 drop f5, drop f6;

connection master;
INSERT into t1 values (1, 1, 1, 'first', 1.0, 'yksi');

sync_slave_with_master;
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 #
--query_vertical show slave status;
select * from t1 order by f3;

connection master;
DROP TABLE t1;
eval CREATE TABLE t1 (f1 INT, f2 INT, f3 INT PRIMARY KEY, f4 CHAR(20),
f5 FLOAT DEFAULT '2.00',
f6 CHAR(4) DEFAULT 'TEST',
f7 INT DEFAULT '0')
ENGINE=$engine_type;

sync_slave_with_master;
alter table t1 drop f5, drop f6, drop f7;

connection master;
INSERT into t1 values (1, 1, 1, 'first', 1.0, 'yksi', 1);
sync_slave_with_master;
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 #
--query_vertical show slave status;
select * from t1 order by f3;

connection master;
DROP TABLE t1;
eval CREATE TABLE t1 (f1 INT, f2 INT, f3 INT PRIMARY KEY, f4 CHAR(20),
f5 FLOAT DEFAULT '2.00',
f6 CHAR(4) DEFAULT 'TEST',
f7 INT DEFAULT '0',
f8 TEXT)
ENGINE=$engine_type;

sync_slave_with_master;
alter table t1 drop f5, drop f6, drop f7, drop f8;

connection master;
INSERT into t1 values (1, 1, 1, 'first', 1.0, 'yksi', 1, 'lounge of happiness');
sync_slave_with_master;
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 #
--query_vertical show slave status;
select * from t1 order by f3;

connection master;
DROP TABLE t1;
eval CREATE TABLE t1 (f1 INT, f2 INT, f3 INT PRIMARY KEY, f4 CHAR(20),
f5 FLOAT DEFAULT '2.00',
f6 CHAR(4) DEFAULT 'TEST',
f7 INT DEFAULT '0',
f8 TEXT,
f9 LONGBLOB)
ENGINE=$engine_type;

sync_slave_with_master;
alter table t1 drop f5, drop f6, drop f7, drop f8, drop f9;

connection master;
INSERT into t1 values (1, 1, 1, 'first', 1.0, 'yksi', 1, 'lounge of happiness', 'very fat blob');
sync_slave_with_master;
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 #
--query_vertical show slave status;
select * from t1 order by f3;

connection master;
DROP TABLE t1;
eval CREATE TABLE t1 (f1 INT, f2 INT, f3 INT PRIMARY KEY, f4 CHAR(20),
f5 FLOAT DEFAULT '2.00',
f6 CHAR(4) DEFAULT 'TEST',
f7 INT DEFAULT '0',
f8 TEXT,
f9 LONGBLOB,
f10 BIT(63))
ENGINE=$engine_type;

sync_slave_with_master;
alter table t1 drop f5, drop f6, drop f7, drop f8, drop f9, drop f10;

connection master;
INSERT into t1 values (1, 1, 1, 'first', 1.0, 'yksi', 1, 'lounge of happiness', 'very fat blob', b'01010101010101');
sync_slave_with_master;
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 #
--query_vertical show slave status;
select * from t1 order by f3;

connection master;
DROP TABLE t1;
eval CREATE TABLE t1 (f1 INT, f2 INT, f3 INT PRIMARY KEY, f4 CHAR(20),
/* extra */
f5 FLOAT DEFAULT '2.00',
f6 CHAR(4) DEFAULT 'TEST',
f7 INT DEFAULT '0',
f8 TEXT,
f9 LONGBLOB,
f10 BIT(63),
f11 VARBINARY(64))
ENGINE=$engine_type;

#connection slave;
sync_slave_with_master;
alter table t1 drop f5, drop f6, drop f7, drop f8, drop f9, drop f10, drop f11;
f5 FLOAT DEFAULT '2.00',
f6 CHAR(4) DEFAULT 'TEST',
f7 INT DEFAULT '0',
f8 TEXT,
f9 LONGBLOB,
f10 BIT(63),
f11 VARBINARY(64))ENGINE=$engine_type;
--echo
--echo * Alter Table on Slave and drop columns f5 through f11 *
--echo
sync_slave_with_master;
alter table t1 drop f5, drop f6, drop f7, drop f8, drop f9, drop f10, drop f11;

--echo
--echo * Insert data in Master then update and delete some rows*
--echo
connection master;
let $j= 50;
--disable_query_log
while ($j)
{
eval INSERT INTO t1 VALUES ($j, $j, $j, 'second', 2.0, 'kaks', 2,
'got stolen from the paradise',
'very fat blob', b'01010101010101',
0x123456);
dec $j;
}
let $j= 30;
while ($j)
{
eval update t1 set f4= 'next' where f1=$j;
dec $j;
dec $j;
eval delete from t1 where f1=$j;
dec $j;
}
--enable_query_log

INSERT into t1 values (1, 1, 1, 'first', 1.0, 'yksi', 1, 'lounge of happiness', 'very fat blob', b'01010101010101', 0x123456);
INSERT into t1 values (2, 2, 2, 'second', 2.0, 'kaks', 2, 'got stolen from the paradise', 'very fat blob', b'01010101010101', 0x123456), (3, 3, 3, 'third', 3.0, 'kolm', 3, 'got stolen from the paradise', 'very fat blob', b'01010101010101', 0x123456);
update t1 set f4= 'next' where f1=1;
delete from t1 where f1=1;

select * from t1 order by f3;

--echo * Select count and 20 rows from Master *
--echo
SELECT COUNT(*) FROM t1;
--echo
SELECT f1,f2,f3,f4,f5,f6,f7,f8,f9,
hex(f10),hex(f11) FROM t1 ORDER BY f3 LIMIT 20;

#connection slave;
sync_slave_with_master;
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 #
--query_vertical show slave status;
select * from t1 order by f3;
sync_slave_with_master;
--echo
--echo * Select count and 20 rows from Slave *
--echo
SELECT COUNT(*) FROM t1;
--echo
SELECT * FROM t1 ORDER BY f3 LIMIT 20;

--echo
--echo * Show Slave Status *
--echo
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 # 35 # 36 #
--query_vertical show slave status;
--echo

### Altering table def scenario
--echo
--echo ***** Testing Altering table def scenario *****
--echo

connection master;

@@ -232,7 +171,7 @@ connection master;
f11 BINARY(20) NOT NULL DEFAULT '\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0',
f12 SET('a', 'b', 'c') default 'b')
ENGINE=$engine_type;

--echo
eval CREATE TABLE t3 (f1 INT, f2 INT, f3 INT PRIMARY KEY, f4 CHAR(20),
/* extra */
f5 DOUBLE DEFAULT '2.00',
@@ -243,7 +182,7 @@ connection master;
f12 SET('a', 'b', 'c') default 'b')
ENGINE=$engine_type;


--echo
# no ENUM and SET
eval CREATE TABLE t4 (f1 INT, f2 INT, f3 INT PRIMARY KEY, f4 CHAR(20),
/* extra */
@@ -256,7 +195,7 @@ connection master;
f11 CHAR(255))
ENGINE=$engine_type;


--echo
eval CREATE TABLE t31 (f1 INT, f2 INT, f3 INT PRIMARY KEY, f4 CHAR(20),

/* extra */
@@ -293,7 +232,9 @@ connection master;
f34 VARBINARY(1025),
f35 VARCHAR(257)
) ENGINE=$engine_type;

--echo
--echo ** Alter tables on slave and drop columns **
--echo
#connection slave;
sync_slave_with_master;
alter table t2 drop f5, drop f6, drop f7, drop f8, drop f9, drop f10, drop f11, drop
@@ -308,8 +249,8 @@ f12;
drop f26, drop f27, drop f28, drop f29, drop f30, drop f31, drop f32,
drop f33, drop f34, drop f35;



--echo
--echo ** Insert Data into Master **
connection master;
INSERT into t2 set f1=1, f2=1, f3=1, f4='first', f8='f8: medium size blob', f10='f10:
some var char';
@@ -458,7 +399,10 @@ binary data';
/*f34 VARBINARY(1025),*/ '3333 minus 3',
/*f35 VARCHAR(257),*/ NULL
);

--echo
--echo ** Sync slave with master **
--echo ** Do selects from tables **
--echo
#connection slave;
sync_slave_with_master;

@@ -469,24 +413,33 @@ binary data';
select * from t31 order by f1;

connection master;

--echo
--echo ** Do updates master **
--echo
update t31 set f5=555555555555555 where f3=6;
update t31 set f2=2 where f3=2;
update t31 set f1=NULL where f3=1;
update t31 set f3=NULL, f27=NULL, f35='f35 new value' where f3=3;

--echo
--echo ** Delete from Master **
--echo

delete from t1;
delete from t2;
delete from t3;
delete from t4;
delete from t31;

--echo
--echo ** Check slave status **
--echo
#connection slave;
sync_slave_with_master;
select * from t31;

--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 #
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 # 35 # 36 #
--query_vertical show slave status;

#### Clean Up ####
@@ -496,11 +449,600 @@ connection master;
--disable_query_log
DROP TABLE t1,t2,t3,t4,t31;

######################################################
#connection slave;
sync_slave_with_master;
--enable_query_log
--enable_warnings
--echo
--echo ****************************************
--echo * columns in master at middle of table *
--echo * Expect: Proper error message *
--echo ****************************************
--echo
--echo ** Stop and Reset Slave **
--echo
STOP SLAVE;
RESET SLAVE;
--echo
--echo ** create table slave side **
eval CREATE TABLE t10 (a INT PRIMARY KEY, b BLOB, c CHAR(5)
) ENGINE=$engine_type;

# END of the tests
--echo
--echo ** Connect to master and create table **
--echo
--connection master
eval CREATE TABLE t10 (a INT KEY, b BLOB, f DOUBLE DEFAULT '233',
c CHAR(5), e INT DEFAULT '1')ENGINE=$engine_type;
RESET MASTER;

--echo
--echo *** Start Slave ***
connection slave;
START SLAVE;

--echo
--echo *** Master Data Insert ***
connection master;
set @b1 = 'b1b1b1b1';
set @b1 = concat(@b1,@b1);
INSERT INTO t10 () VALUES(1,@b1,DEFAULT,'Kyle',DEFAULT),
(2,@b1,DEFAULT,'JOE',DEFAULT),
(3,@b1,DEFAULT,'QA',DEFAULT);

--echo
--echo ********************************************
--echo *** Expect slave to fail with Error 1523 ***
--echo ********************************************
--echo
connection slave;
wait_for_slave_to_stop;
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 # 35 # 36 #
--query_vertical SHOW SLAVE STATUS
SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2;
START SLAVE;

--echo
--echo *** Drop t10 ***
connection master;
DROP TABLE t10;
sync_slave_with_master;

############################################
############## Continued ###################
############################################
--echo
--echo *********************************************
--echo * More columns in master at middle of table *
--echo * Expect: Proper error message *
--echo *********************************************
--echo
--echo *** Create t11 on slave ***
STOP SLAVE;
RESET SLAVE;

eval CREATE TABLE t11 (a INT PRIMARY KEY, b BLOB, c VARCHAR(254)
) ENGINE=$engine_type;

--echo
--echo *** Create t11 on Master ***
connection master;
eval CREATE TABLE t11 (a INT KEY, b BLOB, f TEXT,
c CHAR(5) DEFAULT 'test', e INT DEFAULT '1')ENGINE=$engine_type;

RESET MASTER;

--echo
--echo *** Start Slave ***
connection slave;
START SLAVE;

--echo
--echo *** Master Data Insert ***
connection master;
set @b1 = 'b1b1b1b1';
set @b1 = concat(@b1,@b1);
INSERT INTO t11 () VALUES(1,@b1,'Testing is fun','Kyle',DEFAULT),
(2,@b1,'Testing is cool','JOE',DEFAULT),
(3,@b1,DEFAULT,'QA',DEFAULT);

--echo
--echo ********************************************
--echo *** Expect slave to fail with Error 1523 ***
--echo ********************************************
--echo
connection slave;
wait_for_slave_to_stop;
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 # 35 # 36 #
--query_vertical SHOW SLAVE STATUS
SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2;
START SLAVE;

--echo
--echo *** Drop t11 ***
connection master;
DROP TABLE t11;
sync_slave_with_master;

############################################
############## Continued ###################
############################################
--echo
--echo *********************************************
--echo * More columns in master at middle of table *
--echo * Expect: This one should pass blob-text *
--echo *********************************************
--echo
--echo *** Create t12 on slave ***
STOP SLAVE;
RESET SLAVE;
eval CREATE TABLE t12 (a INT PRIMARY KEY, b BLOB, c BLOB
) ENGINE=$engine_type;

--echo
--echo *** Create t12 on Master ***
connection master;
eval CREATE TABLE t12 (a INT KEY, b BLOB, f TEXT,
c CHAR(5) DEFAULT 'test', e INT DEFAULT '1')ENGINE=$engine_type;

RESET MASTER;

--echo
--echo *** Start Slave ***
connection slave;
START SLAVE;

--echo
--echo *** Master Data Insert ***
connection master;
set @b1 = 'b1b1b1b1';
set @b1 = concat(@b1,@b1);
INSERT INTO t12 () VALUES(1,@b1,'Kyle',DEFAULT,DEFAULT),
(2,@b1,'JOE',DEFAULT,DEFAULT),
(3,@b1,'QA',DEFAULT,DEFAULT);
--echo
SELECT a,hex(b),f,c,e FROM t12 ORDER BY a;

--echo
--echo *** Select on Slave ***
sync_slave_with_master;
SELECT a,hex(b),c FROM t12 ORDER BY a;

--echo
--echo *** Drop t12 ***
connection master;
DROP TABLE t12;
sync_slave_with_master;

############################################
############## Continued ###################
############################################
--echo
--echo ****************************************************
--echo * - Alter Master adding columns at middle of table *
--echo * Expect: columns added *
--echo ****************************************************
--echo
--echo
--echo *** Create t14 on slave ***
STOP SLAVE;
RESET SLAVE;
eval CREATE TABLE t14 (c1 INT PRIMARY KEY, c4 BLOB, c5 CHAR(5)
) ENGINE=$engine_type;

--echo
--echo *** Create t14 on Master ***
connection master;
eval CREATE TABLE t14 (c1 INT KEY, c4 BLOB, c5 CHAR(5),
c6 INT DEFAULT '1',
c7 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP
)ENGINE=$engine_type;

RESET MASTER;

--echo
--echo *** Start Slave ***
connection slave;
START SLAVE;

--echo
--echo *** Master Data Insert ***
connection master;
ALTER TABLE t14 ADD COLUMN c2 DECIMAL(8,2) AFTER c1;
ALTER TABLE t14 ADD COLUMN c3 TEXT AFTER c2;
--echo
set @b1 = 'b1b1b1b1';
set @b1 = concat(@b1,@b1);
INSERT INTO t14 () VALUES(1,1.00,'Replication Testing Extra Col',@b1,'Kyle',DEFAULT,DEFAULT),
(2,2.00,'This Test Should work',@b1,'JOE',DEFAULT,DEFAULT),
(3,3.00,'If is does not, I will open a bug',@b1,'QA',DEFAULT,DEFAULT);
--echo
--replace_column 7 CURRENT_TIMESTAMP
SELECT c1,c2,c3,hex(c4),c5,c6,c7 FROM t14 ORDER BY c1;

--echo
--echo *** Select on Slave ****
sync_slave_with_master;
SELECT c1,c2,c3,hex(c4),c5 FROM t14 ORDER BY c1;

####################################################
--echo
--echo ****************************************************
--echo * - Alter Master Dropping columns from the middle. *
--echo * Expect: columns dropped *
--echo ****************************************************
--echo
--echo *** connect to master and drop columns ***
connection master;
ALTER TABLE t14 DROP COLUMN c2;
ALTER TABLE t14 DROP COLUMN c7;
--echo
--echo *** Select from Master ***
SELECT c1,c3,hex(c4),c5,c6 FROM t14 ORDER BY c1;
--echo

--echo ************
--echo * Bug30415 *
--echo ************
# Uncomment below once fixed

#--echo *** Select from Slave ***
#sync_slave_with_master;
#SELECT c1,c2,c3,hex(c4),c5 FROM t14 ORDER BY c1;

# Bug30415
# Remove below once fixed
#***************************
connection slave;
wait_for_slave_to_stop;
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 # 35 # 36 #
--query_vertical SHOW SLAVE STATUS
#***************************

STOP SLAVE;
RESET SLAVE;

--echo
--echo *** Drop t14 ***
DROP TABLE t14;

connection master;
DROP TABLE t14;
RESET MASTER;

connection slave;
START SLAVE;

#################################################
--echo
--echo *************************************************
--echo * - Alter Master adding columns at end of table *
--echo * Expect: Error 1054 *
--echo *************************************************
--echo
--echo *** Create t15 on slave ***
STOP SLAVE;
RESET SLAVE;
eval CREATE TABLE t15 (c1 INT PRIMARY KEY, c4 BLOB, c5 CHAR(5)
) ENGINE=$engine_type;

--echo
--echo *** Create t15 on Master ***
connection master;
eval CREATE TABLE t15 (c1 INT KEY, c4 BLOB, c5 CHAR(5),
c6 INT DEFAULT '1',
c7 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP
)ENGINE=$engine_type;

RESET MASTER;

--echo
--echo *** Start Slave ***
connection slave;
START SLAVE;

--echo
--echo *** Master Data Insert ***
connection master;
ALTER TABLE t15 ADD COLUMN c2 DECIMAL(8,2) AFTER c7;
set @b1 = 'b1b1b1b1';
set @b1 = concat(@b1,@b1);
INSERT INTO t15 () VALUES(1,@b1,'Kyle',DEFAULT,DEFAULT,3.00),
(2,@b1,'JOE',DEFAULT,DEFAULT,3.00),
(3,@b1,'QA',DEFAULT,DEFAULT,3.00);
--replace_column 5 CURRENT_TIMESTAMP
SELECT c1,hex(c4),c5,c6,c7,c2 FROM t15 ORDER BY c1;

--echo
--echo ********************************************
--echo *** Expect slave to fail with Error 1054 ***
--echo ********************************************
--echo
connection slave;
wait_for_slave_to_stop;
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 # 35 # 36 #
--query_vertical SHOW SLAVE STATUS
STOP SLAVE;
RESET SLAVE;

--echo
--echo *** Drop t15 ***
DROP TABLE t15;

connection master;
DROP TABLE t15;
RESET MASTER;

connection slave;
START SLAVE;

####################################################
--echo
--echo ************************************************
--echo * - Create index on Master column not on slave *
--echo * Expect:Warning *
--echo ************************************************
--echo
--echo *** Create t16 on slave ***
STOP SLAVE;
RESET SLAVE;
eval CREATE TABLE t16 (c1 INT PRIMARY KEY, c4 BLOB, c5 CHAR(5)
) ENGINE=$engine_type;

--echo
--echo *** Create t16 on Master ***
connection master;
eval CREATE TABLE t16 (c1 INT KEY, c4 BLOB, c5 CHAR(5),
c6 INT DEFAULT '1',
c7 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP
)ENGINE=$engine_type;

RESET MASTER;

--echo
--echo *** Start Slave ***
connection slave;
START SLAVE;

--echo
--echo *** Master Create Index and Data Insert ***
connection master;
CREATE INDEX part_of_c6 ON t16 (c6);
set @b1 = 'b1b1b1b1';
set @b1 = concat(@b1,@b1);
INSERT INTO t16 () VALUES(1,@b1,'Kyle',DEFAULT,DEFAULT),
(2,@b1,'JOE',2,DEFAULT),
(3,@b1,'QA',3,DEFAULT);
--replace_column 5 CURRENT_TIMESTAMP
SELECT c1,hex(c4),c5,c6,c7 FROM t16 ORDER BY c1;

# Uncomment the below when bug 30434 is patched

#--echo *** Select on Slave ****
#sync_slave_with_master;
#SELECT c1,hex(c4),c5 FROM t16 ORDER BY c1;
#
#--echo *** Drop t16 ***
#connection master;
#DROP TABLE t16;
#sync_slave_with_master;

# Remove the below when bug 30434 is patched
#*******************************************
--echo
--echo *****************
--echo *** BUG 30434 ***
--echo *****************
--echo
connection slave;
wait_for_slave_to_stop;
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 # 4 # 7 # 8 # 9 # 22 # 23 # 33 # 35 # 36 #
--query_vertical SHOW SLAVE STATUS
STOP SLAVE;
RESET SLAVE;

--echo
--echo *** Drop t16 ***
DROP TABLE t16;

connection master;
DROP TABLE t16;
RESET MASTER;

connection slave;
START SLAVE;
#*******************************************

####################################################
--echo
--echo *****************************************************
--echo * - Delete rows using column on Master not on slave *
--echo * Expect: Rows Deleted *
--echo *****************************************************
--echo
--echo *** Create t17 on slave ***
STOP SLAVE;
RESET SLAVE;
eval CREATE TABLE t17 (c1 INT PRIMARY KEY, c4 BLOB, c5 CHAR(5)
) ENGINE=$engine_type;

--echo
--echo *** Create t17 on Master ***
connection master;
eval CREATE TABLE t17 (c1 INT KEY, c4 BLOB, c5 CHAR(5),
c6 INT DEFAULT '1',
c7 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP
)ENGINE=$engine_type;

RESET MASTER;

--echo
--echo *** Start Slave ***
connection slave;
START SLAVE;

--echo
--echo *** Master Data Insert ***
connection master;
set @b1 = 'b1b1b1b1';
set @b1 = concat(@b1,@b1);
INSERT INTO t17 () VALUES(1,@b1,'Kyle',DEFAULT,DEFAULT),
(2,@b1,'JOE',2,DEFAULT),
(3,@b1,'QA',3,DEFAULT);
--replace_column 5 CURRENT_TIMESTAMP
SELECT c1,hex(c4),c5,c6,c7 FROM t17 ORDER BY c1;

--echo
--echo ** Select * from Slave **
sync_slave_with_master;
SELECT c1,hex(c4),c5 FROM t17 ORDER BY c1;

--echo
--echo ** Delete from master **
connection master;
DELETE FROM t17 WHERE c6 = 3;
--replace_column 5 CURRENT_TIMESTAMP
SELECT c1,hex(c4),c5,c6,c7 FROM t17 ORDER BY c1;

--echo
--echo ** Check slave **
sync_slave_with_master;
SELECT c1,hex(c4),c5 FROM t17 ORDER BY c1;


connection master;
DROP TABLE t17;
sync_slave_with_master;
--echo

####################################################
--echo
--echo *****************************************************
--echo * - Update row using column on Master not on slave *
--echo * Expect: Rows updated *
--echo *****************************************************
--echo
--echo ** Bug30674 **
--echo
--echo *** Create t18 on slave ***
--echo

STOP SLAVE;
RESET SLAVE;
eval CREATE TABLE t18 (c1 INT PRIMARY KEY, c4 BLOB, c5 CHAR(5)
) ENGINE=$engine_type;

--echo
--echo *** Create t18 on Master ***
connection master;
eval CREATE TABLE t18 (c1 INT KEY, c4 BLOB, c5 CHAR(5),
c6 INT DEFAULT '1',
c7 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP
)ENGINE=$engine_type;

RESET MASTER;

--echo
--echo *** Start Slave ***
connection slave;
START SLAVE;

--echo
--echo *** Master Data Insert ***
connection master;
set @b1 = 'b1b1b1b1';
set @b1 = concat(@b1,@b1);

INSERT INTO t18 () VALUES(1,@b1,'Kyle',DEFAULT,DEFAULT),
(2,@b1,'JOE',2,DEFAULT),
(3,@b1,'QA',3,DEFAULT);
--replace_column 5 CURRENT_TIMESTAMP
SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1;

--echo
--echo ** Select * from Slave **
sync_slave_with_master;
SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1;

--echo
--echo ** update from master **
connection master;
#######################################
# This test should be uncommented
# once bug30674 is patched
#######################################

#***************************
#UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3;
#***************************

--replace_column 5 CURRENT_TIMESTAMP
SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1;

--echo
--echo ** Check slave **
sync_slave_with_master;
SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1;

connection master;
DROP TABLE t18;
sync_slave_with_master;
--echo

####################################################
--echo
--echo *****************************************************
--echo * - Insert UUID column on Master not on slave *
--echo * Expect: Rows inserted *
--echo *****************************************************
--echo
--echo *** Create t5 on slave ***
STOP SLAVE;
RESET SLAVE;
eval CREATE TABLE t5 (c1 INT PRIMARY KEY, c4 BLOB, c5 CHAR(5)
) ENGINE=$engine_type;

--echo
--echo *** Create t5 on Master ***
connection master;
eval CREATE TABLE t5 (c1 INT KEY, c4 BLOB, c5 CHAR(5),
c6 LONG,
c7 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP
)ENGINE=$engine_type;

RESET MASTER;

--echo
--echo *** Start Slave ***
connection slave;
START SLAVE;

--echo
--echo *** Master Data Insert ***
connection master;
set @b1 = 'b1b1b1b1';
INSERT INTO t5 () VALUES(1,@b1,'Kyle',UUID(),DEFAULT),
(2,@b1,'JOE',UUID(),DEFAULT),
(3,@b1,'QA',UUID(),DEFAULT);
--replace_column 4 UUID 5 TIME
SELECT c1,hex(c4),c5,c6,c7 FROM t5 ORDER BY c1;

--echo
--echo ** Select * from Slave **
sync_slave_with_master;
SELECT c1,hex(c4),c5 FROM t5 ORDER BY c1;

connection master;
DROP TABLE t5;
sync_slave_with_master;
--echo

# END of 5.1 tests case


@@ -38,7 +38,7 @@ show create database mysqltest3;

connection master;
use mysqltest2;
create table t1 (a int auto_increment primary key, b varchar(100));
--eval create table t1 (a int auto_increment primary key, b varchar(100))engine=$engine_type;
set character_set_client=cp850, collation_connection=latin2_croatian_ci;
insert into t1 (b) values(@@character_set_server);
insert into t1 (b) values(@@collation_server);
@@ -146,13 +146,15 @@ set collation_server=9999998;

select "--- --3943--" as "";
use test;
CREATE TABLE t1 (c1 VARBINARY(255), c2 VARBINARY(255));
--eval CREATE TABLE t1 (c1 VARBINARY(255), c2 VARBINARY(255))ENGINE=$engine_type;
SET CHARACTER_SET_CLIENT=koi8r,
CHARACTER_SET_CONNECTION=cp1251,
CHARACTER_SET_RESULTS=koi8r;
INSERT INTO t1 (c1, c2) VALUES ('îÕ, ÚÁ ÒÙÂÁÌËÕ','îÕ, ÚÁ ÒÙÂÁÌËÕ');
SET SQL_BIG_SELECTS=1;
select hex(c1), hex(c2) from t1;
sync_slave_with_master;
SET SQL_BIG_SELECTS=1;
select hex(c1), hex(c2) from t1;

connection master;
@@ -1888,5 +1888,27 @@ set engine_condition_pushdown = 1;
SELECT fname, lname FROM t1 WHERE (fname like 'Y%') or (lname like 'F%');
fname lname
Young Foo
drop table t1;
create table t1 (a int, b int, c int, d int, primary key using hash(a))
engine=ndbcluster;
insert into t1 values (10,1,100,0+0x1111);
insert into t1 values (20,2,200,0+0x2222);
insert into t1 values (30,3,300,0+0x3333);
insert into t1 values (40,4,400,0+0x4444);
insert into t1 values (50,5,500,0+0x5555);
set engine_condition_pushdown = on;
select a,b,d from t1
where b in (0,1,2,5)
order by b;
a b d
10 1 4369
20 2 8738
50 5 21845
a b d
10 1 4369
20 2 8738
50 5 21845
Warnings:
Warning 4294 Scan filter is too large, discarded
set engine_condition_pushdown = @old_ecpd;
DROP TABLE t1,t2,t3,t4,t5;
mysql-test/suite/ndb/r/ndb_lock_table.result (new file, 11 lines)
@@ -0,0 +1,11 @@
drop table if exists t1;
create table t1 (a int) engine ndb;
set autocommit=1;
lock table t1 write;
set autocommit=0;
insert into t1 values (0);
rollback;
select * from t1;
a
unlock tables;
drop table t1;
@@ -121,3 +121,24 @@ show tables;
Tables_in_db
t2
drop database db;
use test;
create table `test`.`t1$EX`
(server_id int unsigned,
master_server_id int unsigned,
master_epoch bigint unsigned,
count int unsigned,
primary key(server_id, master_server_id,
master_epoch, count))
engine ndb;
show tables like '%$%';
Tables_in_test (%$%)
t1$EX
use test;
show tables like '%$%';
Tables_in_test (%$%)
t1$EX
drop table `test`.`t1$EX`;
show tables like '%$%';
Tables_in_test (%$%)
show tables like '%$%';
Tables_in_test (%$%)
@@ -41,6 +41,14 @@ pk1 b c
10 0 0
12 2 2
14 1 1
create unique index ib on t1(b);
update t1 set c = 4 where pk1 = 12;
update ignore t1 set b = 55 where pk1 = 14;
select * from t1 order by pk1;
pk1 b c
10 0 0
12 2 4
14 55 1
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (a int, b int, KEY (a, b)) ENGINE=ndbcluster;
CREATE TABLE t2 (a int, b int, UNIQUE KEY (a, b)) ENGINE=ndbcluster;
mysql-test/suite/ndb/r/ndb_update_no_read.result (new file, 75 lines)
@@ -0,0 +1,75 @@
DROP TABLE IF EXISTS t1;
create table t1 (a int not null primary key, b int not null, c int,
unique index_b (b) using hash)
engine ndb;
insert into t1 values (1,10,1),(2,9,1),(3,8,1),(4,7,1),(5,6,1),(6,5,2),(7,4,2),(8,3,2),
(9,2,2),(10,1,2);
update t1 set c = 111, b = 20 where a = 1;
select * from t1 where a = 1 order by a;
a b c
1 20 111
delete from t1 where a = 1;
select * from t1 where a = 1 order by a;
a b c
update t1 set c = 12, b = 19 where b = 2;
select * from t1 where b = 2 order by a;
a b c
delete from t1 where b = 19;
select * from t1 where b = 19 order by a;
a b c
update t1 set c = 22 where a = 10 or a >= 10;
select * from t1 order by a;
a b c
2 9 1
3 8 1
4 7 1
5 6 1
6 5 2
7 4 2
8 3 2
10 1 22
update t1 set c = 23 where a in (8,10);
select * from t1 order by a;
a b c
2 9 1
3 8 1
4 7 1
5 6 1
6 5 2
7 4 2
8 3 23
10 1 23
update t1 set c = 23 where a in (7,8) or a >= 10;
select * from t1 order by a;
a b c
2 9 1
3 8 1
4 7 1
5 6 1
6 5 2
7 4 23
8 3 23
10 1 23
update t1 set c = 11 where a = 3 or b = 7;
select * from t1 where a = 3 or b = 7 order by a;
a b c
3 8 11
4 7 11
update t1 set a = 13, b = 20 where a = 3;
select * from t1 where a = 13 order by a;
a b c
13 20 11
update t1 set a = 12, b = 19 where b = 7;
select * from t1 where b = 19 order by a;
a b c
12 19 11
select * from t1 where b = 7 order by a;
a b c
update t1 set c = 12, b = 29 where a = 5 and b = 6;
select * from t1 where b = 19 order by a;
a b c
12 19 11
delete from t1 where b = 6 and c = 12;
select * from t1 where b = 6 order by a;
a b c
drop table t1;
@@ -9,9 +9,6 @@
# Do not use any TAB characters for whitespace.
#
##############################################################################
ndb_dd_sql_features : Bug#29102 ndb_dd_sql_features fails in pushbuild
ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed

partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table

ndb_partition_error2 : HF is not sure if the test can work as internded on all the platforms
File diff suppressed because it is too large
mysql-test/suite/ndb/t/ndb_lock_table.test (new file, 15 lines)
@@ -0,0 +1,15 @@
-- source include/have_ndb.inc

# BUG 30996
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (a int) engine ndb;
set autocommit=1;
lock table t1 write;
set autocommit=0;
insert into t1 values (0);
rollback;
select * from t1;
unlock tables;
drop table t1;
@@ -122,4 +122,31 @@ connection server2;
show tables;
drop database db;


#
# bug#31470, ndb table with special characters in name
# are not discovered correctly
connection server1;
use test;
create table `test`.`t1$EX`
(server_id int unsigned,
master_server_id int unsigned,
master_epoch bigint unsigned,
count int unsigned,
primary key(server_id, master_server_id,
master_epoch, count))
engine ndb;

# check that table shows up ok on both servers
# before bugfix table would not show up on server2
show tables like '%$%';
connection server2;
use test;
show tables like '%$%';

# check cleanup
drop table `test`.`t1$EX`;
show tables like '%$%';

connection server1;
show tables like '%$%';

@@ -35,6 +35,11 @@ UPDATE IGNORE t1 set pk1 = 1, c = 2 where pk1 = 4;
select * from t1 order by pk1;
UPDATE t1 set pk1 = pk1 + 10;
select * from t1 order by pk1;
# bug#25817
create unique index ib on t1(b);
update t1 set c = 4 where pk1 = 12;
update ignore t1 set b = 55 where pk1 = 14;
select * from t1 order by pk1;

--disable_warnings
DROP TABLE IF EXISTS t1;

mysql-test/suite/ndb/t/ndb_update_no_read.test (new file, 79 lines)
@@ -0,0 +1,79 @@
-- source include/have_ndb.inc
-- source include/not_embedded.inc

--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

#
# New test case for WL 3686 (which is not until CGE-6.3)
# but test is committed in 5.1 to verify consistant results.
#
# When only constant expressions in update statements and
# only PK or UK in WHERE clause. No extra WHERE parts are
# allowed. WL #3687 takes of more advanced variants of
# avoiding the read before the update/delete

create table t1 (a int not null primary key, b int not null, c int,
unique index_b (b) using hash)
engine ndb;

insert into t1 values (1,10,1),(2,9,1),(3,8,1),(4,7,1),(5,6,1),(6,5,2),(7,4,2),(8,3,2),
(9,2,2),(10,1,2);

# These ones should use optimisation

update t1 set c = 111, b = 20 where a = 1;

select * from t1 where a = 1 order by a;

delete from t1 where a = 1;

select * from t1 where a = 1 order by a;

update t1 set c = 12, b = 19 where b = 2;

select * from t1 where b = 2 order by a;

delete from t1 where b = 19;

select * from t1 where b = 19 order by a;

update t1 set c = 22 where a = 10 or a >= 10;

select * from t1 order by a;

update t1 set c = 23 where a in (8,10);

select * from t1 order by a;

update t1 set c = 23 where a in (7,8) or a >= 10;

select * from t1 order by a;

# These ones should not use optimisation

update t1 set c = 11 where a = 3 or b = 7;

select * from t1 where a = 3 or b = 7 order by a;

update t1 set a = 13, b = 20 where a = 3;

select * from t1 where a = 13 order by a;

update t1 set a = 12, b = 19 where b = 7;

select * from t1 where b = 19 order by a;

select * from t1 where b = 7 order by a;

update t1 set c = 12, b = 29 where a = 5 and b = 6;

select * from t1 where b = 19 order by a;

delete from t1 where b = 6 and c = 12;

select * from t1 where b = 6 order by a;

drop table t1;
@@ -40,7 +40,7 @@ show create database mysqltest3;
Database Create Database
mysqltest3 CREATE DATABASE `mysqltest3` /*!40100 DEFAULT CHARACTER SET armscii8 COLLATE armscii8_bin */
use mysqltest2;
create table t1 (a int auto_increment primary key, b varchar(100));
create table t1 (a int auto_increment primary key, b varchar(100))engine=myisam;;
set character_set_client=cp850, collation_connection=latin2_croatian_ci;
insert into t1 (b) values(@@character_set_server);
insert into t1 (b) values(@@collation_server);
@@ -117,7 +117,7 @@ master-bin.000001 # Query # # create database mysqltest2 character set latin2
master-bin.000001 # Query # # create database mysqltest3
master-bin.000001 # Query # # drop database mysqltest3
master-bin.000001 # Query # # create database mysqltest3
master-bin.000001 # Query # # use `mysqltest2`; create table t1 (a int auto_increment primary key, b varchar(100))
master-bin.000001 # Query # # use `mysqltest2`; create table t1 (a int auto_increment primary key, b varchar(100))engine=myisam
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
@@ -177,14 +177,16 @@ select "--- --3943--" as "";

--- --3943--
use test;
CREATE TABLE t1 (c1 VARBINARY(255), c2 VARBINARY(255));
CREATE TABLE t1 (c1 VARBINARY(255), c2 VARBINARY(255))ENGINE=myisam;;
SET CHARACTER_SET_CLIENT=koi8r,
CHARACTER_SET_CONNECTION=cp1251,
CHARACTER_SET_RESULTS=koi8r;
INSERT INTO t1 (c1, c2) VALUES ('îÕ, ÚÁ ÒÙÂÁÌËÕ','îÕ, ÚÁ ÒÙÂÁÌËÕ');
SET SQL_BIG_SELECTS=1;
select hex(c1), hex(c2) from t1;
hex(c1) hex(c2)
CDF32C20E7E020F0FBE1E0EBEAF3 CDF32C20E7E020F0FBE1E0EBEAF3
SET SQL_BIG_SELECTS=1;
select hex(c1), hex(c2) from t1;
hex(c1) hex(c2)
CDF32C20E7E020F0FBE1E0EBEAF3 CDF32C20E7E020F0FBE1E0EBEAF3
mysql-test/suite/rpl/r/rpl_row_charset_innodb.result (new file, 215 lines)
@@ -0,0 +1,215 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
set timestamp=1000000000;
drop database if exists mysqltest2;
drop database if exists mysqltest3;
create database mysqltest2 character set latin2;
set @@character_set_server=latin5;
create database mysqltest3;

--- --master--
show create database mysqltest2;
Database Create Database
mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET latin2 */
show create database mysqltest3;
Database Create Database
mysqltest3 CREATE DATABASE `mysqltest3` /*!40100 DEFAULT CHARACTER SET latin5 */

--- --slave--
show create database mysqltest2;
Database Create Database
mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET latin2 */
show create database mysqltest3;
Database Create Database
mysqltest3 CREATE DATABASE `mysqltest3` /*!40100 DEFAULT CHARACTER SET latin5 */
set @@collation_server=armscii8_bin;
drop database mysqltest3;
create database mysqltest3;

--- --master--
show create database mysqltest3;
Database Create Database
mysqltest3 CREATE DATABASE `mysqltest3` /*!40100 DEFAULT CHARACTER SET armscii8 COLLATE armscii8_bin */

--- --slave--
show create database mysqltest3;
Database Create Database
mysqltest3 CREATE DATABASE `mysqltest3` /*!40100 DEFAULT CHARACTER SET armscii8 COLLATE armscii8_bin */
use mysqltest2;
create table t1 (a int auto_increment primary key, b varchar(100))engine=innodb;;
set character_set_client=cp850, collation_connection=latin2_croatian_ci;
insert into t1 (b) values(@@character_set_server);
insert into t1 (b) values(@@collation_server);
insert into t1 (b) values(@@character_set_client);
insert into t1 (b) values(@@character_set_connection);
insert into t1 (b) values(@@collation_connection);

--- --master--
select * from t1 order by a;
a b
1 armscii8
2 armscii8_bin
3 cp850
4 latin2
5 latin2_croatian_ci

--- --slave--
select * from mysqltest2.t1 order by a;
a b
1 armscii8
2 armscii8_bin
3 cp850
4 latin2
5 latin2_croatian_ci
select "--- --muller--" as "";

--- --muller--
set character_set_client=latin1, collation_connection=latin1_german1_ci;
truncate table t1;
insert into t1 (b) values(@@collation_connection);
insert into t1 (b) values(LEAST("Müller","Muffler"));
set collation_connection=latin1_german2_ci;
insert into t1 (b) values(@@collation_connection);
insert into t1 (b) values(LEAST("Müller","Muffler"));

--- --master--
select * from t1 order by a;
a b
1 latin1_german1_ci
2 Muffler
3 latin1_german2_ci
4 Müller

--- --slave--
select * from mysqltest2.t1 order by a;
a b
1 latin1_german1_ci
2 Muffler
3 latin1_german2_ci
4 Müller
select "--- --INSERT--" as "";

--- --INSERT--
set @a= _cp850 'Müller' collate cp850_general_ci;
truncate table t1;
insert into t1 (b) values(collation(@a));

--- --master--
select * from t1 order by a;
a b
1 cp850_general_ci

--- --slave--
select * from mysqltest2.t1 order by a;
a b
1 cp850_general_ci
drop database mysqltest2;
drop database mysqltest3;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # drop database if exists mysqltest2
master-bin.000001 # Query # # drop database if exists mysqltest3
master-bin.000001 # Query # # create database mysqltest2 character set latin2
master-bin.000001 # Query # # create database mysqltest3
master-bin.000001 # Query # # drop database mysqltest3
master-bin.000001 # Query # # create database mysqltest3
master-bin.000001 # Query # # use `mysqltest2`; create table t1 (a int auto_increment primary key, b varchar(100))engine=innodb
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # use `mysqltest2`; truncate table t1
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # use `mysqltest2`; truncate table t1
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # drop database mysqltest2
master-bin.000001 # Query # # drop database mysqltest3
select "--- --global--" as "";

--- --global--
set global character_set_server=latin2;
set global character_set_server=latin1;
set global character_set_server=latin2;
set global character_set_server=latin1;
select "--- --oneshot--" as "";

--- --oneshot--
set one_shot @@character_set_server=latin5;
set @@max_join_size=1000;
select @@character_set_server;
@@character_set_server
latin5
select @@character_set_server;
@@character_set_server
latin1
set @@character_set_server=latin5;
select @@character_set_server;
@@character_set_server
latin5
select @@character_set_server;
@@character_set_server
latin5
set one_shot max_join_size=10;
ERROR HY000: The 'SET ONE_SHOT' syntax is reserved for purposes internal to the MySQL server
set character_set_client=9999999;
ERROR 42000: Unknown character set: '9999999'
set collation_server=9999998;
ERROR HY000: Unknown collation: '9999998'
select "--- --3943--" as "";

--- --3943--
use test;
CREATE TABLE t1 (c1 VARBINARY(255), c2 VARBINARY(255))ENGINE=innodb;;
SET CHARACTER_SET_CLIENT=koi8r,
CHARACTER_SET_CONNECTION=cp1251,
CHARACTER_SET_RESULTS=koi8r;
INSERT INTO t1 (c1, c2) VALUES ('îÕ, ÚÁ ÒÙÂÁÌËÕ','îÕ, ÚÁ ÒÙÂÁÌËÕ');
SET SQL_BIG_SELECTS=1;
select hex(c1), hex(c2) from t1;
hex(c1) hex(c2)
CDF32C20E7E020F0FBE1E0EBEAF3 CDF32C20E7E020F0FBE1E0EBEAF3
SET SQL_BIG_SELECTS=1;
select hex(c1), hex(c2) from t1;
hex(c1) hex(c2)
CDF32C20E7E020F0FBE1E0EBEAF3 CDF32C20E7E020F0FBE1E0EBEAF3
drop table t1;
select "--- --6676--" as "";

--- --6676--
create table `t1` (
`pk` varchar(10) not null default '',
primary key (`pk`)
) engine=innodb default charset=latin1;
set @p=_latin1 'test';
update t1 set pk='test' where pk=@p;
drop table t1;
@@ -12,4 +12,5 @@ set binlog_format=row;
set binlog_format=statement;
-- source extra/rpl_tests/rpl_extraMaster_Col.test


set binlog_format=mixed;
-- source extra/rpl_tests/rpl_extraMaster_Col.test

@@ -11,3 +11,5 @@ set binlog_format=row;
set binlog_format=statement;
-- source extra/rpl_tests/rpl_extraMaster_Col.test

set binlog_format=mixed;
-- source extra/rpl_tests/rpl_extraMaster_Col.test
mysql-test/suite/rpl/t/rpl_row_charset_innodb-master.opt (new file, 1 line)
@@ -0,0 +1 @@
--innodb

mysql-test/suite/rpl/t/rpl_row_charset_innodb-slave.opt (new file, 1 line)
@@ -0,0 +1 @@
--innodb
mysql-test/suite/rpl/t/rpl_row_charset_innodb.test (new file, 9 lines)
@@ -0,0 +1,9 @@
########################################################
|
||||
# By JBM 2005-02-15 Wrapped to allow reuse of test code#
|
||||
# Added to skip if ndb is default #
|
||||
########################################################
|
||||
-- source include/not_ndb_default.inc
|
||||
-- source include/have_binlog_format_row.inc
|
||||
-- source include/master-slave.inc
|
||||
let $engine_type=innodb;
|
||||
-- source extra/rpl_tests/rpl_row_charset.test
|
@ -1,12 +0,0 @@
|
||||
###########################################
|
||||
# Purpose: Wrapper for rpl_extraMaster_Col.test
|
||||
# Using NDB
|
||||
###########################################
|
||||
-- source include/have_ndb.inc
|
||||
-- source include/ndb_master-slave.inc
|
||||
-- source include/have_binlog_format_row.inc
|
||||
|
||||
let $engine_type = 'NDB';
|
||||
|
||||
-- source extra/rpl_tests/rpl_extraMaster_Col.test
|
||||
|
@ -1,13 +0,0 @@
|
||||
###########################################
|
||||
# Purpose: Wrapper for rpl_extraMaster_Col.test
|
||||
# Using NDB
|
||||
###########################################
|
||||
-- source include/have_ndb.inc
|
||||
-- source include/ndb_master-slave.inc
|
||||
-- source include/have_binlog_format_statement.inc
|
||||
|
||||
let $engine_type = 'NDB';
|
||||
|
||||
-- source extra/rpl_tests/rpl_extraMaster_Col.test
|
||||
|
||||
|
@ -40,7 +40,7 @@ show create database mysqltest3;
|
||||
Database Create Database
|
||||
mysqltest3 CREATE DATABASE `mysqltest3` /*!40100 DEFAULT CHARACTER SET armscii8 COLLATE armscii8_bin */
|
||||
use mysqltest2;
|
||||
create table t1 (a int auto_increment primary key, b varchar(100));
|
||||
create table t1 (a int auto_increment primary key, b varchar(100))engine=NDB;;
|
||||
set character_set_client=cp850, collation_connection=latin2_croatian_ci;
|
||||
insert into t1 (b) values(@@character_set_server);
|
||||
insert into t1 (b) values(@@collation_server);
|
||||
@ -117,29 +117,27 @@ master-bin.000001 # Query # # create database mysqltest2 character set latin2
master-bin.000001 # Query # # create database mysqltest3
master-bin.000001 # Query # # drop database mysqltest3
master-bin.000001 # Query # # create database mysqltest3
master-bin.000001 # Query # # use `mysqltest2`; create table t1 (a int auto_increment primary key, b varchar(100))
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # use `mysqltest2`; create table t1 (a int auto_increment primary key, b varchar(100))engine=NDB
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Table_map # # table_id: # (mysql.ndb_apply_status)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # use `mysqltest2`; truncate table t1
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Table_map # # table_id: # (mysql.ndb_apply_status)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # use `mysqltest2`; truncate table t1
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (mysqltest2.t1)
master-bin.000001 # Table_map # # table_id: # (mysql.ndb_apply_status)
master-bin.000001 # Write_rows # # table_id: #
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # drop database mysqltest2
master-bin.000001 # Query # # drop database mysqltest3
select "--- --global--" as "";
@ -177,14 +175,16 @@ select "--- --3943--" as "";

--- --3943--
use test;
CREATE TABLE t1 (c1 VARBINARY(255), c2 VARBINARY(255));
CREATE TABLE t1 (c1 VARBINARY(255), c2 VARBINARY(255))ENGINE=NDB;;
SET CHARACTER_SET_CLIENT=koi8r,
CHARACTER_SET_CONNECTION=cp1251,
CHARACTER_SET_RESULTS=koi8r;
INSERT INTO t1 (c1, c2) VALUES ('îÕ, ÚÁ ÒÙÂÁÌËÕ','îÕ, ÚÁ ÒÙÂÁÌËÕ');
SET SQL_BIG_SELECTS=1;
select hex(c1), hex(c2) from t1;
hex(c1)	hex(c2)
CDF32C20E7E020F0FBE1E0EBEAF3	CDF32C20E7E020F0FBE1E0EBEAF3
SET SQL_BIG_SELECTS=1;
select hex(c1), hex(c2) from t1;
hex(c1)	hex(c2)
CDF32C20E7E020F0FBE1E0EBEAF3	CDF32C20E7E020F0FBE1E0EBEAF3
mysql-test/suite/rpl_ndb/r/rpl_ndb_extraColMaster.result (new file, 2286 lines)
File diff suppressed because it is too large
@ -1,8 +1,8 @@
########################################################
# By JBM 2005-02-15 Wrapped to allow reuse of test code#
########################################################
--source include/have_ndb.inc
-- source include/have_binlog_format_row.inc
-- source include/have_ndb.inc
-- source include/have_binlog_format_mixed_or_row.inc
-- source include/ndb_master-slave.inc
let $engine_type=NDB;
-- source extra/rpl_tests/rpl_row_charset.test
mysql-test/suite/rpl_ndb/t/rpl_ndb_extraColMaster.test (new file, 14 lines)
@ -0,0 +1,14 @@
#############################################################
# Purpose: To test having extra columns on the master WL#3915
#############################################################
-- source include/have_ndb.inc
-- source include/ndb_master-slave.inc
-- source include/have_binlog_format_mixed_or_row.inc

let $engine_type = 'NDB';

set binlog_format=row;
-- source extra/rpl_tests/rpl_extraMaster_Col.test

set binlog_format=mixed;
-- source extra/rpl_tests/rpl_extraMaster_Col.test
@ -325,9 +325,9 @@ Thd_ndb::Thd_ndb()
{
  ndb= new Ndb(g_ndb_cluster_connection, "");
  lock_count= 0;
  start_stmt_count= 0;
  count= 0;
  all= NULL;
  stmt= NULL;
  trans= NULL;
  m_error= FALSE;
  m_error_code= 0;
  query_state&= NDB_QUERY_NORMAL;
@ -382,6 +382,11 @@ Thd_ndb::get_open_table(THD *thd, const void *key)
  {
    thd_ndb_share= (THD_NDB_SHARE *) alloc_root(&thd->transaction.mem_root,
                                                sizeof(THD_NDB_SHARE));
    if (!thd_ndb_share)
    {
      mem_alloc_error(sizeof(THD_NDB_SHARE));
      DBUG_RETURN(NULL);
    }
    thd_ndb_share->key= key;
    thd_ndb_share->stat.last_count= count;
    thd_ndb_share->stat.no_uncommitted_rows_count= 0;
@ -1638,6 +1643,26 @@ int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const uchar *record)
  DBUG_RETURN(0);
}

bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno)
{
  KEY* key_info= table->key_info + keyno;
  KEY_PART_INFO* key_part= key_info->key_part;
  KEY_PART_INFO* end= key_part+key_info->key_parts;
  uint i;
  DBUG_ENTER("check_index_fields_in_write_set");

  for (i= 0; key_part != end; key_part++, i++)
  {
    Field* field= key_part->field;
    if (!bitmap_is_set(table->write_set, field->field_index))
    {
      DBUG_RETURN(false);
    }
  }

  DBUG_RETURN(true);
}

int ha_ndbcluster::set_index_key_from_record(NdbOperation *op,
                                             const uchar *record, uint keyno)
{
@ -1956,8 +1981,8 @@ check_null_in_record(const KEY* key_info, const uchar *record)
 * primary key or unique index values
 */

int ha_ndbcluster::peek_indexed_rows(const uchar *record,
                                     bool check_pk)
int ha_ndbcluster::peek_indexed_rows(const uchar *record,
                                     NDB_WRITE_OP write_op)
{
  NdbTransaction *trans= m_active_trans;
  NdbOperation *op;
@ -1969,7 +1994,7 @@ int ha_ndbcluster::peek_indexed_rows(const uchar *record,
  NdbOperation::LockMode lm=
    (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
  first= NULL;
  if (check_pk && table->s->primary_key != MAX_KEY)
  if (write_op != NDB_UPDATE && table->s->primary_key != MAX_KEY)
  {
    /*
     * Fetch any row with colliding primary key
@ -2019,6 +2044,11 @@ int ha_ndbcluster::peek_indexed_rows(const uchar *record,
        DBUG_PRINT("info", ("skipping check for key with NULL"));
        continue;
      }
      if (write_op != NDB_INSERT && !check_index_fields_in_write_set(i))
      {
        DBUG_PRINT("info", ("skipping check for key %u not in write_set", i));
        continue;
      }
      NdbIndexOperation *iop;
      const NDBINDEX *unique_index = m_index[i].unique_index;
      key_part= key_info->key_part;
@ -2716,7 +2746,7 @@ int ha_ndbcluster::write_row(uchar *record)
      start_bulk_insert will set parameters to ensure that each
      write_row is committed individually
    */
    int peek_res= peek_indexed_rows(record, TRUE);
    int peek_res= peek_indexed_rows(record, NDB_INSERT);

    if (!peek_res)
    {
@ -2765,7 +2795,7 @@ int ha_ndbcluster::write_row(uchar *record)
      if (ndb->getAutoIncrementValue(m_table, g.range, auto_value, 1) == -1)
      {
        if (--retries &&
            ndb->getNdbError().status == NdbError::TemporaryError);
            ndb->getNdbError().status == NdbError::TemporaryError)
        {
          my_sleep(retry_sleep);
          continue;
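The one-character fix above (repeated later in get_auto_increment()) deserves a note: in the old code the trailing semicolon closed the if statement, so the retry block that follows ran unconditionally, sleeping and retrying even on permanent errors. A distilled illustration of the defect, with placeholder names, not MySQL code:

// Distilled form of the defect fixed above:
if (is_temporary_error());  // BUG: ';' gives the if an empty body...
{
  retry();                  // ...so this block runs whether or not it holds
}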
@ -2960,7 +2990,8 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
  if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE ||
                           thd->lex->sql_command == SQLCOM_UPDATE_MULTI))
  {
    int peek_res= peek_indexed_rows(new_data, pk_update);
    NDB_WRITE_OP write_op= (pk_update) ? NDB_PK_UPDATE : NDB_UPDATE;
    int peek_res= peek_indexed_rows(new_data, write_op);

    if (!peek_res)
    {
@ -4327,7 +4358,7 @@ static int ndbcluster_update_apply_status(THD *thd, int do_update)
  Ndb *ndb= thd_ndb->ndb;
  NDBDICT *dict= ndb->getDictionary();
  const NDBTAB *ndbtab;
  NdbTransaction *trans= thd_ndb->all ? thd_ndb->all : thd_ndb->stmt;
  NdbTransaction *trans= thd_ndb->trans;
  ndb->setDatabaseName(NDB_REP_DB);
  Ndb_table_guard ndbtab_g(dict, NDB_APPLY_TABLE);
  if (!(ndbtab= ndbtab_g.get_table()))
@ -4371,10 +4402,110 @@ static int ndbcluster_update_apply_status(THD *thd, int do_update)
}
#endif /* HAVE_NDB_BINLOG */

void ha_ndbcluster::transaction_checks(THD *thd)
{
  if (thd->lex->sql_command == SQLCOM_LOAD)
  {
    m_transaction_on= FALSE;
    /* Would be simpler if has_transactions() didn't always say "yes" */
    thd->transaction.all.modified_non_trans_table=
      thd->transaction.stmt.modified_non_trans_table= TRUE;
  }
  else if (!thd->transaction.on)
    m_transaction_on= FALSE;
  else
    m_transaction_on= thd->variables.ndb_use_transactions;
}

int ha_ndbcluster::start_statement(THD *thd,
                                   Thd_ndb *thd_ndb,
                                   Ndb *ndb)
{
  DBUG_ENTER("ha_ndbcluster::start_statement");
  PRINT_OPTION_FLAGS(thd);

  trans_register_ha(thd, FALSE, ndbcluster_hton);
  if (!thd_ndb->trans)
  {
    if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
      trans_register_ha(thd, TRUE, ndbcluster_hton);
    DBUG_PRINT("trans",("Starting transaction"));
    thd_ndb->trans= ndb->startTransaction();
    if (thd_ndb->trans == NULL)
      ERR_RETURN(ndb->getNdbError());
    thd_ndb->init_open_tables();
    thd_ndb->query_state&= NDB_QUERY_NORMAL;
    thd_ndb->trans_options= 0;
    thd_ndb->m_slow_path= FALSE;
    if (!(thd->options & OPTION_BIN_LOG) ||
        thd->variables.binlog_format == BINLOG_FORMAT_STMT)
    {
      thd_ndb->trans_options|= TNTO_NO_LOGGING;
      thd_ndb->m_slow_path= TRUE;
    }
    else if (thd->slave_thread)
      thd_ndb->m_slow_path= TRUE;
  }
  /*
    If this is the start of a LOCK TABLE, a table lock
    should be taken on the table in NDB

    Check if it should be read or write lock
  */
  if (thd->options & (OPTION_TABLE_LOCK))
  {
    //lockThisTable();
    DBUG_PRINT("info", ("Locking the table..." ));
  }
  DBUG_RETURN(0);
}

int ha_ndbcluster::init_handler_for_statement(THD *thd, Thd_ndb *thd_ndb)
{
  /*
    This is the place to make sure this handler instance
    has a started transaction.

    The transaction is started by the first handler on which
    MySQL Server calls external lock

    Other handlers in the same stmt or transaction should use
    the same NDB transaction. This is done by setting up the m_active_trans
    pointer to point to the NDB transaction.
  */

  DBUG_ENTER("ha_ndbcluster::init_handler_for_statement");
  // store thread specific data first to set the right context
  m_force_send= thd->variables.ndb_force_send;
  m_ha_not_exact_count= !thd->variables.ndb_use_exact_count;
  m_autoincrement_prefetch=
    (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz;

  m_active_trans= thd_ndb->trans;
  DBUG_ASSERT(m_active_trans);
  // Start of transaction
  m_rows_changed= 0;
  m_ops_pending= 0;
  m_slow_path= thd_ndb->m_slow_path;
#ifdef HAVE_NDB_BINLOG
  if (unlikely(m_slow_path))
  {
    if (m_share == ndb_apply_status_share && thd->slave_thread)
      thd_ndb->trans_options|= TNTO_INJECTED_APPLY_STATUS;
  }
#endif
  // TODO remove double pointers...
  if (!(m_thd_ndb_share= thd_ndb->get_open_table(thd, m_table)))
  {
    DBUG_RETURN(1);
  }
  m_table_info= &m_thd_ndb_share->stat;
  DBUG_RETURN(0);
}

int ha_ndbcluster::external_lock(THD *thd, int lock_type)
{
  int error=0;
  NdbTransaction* trans= NULL;
  DBUG_ENTER("external_lock");

  /*
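The comments in init_handler_for_statement() state the invariant this refactoring establishes: one NdbTransaction per connection, held in Thd_ndb::trans, with every handler instance pointing its m_active_trans at it. A minimal sketch of that ownership pattern (simplified names, not the server code):

// Sketch: lazily start one shared transaction, reused by all handlers.
struct ConnCtx { NdbTransaction* trans; };   // stands in for Thd_ndb

int attach_handler(ConnCtx* ctx, Ndb* ndb, NdbTransaction** active)
{
  if (ctx->trans == NULL)            // first handler starts the transaction
  {
    ctx->trans = ndb->startTransaction();
    if (ctx->trans == NULL)
      return 1;                      // real code maps ndb->getNdbError()
  }
  *active = ctx->trans;              // later handlers just attach to it
  return 0;
}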
@ -4395,124 +4526,15 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
  if (lock_type != F_UNLCK)
  {
    DBUG_PRINT("info", ("lock_type != F_UNLCK"));
    if (thd->lex->sql_command == SQLCOM_LOAD)
    {
      m_transaction_on= FALSE;
      /* Would be simpler if has_transactions() didn't always say "yes" */
      thd->transaction.all.modified_non_trans_table= thd->transaction.stmt.modified_non_trans_table= TRUE;
    }
    else if (!thd->transaction.on)
      m_transaction_on= FALSE;
    else
      m_transaction_on= thd->variables.ndb_use_transactions;
    transaction_checks(thd);
    if (!thd_ndb->lock_count++)
    {
      PRINT_OPTION_FLAGS(thd);
      if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
      {
        // Autocommit transaction
        DBUG_ASSERT(!thd_ndb->stmt);
        DBUG_PRINT("trans",("Starting transaction stmt"));

        trans= ndb->startTransaction();
        if (trans == NULL)
        {
          thd_ndb->lock_count= 0;
          ERR_RETURN(ndb->getNdbError());
        }
        thd_ndb->init_open_tables();
        thd_ndb->stmt= trans;
        thd_ndb->query_state&= NDB_QUERY_NORMAL;
        thd_ndb->trans_options= 0;
        thd_ndb->m_slow_path= FALSE;
        if (!(thd->options & OPTION_BIN_LOG) ||
            thd->variables.binlog_format == BINLOG_FORMAT_STMT)
        {
          thd_ndb->trans_options|= TNTO_NO_LOGGING;
          thd_ndb->m_slow_path= TRUE;
        }
        else if (thd->slave_thread)
          thd_ndb->m_slow_path= TRUE;
        trans_register_ha(thd, FALSE, ndbcluster_hton);
      }
      else
      {
        if (!thd_ndb->all)
        {
          // Not autocommit transaction
          // A "master" transaction has not been started yet
          DBUG_PRINT("trans",("starting transaction, all"));

          trans= ndb->startTransaction();
          if (trans == NULL)
          {
            thd_ndb->lock_count= 0;
            ERR_RETURN(ndb->getNdbError());
          }
          thd_ndb->init_open_tables();
          thd_ndb->all= trans;
          thd_ndb->query_state&= NDB_QUERY_NORMAL;
          thd_ndb->trans_options= 0;
          thd_ndb->m_slow_path= FALSE;
          if (!(thd->options & OPTION_BIN_LOG) ||
              thd->variables.binlog_format == BINLOG_FORMAT_STMT)
          {
            thd_ndb->trans_options|= TNTO_NO_LOGGING;
            thd_ndb->m_slow_path= TRUE;
          }
          else if (thd->slave_thread)
            thd_ndb->m_slow_path= TRUE;
          trans_register_ha(thd, TRUE, ndbcluster_hton);

          /*
            If this is the start of a LOCK TABLE, a table lock
            should be taken on the table in NDB

            Check if it should be read or write lock
          */
          if (thd->options & (OPTION_TABLE_LOCK))
          {
            //lockThisTable();
            DBUG_PRINT("info", ("Locking the table..." ));
          }

        }
      }
      if ((error= start_statement(thd, thd_ndb, ndb)))
        goto error;
    }
    /*
      This is the place to make sure this handler instance
      has a started transaction.

      The transaction is started by the first handler on which
      MySQL Server calls external lock

      Other handlers in the same stmt or transaction should use
      the same NDB transaction. This is done by setting up the m_active_trans
      pointer to point to the NDB transaction.
    */

    // store thread specific data first to set the right context
    m_force_send= thd->variables.ndb_force_send;
    m_ha_not_exact_count= !thd->variables.ndb_use_exact_count;
    m_autoincrement_prefetch=
      (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz;

    m_active_trans= thd_ndb->all ? thd_ndb->all : thd_ndb->stmt;
    DBUG_ASSERT(m_active_trans);
    // Start of transaction
    m_rows_changed= 0;
    m_ops_pending= 0;
    m_slow_path= thd_ndb->m_slow_path;
#ifdef HAVE_NDB_BINLOG
    if (unlikely(m_slow_path))
    {
      if (m_share == ndb_apply_status_share && thd->slave_thread)
        thd_ndb->trans_options|= TNTO_INJECTED_APPLY_STATUS;
    }
#endif
    // TODO remove double pointers...
    m_thd_ndb_share= thd_ndb->get_open_table(thd, m_table);
    m_table_info= &m_thd_ndb_share->stat;
    if ((error= init_handler_for_statement(thd, thd_ndb)))
      goto error;
    DBUG_RETURN(0);
  }
  else
  {
@ -4540,16 +4562,19 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
      DBUG_PRINT("trans", ("Last external_lock"));
      PRINT_OPTION_FLAGS(thd);

      if (thd_ndb->stmt)
      if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
      {
        /*
          Unlock is done without a transaction commit / rollback.
          This happens if the thread didn't update any rows
          We must in this case close the transaction to release resources
        */
        DBUG_PRINT("trans",("ending non-updating transaction"));
        ndb->closeTransaction(m_active_trans);
        thd_ndb->stmt= NULL;
        if (thd_ndb->trans)
        {
          /*
            Unlock is done without a transaction commit / rollback.
            This happens if the thread didn't update any rows
            We must in this case close the transaction to release resources
          */
          DBUG_PRINT("trans",("ending non-updating transaction"));
          ndb->closeTransaction(thd_ndb->trans);
          thd_ndb->trans= NULL;
        }
      }
    }
    m_table_info= NULL;
@ -4578,7 +4603,10 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
    if (m_ops_pending)
      DBUG_PRINT("warning", ("ops_pending != 0L"));
    m_ops_pending= 0;
    DBUG_RETURN(0);
  }
error:
  thd_ndb->lock_count--;
  DBUG_RETURN(error);
}

@ -4610,25 +4638,20 @@ int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type)
{
  int error=0;
  DBUG_ENTER("start_stmt");
  PRINT_OPTION_FLAGS(thd);

  Thd_ndb *thd_ndb= get_thd_ndb(thd);
  NdbTransaction *trans= (thd_ndb->stmt)?thd_ndb->stmt:thd_ndb->all;
  if (!trans){
  transaction_checks(thd);
  if (!thd_ndb->start_stmt_count++)
  {
    Ndb *ndb= thd_ndb->ndb;
    DBUG_PRINT("trans",("Starting transaction stmt"));
    trans= ndb->startTransaction();
    if (trans == NULL)
      ERR_RETURN(ndb->getNdbError());
    no_uncommitted_rows_reset(thd);
    thd_ndb->stmt= trans;
    thd_ndb->query_state&= NDB_QUERY_NORMAL;
    trans_register_ha(thd, FALSE, ndbcluster_hton);
    if ((error= start_statement(thd, thd_ndb, ndb)))
      goto error;
  }
  m_active_trans= trans;
  // Start of statement
  m_ops_pending= 0;

  if ((error= init_handler_for_statement(thd, thd_ndb)))
    goto error;
  DBUG_RETURN(0);
error:
  thd_ndb->start_stmt_count--;
  DBUG_RETURN(error);
}

@ -4642,15 +4665,29 @@ static int ndbcluster_commit(handlerton *hton, THD *thd, bool all)
  int res= 0;
  Thd_ndb *thd_ndb= get_thd_ndb(thd);
  Ndb *ndb= thd_ndb->ndb;
  NdbTransaction *trans= all ? thd_ndb->all : thd_ndb->stmt;
  NdbTransaction *trans= thd_ndb->trans;

  DBUG_ENTER("ndbcluster_commit");
  DBUG_PRINT("transaction",("%s",
                            trans == thd_ndb->stmt ?
                            "stmt" : "all"));
  DBUG_ASSERT(ndb);
  if (trans == NULL)
  PRINT_OPTION_FLAGS(thd);
  DBUG_PRINT("enter", ("Commit %s", (all ? "all" : "stmt")));
  thd_ndb->start_stmt_count= 0;
  if (trans == NULL || (!all &&
      thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
  {
    /*
      An oddity in the handler interface is that commit on handlerton
      is called to indicate end of statement only in cases where
      autocommit isn't used and the all flag isn't set.

      We also leave quickly when a transaction hasn't even been started,
      in this case we are safe that no clean up is needed. In this case
      the MySQL Server could handle the query without contacting the
      NDB kernel.
    */
    DBUG_PRINT("info", ("Commit before start or end-of-statement only"));
    DBUG_RETURN(0);
  }

#ifdef HAVE_NDB_BINLOG
  if (unlikely(thd_ndb->m_slow_path))
@ -4671,11 +4708,7 @@ static int ndbcluster_commit(handlerton *hton, THD *thd, bool all)
    ndbcluster_print_error(res, error_op);
  }
  ndb->closeTransaction(trans);

  if (all)
    thd_ndb->all= NULL;
  else
    thd_ndb->stmt= NULL;
  thd_ndb->trans= NULL;

  /* Clear commit_count for tables changed by transaction */
  NDB_SHARE* share;
@ -4704,13 +4737,18 @@ static int ndbcluster_rollback(handlerton *hton, THD *thd, bool all)
  int res= 0;
  Thd_ndb *thd_ndb= get_thd_ndb(thd);
  Ndb *ndb= thd_ndb->ndb;
  NdbTransaction *trans= all ? thd_ndb->all : thd_ndb->stmt;
  NdbTransaction *trans= thd_ndb->trans;

  DBUG_ENTER("ndbcluster_rollback");
  DBUG_PRINT("transaction",("%s",
                            trans == thd_ndb->stmt ?
                            "stmt" : "all"));
  DBUG_ASSERT(ndb && trans);
  DBUG_ASSERT(ndb);
  thd_ndb->start_stmt_count= 0;
  if (trans == NULL || (!all &&
      thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
  {
    /* Ignore end-of-statement until real rollback or commit is called */
    DBUG_PRINT("info", ("Rollback before start or end-of-statement only"));
    DBUG_RETURN(0);
  }

  if (trans->execute(NdbTransaction::Rollback) != 0)
  {
@ -4722,11 +4760,7 @@ static int ndbcluster_rollback(handlerton *hton, THD *thd, bool all)
    ndbcluster_print_error(res, error_op);
  }
  ndb->closeTransaction(trans);

  if (all)
    thd_ndb->all= NULL;
  else
    thd_ndb->stmt= NULL;
  thd_ndb->trans= NULL;

  /* Clear list of tables changed by transaction */
  thd_ndb->changed_tables.empty();
@ -6155,7 +6189,7 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
        ndb->getAutoIncrementValue(m_table, g.range, auto_value, cache_size, increment, offset))
    {
      if (--retries &&
          ndb->getNdbError().status == NdbError::TemporaryError);
          ndb->getNdbError().status == NdbError::TemporaryError)
      {
        my_sleep(retry_sleep);
        continue;
@ -81,6 +81,12 @@ typedef struct ndb_index_data {
  uint index_stat_query_count;
} NDB_INDEX_DATA;

typedef enum ndb_write_op {
  NDB_INSERT = 0,
  NDB_UPDATE = 1,
  NDB_PK_UPDATE = 2
} NDB_WRITE_OP;

typedef union { const NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;

int get_ndb_blobs_value(TABLE* table, NdbValue* value_array,
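Taken together with the peek_indexed_rows() hunks earlier in this diff, the enum encodes which duplicate-key checks each write operation needs. A condensed restatement of that decision logic (illustrative only, not code from the patch):

// NDB_INSERT:    check the primary key and every unique index.
// NDB_UPDATE:    skip the PK check; skip unique indexes whose columns
//                are not in the write_set (untouched keys cannot collide).
// NDB_PK_UPDATE: the update moves the row to a new PK, so check both.
static bool peek_checks_pk(NDB_WRITE_OP op)  { return op != NDB_UPDATE; }
static bool peek_checks_key(NDB_WRITE_OP op, bool key_cols_written)
{ return op == NDB_INSERT || key_cols_written; }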
@ -204,8 +210,8 @@ class Thd_ndb
  Ndb *ndb;
  ulong count;
  uint lock_count;
  NdbTransaction *all;
  NdbTransaction *stmt;
  uint start_stmt_count;
  NdbTransaction *trans;
  bool m_error;
  bool m_slow_path;
  int m_error_code;
@ -438,7 +444,7 @@ private:
                           const NdbOperation *first,
                           const NdbOperation *last,
                           uint errcode);
  int peek_indexed_rows(const uchar *record, bool check_pk);
  int peek_indexed_rows(const uchar *record, NDB_WRITE_OP write_op);
  int fetch_next(NdbScanOperation* op);
  int next_result(uchar *buf);
  int define_read_attrs(uchar* buf, NdbOperation* op);
@ -463,6 +469,7 @@ private:
  friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg);
  int set_primary_key(NdbOperation *op, const uchar *key);
  int set_primary_key_from_record(NdbOperation *op, const uchar *record);
  bool check_index_fields_in_write_set(uint keyno);
  int set_index_key_from_record(NdbOperation *op, const uchar *record,
                                uint keyno);
  int set_bounds(NdbIndexScanOperation*, uint inx, bool rir,
@ -496,6 +503,10 @@ private:
  friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*, bool);
  friend int execute_no_commit_ie(ha_ndbcluster*, NdbTransaction*, bool);

  void transaction_checks(THD *thd);
  int start_statement(THD *thd, Thd_ndb *thd_ndb, Ndb* ndb);
  int init_handler_for_statement(THD *thd, Thd_ndb *thd_ndb);

  NdbTransaction *m_active_trans;
  NdbScanOperation *m_active_cursor;
  const NdbDictionary::Table *m_table;
@ -1338,9 +1338,23 @@ ha_ndbcluster_cond::generate_scan_filter(NdbScanOperation *op)

  if (m_cond_stack)
  {
    NdbScanFilter filter(op);
    NdbScanFilter filter(op, false); // don't abort on too large

    DBUG_RETURN(generate_scan_filter_from_cond(filter));
    int ret=generate_scan_filter_from_cond(filter);
    if (ret != 0)
    {
      const NdbError& err=filter.getNdbError();
      if (err.code == NdbScanFilter::FilterTooLarge)
      {
        // err.message has static storage
        DBUG_PRINT("info", ("%s", err.message));
        push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                     err.code, err.message);
        ret=0;
      }
    }
    if (ret != 0)
      DBUG_RETURN(ret);
  }
  else
  {
@ -1391,7 +1405,7 @@ int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanOperation *op,
{
  KEY_PART_INFO* key_part= key_info->key_part;
  KEY_PART_INFO* end= key_part+key_info->key_parts;
  NdbScanFilter filter(op);
  NdbScanFilter filter(op, true); // abort on too large
  int res;
  DBUG_ENTER("generate_scan_filter_from_key");

@ -2641,8 +2641,7 @@ int ha_create_table_from_engine(THD* thd, const char *db, const char *name)
    frmblob and frmlen are set, write the frm to disk
  */

  (void)strxnmov(path,FN_REFLEN-1,mysql_data_home,FN_ROOTDIR,
                 db,FN_ROOTDIR,name,NullS);
  build_table_filename(path, FN_REFLEN-1, db, name, "", 0);
  // Save the frm file
  error= writefrm(path, frmblob, frmlen);
  my_free(frmblob, MYF(0));
@ -679,7 +679,7 @@ int mysql_update(THD *thd,
    */
    if (will_batch &&
        ((error= table->file->exec_bulk_update(&dup_key_found)) ||
         !dup_key_found))
         dup_key_found))
    {
      if (error)
      {
@ -52,8 +52,7 @@ public:
  // NOTE: in 5.1 ctors and init take size in bytes

  /** Initialize AttributeHeader at location aHeaderPtr */
  static AttributeHeader& init(void* aHeaderPtr, Uint32 anAttributeId,
                               Uint32 aByteSize);
  static void init(Uint32* aHeaderPtr, Uint32 anAttributeId, Uint32 aByteSize);

  /** Returns size of AttributeHeader (usually one or two words) */
  Uint32 getHeaderSize() const; // In 32-bit words
@ -113,10 +112,11 @@ public:
 */

inline
AttributeHeader& AttributeHeader::init(void* aHeaderPtr, Uint32 anAttributeId,
                                       Uint32 aByteSize)
void AttributeHeader::init(Uint32* aHeaderPtr, Uint32 anAttributeId,
                           Uint32 aByteSize)
{
  return * new (aHeaderPtr) AttributeHeader(anAttributeId, aByteSize);
  AttributeHeader ah(anAttributeId, aByteSize);
  *aHeaderPtr = ah.m_value;
}

inline
@ -195,9 +195,11 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
/* 132 not unused */
/* 133 not unused */
#define GSN_CM_HEARTBEAT            134 /* distr. */
/* 135 unused */
/* 136 unused */
/* 137 unused */

#define GSN_PREPARE_COPY_FRAG_REQ   135
#define GSN_PREPARE_COPY_FRAG_REF   136
#define GSN_PREPARE_COPY_FRAG_CONF  137

#define GSN_CM_NODEINFOCONF         138 /* distr. */
#define GSN_CM_NODEINFOREF          139 /* distr. */
#define GSN_CM_NODEINFOREQ          140 /* distr. */
@ -49,6 +49,7 @@ private:
    Uint32 savePointId;
    Uint32 gci;
  };
  Uint32 maxPage;

  /**
   * Previously there was also a scan type
@ -29,7 +29,7 @@ class CopyFragReq {
   */
  friend class Dblqh;
public:
  STATIC_CONST( SignalLength = 9 );
  STATIC_CONST( SignalLength = 10 );

private:
  Uint32 userPtr;
@ -42,6 +42,7 @@ private:
  Uint32 gci;
  Uint32 nodeCount;
  Uint32 nodeList[1];
  //Uint32 maxPage; is stored in nodeList[nodeCount]
};

class CopyFragConf {
@ -95,4 +96,42 @@ struct UpdateFragDistKeyOrd
  STATIC_CONST( SignalLength = 3 );
};

struct PrepareCopyFragReq
{
  STATIC_CONST( SignalLength = 6 );

  Uint32 senderRef;
  Uint32 senderData;
  Uint32 tableId;
  Uint32 fragId;
  Uint32 copyNodeId;
  Uint32 startingNodeId;
};

struct PrepareCopyFragRef
{
  Uint32 senderRef;
  Uint32 senderData;
  Uint32 tableId;
  Uint32 fragId;
  Uint32 copyNodeId;
  Uint32 startingNodeId;
  Uint32 errorCode;

  STATIC_CONST( SignalLength = 7 );
};

struct PrepareCopyFragConf
{
  STATIC_CONST( SignalLength = 7 );

  Uint32 senderRef;
  Uint32 senderData;
  Uint32 tableId;
  Uint32 fragId;
  Uint32 copyNodeId;
  Uint32 startingNodeId;
  Uint32 maxPageNo;
};

#endif
@ -46,6 +46,7 @@ public:
   * Length of signal
   */
  STATIC_CONST( StaticLength = 11 );
  STATIC_CONST( MaxTotalAttrInfo = 0xFFFF );

private:

@ -46,7 +46,7 @@ public:
   */
  STATIC_CONST( StaticLength = 5 );
  STATIC_CONST( OperationLength = 2 );
  STATIC_CONST( SimpleReadBit = (((Uint32)1) << 31) );
  STATIC_CONST( DirtyReadBit = (((Uint32)1) << 31) );

private:

@ -88,5 +88,52 @@ Uint32 ndbGetOwnVersion();

#define NDBD_NODE_VERSION_REP NDB_MAKE_VERSION(6,1,1)

#define NDBD_PREPARE_COPY_FRAG_VERSION NDB_MAKE_VERSION(6,2,1)
#define NDBD_PREPARE_COPY_FRAG_V2_51 NDB_MAKE_VERSION(5,1,23)
#define NDBD_PREPARE_COPY_FRAG_V2_62 NDB_MAKE_VERSION(6,2,8)
#define NDBD_PREPARE_COPY_FRAG_V2_63 NDB_MAKE_VERSION(6,3,6)

/**
 * 0 = NO PREP COPY FRAG SUPPORT
 * 1 = NO MAX PAGE SUPPORT
 * 2 = LATEST VERSION
 */
static
inline
int
ndb_check_prep_copy_frag_version(Uint32 version)
{
  if (version == NDB_VERSION_D)
    return 2;

  const Uint32 major = (version >> 16) & 0xFF;
  const Uint32 minor = (version >> 8) & 0xFF;
  if (major >= 6)
  {
    if (minor == 2)
    {
      if (version >= NDBD_PREPARE_COPY_FRAG_V2_62)
        return 2;
      if (version >= NDBD_PREPARE_COPY_FRAG_VERSION)
        return 1;
      return 0;
    }
    else if (minor == 3)
    {
      if (version >= NDBD_PREPARE_COPY_FRAG_V2_63)
        return 2;
      return 1;
    }
    return 2;
  }
  else if (major == 5 && minor == 1)
  {
    if (version >= NDBD_PREPARE_COPY_FRAG_V2_51)
      return 2;
  }

  return 0;
}

#endif
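Worked examples may help when reading the branches above; these values follow directly from the macros (computed by hand, not taken from the source):

// ndb_check_prep_copy_frag_version() by example:
//   NDB_MAKE_VERSION(5,1,22) -> 0   below 5.1.23, no PREPARE_COPY_FRAG at all
//   NDB_MAKE_VERSION(5,1,23) -> 2   >= NDBD_PREPARE_COPY_FRAG_V2_51
//   NDB_MAKE_VERSION(6,2,1)  -> 1   prepare supported, but no max-page field
//   NDB_MAKE_VERSION(6,2,8)  -> 2   >= NDBD_PREPARE_COPY_FRAG_V2_62
//   NDB_MAKE_VERSION(6,3,5)  -> 1   6.3 series below 6.3.6
//   NDB_MAKE_VERSION(6,3,6)  -> 2   >= NDBD_PREPARE_COPY_FRAG_V2_63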
@ -1056,6 +1056,7 @@ class Ndb
  friend class NdbBlob;
  friend class NdbImpl;
  friend class Ndb_internal;
  friend class NdbScanFilterImpl;
#endif

public:
@ -93,8 +93,9 @@ public:
    ,LM_CommittedRead      ///< Ignore locks, read last committed value
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
    = 2,
    LM_Dirty = 2
    LM_Dirty = 2,
#endif
    LM_SimpleRead = 3      ///< Read with shared lock, but release lock directly
  };

  /**
@ -842,8 +843,10 @@ protected:
  virtual ~NdbOperation();
  void next(NdbOperation*);       // Set next pointer
  NdbOperation* next();           // Get next pointer

public:
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
  NdbTransaction* getNdbTransaction();
  const NdbOperation* next() const;
  const NdbRecAttr* getFirstRecAttr() const;
#endif
@ -17,7 +17,8 @@ class Ndb;
class NdbPool;

bool
create_instance(Uint32 max_ndb_objects,
create_instance(Ndb_cluster_connection* cc,
                Uint32 max_ndb_objects,
                Uint32 no_conn_obj,
                Uint32 init_no_ndb_objects);

@ -17,6 +17,7 @@
#define NDB_SCAN_FILTER_HPP

#include <ndb_types.h>
#include <ndbapi_limits.h>

/**
 * @class NdbScanFilter
@ -31,8 +32,13 @@ public:
  /**
   * Constructor
   * @param op  The NdbOperation that the filter belongs to (is applied to).
   * @param abort_on_too_large  abort transaction on filter too large
   *                            default: true
   * @param max_size  Maximum size of generated filter in words
   */
  NdbScanFilter(class NdbOperation * op);
  NdbScanFilter(class NdbOperation * op,
                bool abort_on_too_large = true,
                Uint32 max_size = NDB_MAX_SCANFILTER_SIZE_IN_WORDS);
  ~NdbScanFilter();

  /**
@ -166,6 +172,27 @@ public:
  /** @} *********************************************************************/
#endif

  enum Error {
    FilterTooLarge = 4294
  };

  /**
   * Get filter level error.
   *
   * Most errors are set only on operation level, and they abort the
   * transaction. The error FilterTooLarge is set on filter level and
   * by default it propagates to operation level and also aborts the
   * transaction.
   *
   * If option abort_on_too_large is set to false, then FilterTooLarge
   * does not propagate. One can then either ignore this error (in
   * which case no filtering is done) or try to define a new filter
   * immediately.
   */
  const class NdbError & getNdbError() const;
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
  NdbOperation * getNdbOperation();
#endif
private:
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
  friend class NdbScanFilterImpl;
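The generate_scan_filter() hunk earlier in this changeset is the intended calling pattern for this API; a stripped-down application-side sketch under the same assumptions looks like this (build_my_filter() is a placeholder for the caller's own filter-definition code):

NdbScanFilter filter(op, false);     // don't abort txn on a too-large filter
if (build_my_filter(filter) != 0)    // placeholder: begin()/cmp()/end() calls
{
  const NdbError& err = filter.getNdbError();
  if (err.code == NdbScanFilter::FilterTooLarge)
  {
    // Filter was dropped: the scan runs unfiltered, so the caller must
    // re-evaluate the condition on every returned row.
  }
}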
@ -170,6 +170,15 @@ public:
#endif
  };

#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
  /**
   * Convenience method to fetch this transaction's Ndb* object
   */
  Ndb * getNdb() {
    return theNdb;
  }
#endif

#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
  /**
   * Get an NdbOperation for a table.
@ -26,4 +26,6 @@
#define NDB_MAX_TUPLE_SIZE (NDB_MAX_TUPLE_SIZE_IN_WORDS*4)
#define NDB_MAX_ACTIVE_EVENTS 100

#define NDB_MAX_SCANFILTER_SIZE_IN_WORDS 50000

#endif
storage/ndb/include/util/ndb_rand.h (new file, 33 lines)
@ -0,0 +1,33 @@
/* Copyright (C) 2003 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

#ifndef NDB_RAND_H
#define NDB_RAND_H

#define NDB_RAND_MAX 32767

#ifdef __cplusplus
extern "C" {
#endif

int ndb_rand(void);

void ndb_srand(unsigned seed);

#ifdef __cplusplus
}
#endif

#endif
@ -498,10 +498,10 @@ void getTextTransReportCounters(QQQQ) {
  // -------------------------------------------------------------------
  BaseString::snprintf(m_text, m_text_len,
                       "Trans. Count = %u, Commit Count = %u, "
                       "Read Count = %u, Simple Read Count = %u,\n"
                       "Read Count = %u, Simple Read Count = %u, "
                       "Write Count = %u, AttrInfo Count = %u, "
                       "Concurrent Operations = %u, Abort Count = %u\n"
                       " Scans: %u Range scans: %u",
                       "Concurrent Operations = %u, Abort Count = %u"
                       " Scans = %u Range scans = %u",
                       theData[1],
                       theData[2],
                       theData[3],
@ -797,9 +797,9 @@ void getTextBackupFailedToStart(QQQQ) {
}
void getTextBackupCompleted(QQQQ) {
  BaseString::snprintf(m_text, m_text_len,
                       "Backup %u started from node %u completed\n"
                       " StartGCP: %u StopGCP: %u\n"
                       " #Records: %u #LogRecords: %u\n"
                       "Backup %u started from node %u completed."
                       " StartGCP: %u StopGCP: %u"
                       " #Records: %u #LogRecords: %u"
                       " Data: %u bytes Log: %u bytes",
                       theData[2], refToNode(theData[1]),
                       theData[3], theData[4], theData[6], theData[8],
@ -640,5 +640,9 @@ const GsnName SignalNames [] = {

  ,{ GSN_ROUTE_ORD,             "ROUTE_ORD" }
  ,{ GSN_NODE_VERSION_REP,      "NODE_VERSION_REP" }

  ,{ GSN_PREPARE_COPY_FRAG_REQ,  "PREPARE_COPY_FRAG_REQ" }
  ,{ GSN_PREPARE_COPY_FRAG_REF,  "PREPARE_COPY_FRAG_REF" }
  ,{ GSN_PREPARE_COPY_FRAG_CONF, "PREPARE_COPY_FRAG_CONF" }
};
const unsigned short NO_OF_SIGNAL_NAMES = sizeof(SignalNames)/sizeof(GsnName);
@ -51,11 +51,11 @@ printTCKEYCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receive
          (TcKeyConf::getMarkerFlag(confInfo) == 0)?"false":"true");
  fprintf(output, "Operations:\n");
  for(i = 0; i < noOfOp; i++) {
    if(sig->operations[i].attrInfoLen > TcKeyConf::SimpleReadBit)
    if(sig->operations[i].attrInfoLen > TcKeyConf::DirtyReadBit)
      fprintf(output,
              " apiOperationPtr: H'%.8x, simplereadnode: %u\n",
              sig->operations[i].apiOperationPtr,
              sig->operations[i].attrInfoLen & (~TcKeyConf::SimpleReadBit));
              sig->operations[i].attrInfoLen & (~TcKeyConf::DirtyReadBit));
    else
      fprintf(output,
              " apiOperationPtr: H'%.8x, attrInfoLen: %u\n",
@ -24,7 +24,8 @@ libgeneral_la_SOURCES = \
        uucode.c random.c version.c \
        strdup.c \
        ConfigValues.cpp ndb_init.c basestring_vsnprintf.c \
        Bitmask.cpp
        Bitmask.cpp \
        ndb_rand.c

EXTRA_PROGRAMS = testBitmask
testBitmask_SOURCES = testBitmask.cpp
storage/ndb/src/common/util/ndb_rand.c (new file, 40 lines)
@ -0,0 +1,40 @@
/* Copyright (C) 2003 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

#include <ndb_rand.h>

static unsigned long next= 1;

/**
 * ndb_rand
 *
 * constant time, cheap, pseudo-random number generator.
 *
 * NDB_RAND_MAX assumed to be 32767
 *
 * This is the POSIX example for "generating the same sequence on
 * different machines". Although that is not one of our requirements.
 */
int ndb_rand(void)
{
  next= next * 1103515245 + 12345;
  return((unsigned)(next/65536) % 32768);
}

void ndb_srand(unsigned seed)
{
  next= seed;
}
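A small usage sketch (hypothetical caller, not part of the patch): seed once per process with ndb_srand(), then draw bounded values, e.g. for randomized delays of the kind the error-injection code in this changeset uses:

#include <ndb_rand.h>

// Returns a pseudo-random delay in [dmin, dmax); assumes dmax > dmin
// and (dmax - dmin) <= NDB_RAND_MAX.
static unsigned bounded_delay(unsigned dmin, unsigned dmax)
{
  return dmin + (unsigned)ndb_rand() % (dmax - dmin);
}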
@ -145,7 +145,7 @@ CPCDAPISession::CPCDAPISession(NDB_SOCKET_TYPE sock,
  : SocketServer::Session(sock)
  , m_cpcd(cpcd)
{
  m_input = new SocketInputStream(sock);
  m_input = new SocketInputStream(sock, 7*24*60*60000);
  m_output = new SocketOutputStream(sock);
  m_parser = new Parser<CPCDAPISession>(commands, *m_input, true, true, true);
}
@ -3,15 +3,15 @@ Next NDBCNTR 1002
Next NDBFS 2000
Next DBACC 3002
Next DBTUP 4029
Next DBLQH 5045
Next DBLQH 5047
Next DBDICT 6008
Next DBDIH 7186
Next DBTC 8053
Next DBDIH 7193
Next DBTC 8054
Next CMVMI 9000
Next BACKUP 10038
Next DBUTIL 11002
Next DBTUX 12008
Next SUMA 13001
Next SUMA 13034

TESTING NODE FAILURE, ARBITRATION
---------------------------------
@ -157,6 +157,9 @@ And crash when all have "not" been sent
7027: Crash in master when changing state to LCP_TAB_SAVED
7018: Crash in master when changing state to LCP_TAB_SAVED

7191: Crash when receiving LCP_COMPLETE_REP
7192: Crash in setLcpActiveStatusStart - when dead node missed two LCP's

ERROR CODES FOR TESTING NODE FAILURE, FAILURE IN COPY FRAGMENT PROCESS:
-----------------------------------------------------------------------

@ -183,6 +186,8 @@ handling in DBTC to ensure that node failures are also well handled in
time-out handling. They can also be used to test multiple node failure
handling.

5045: Crash in PREPARE_COPY_FRAG_REQ
5046: Crash if LQHKEYREQ (NrCopy) comes when frag-state is incorrect

ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBLQH
-------------------------------------------------
@ -248,6 +253,8 @@ Delay execution of ABORTCONF signal 2 seconds to generate time-out.

8050: Send ZABORT_TIMEOUT_BREAK delayed

8053: Crash in timeOutFoundLab, state CS_WAIT_COMMIT_CONF

ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBTC
-------------------------------------------------

@ -1124,6 +1124,38 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal)
  }
#endif
#endif

  if (arg == 9999)
  {
    Uint32 delay = 1000;
    switch(signal->getLength()){
    case 1:
      break;
    case 2:
      delay = signal->theData[1];
      break;
    default:{
      Uint32 dmin = signal->theData[1];
      Uint32 dmax = signal->theData[2];
      delay = dmin + (rand() % (dmax - dmin));
      break;
    }
    }

    signal->theData[0] = 9999;
    if (delay == 0)
    {
      execNDB_TAMPER(signal);
    }
    else if (delay < 10)
    {
      sendSignal(reference(), GSN_NDB_TAMPER, signal, 1, JBB);
    }
    else
    {
      sendSignalWithDelay(reference(), GSN_NDB_TAMPER, signal, delay, 1);
    }
  }
}//Cmvmi::execDUMP_STATE_ORD()

void
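For reference, the three payload shapes handled by the arg == 9999 branch above, with 500/3000 as example values (a summary of the code, not new behavior):

// theData = { 9999 }             length 1 -> delay = 1000 ms (default)
// theData = { 9999, 500 }        length 2 -> delay = 500 ms
// theData = { 9999, 500, 3000 }  length 3 -> delay = 500 + rand() % 2500 ms
// delay == 0 executes NDB_TAMPER immediately; delay < 10 sends it without
// delay; otherwise it goes out via sendSignalWithDelay(..., delay, 1).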
@ -318,6 +318,7 @@ public:
  Uint8 noOfStartedChkpt;

  MasterLCPConf::State lcpStateAtTakeOver;
  Uint32 m_remove_node_from_table_lcp_id;
};
typedef Ptr<NodeRecord> NodeRecordPtr;
/**********************************************************************/
@ -544,7 +545,8 @@ public:
    TO_WAIT_ENDING = 21,
    ENDING = 22,

    STARTING_LOCAL_FRAGMENTS = 24
    STARTING_LOCAL_FRAGMENTS = 24,
    PREPARE_COPY = 25
  };
  enum ToSlaveStatus {
    TO_SLAVE_IDLE = 0,
@ -555,6 +557,7 @@ public:
    TO_SLAVE_COPY_COMPLETED = 5
  };
  Uint32 startGci;
  Uint32 maxPage;
  Uint32 toCopyNode;
  Uint32 toCurrentFragid;
  Uint32 toCurrentReplica;
@ -671,6 +674,8 @@ private:
  void execNODE_FAILREP(Signal *);
  void execCOPY_FRAGCONF(Signal *);
  void execCOPY_FRAGREF(Signal *);
  void execPREPARE_COPY_FRAG_REF(Signal*);
  void execPREPARE_COPY_FRAG_CONF(Signal*);
  void execDIADDTABREQ(Signal *);
  void execDIGETNODESREQ(Signal *);
  void execDIRELEASEREQ(Signal *);
@ -1113,6 +1118,7 @@ private:
  void sendStartTo(Signal *, Uint32 takeOverPtr);
  void startNextCopyFragment(Signal *, Uint32 takeOverPtr);
  void toCopyFragLab(Signal *, Uint32 takeOverPtr);
  void toStartCopyFrag(Signal *, TakeOverRecordPtr);
  void startHsAddFragConfLab(Signal *);
  void prepareSendCreateFragReq(Signal *, Uint32 takeOverPtr);
  void sendUpdateTo(Signal *, Uint32 takeOverPtr, Uint32 updateState);
@ -259,6 +259,11 @@ Dbdih::Dbdih(Block_context& ctx):

  addRecSignal(GSN_START_FRAGREF,
               &Dbdih::execSTART_FRAGREF);

  addRecSignal(GSN_PREPARE_COPY_FRAG_REF,
               &Dbdih::execPREPARE_COPY_FRAG_REF);
  addRecSignal(GSN_PREPARE_COPY_FRAG_CONF,
               &Dbdih::execPREPARE_COPY_FRAG_CONF);

  apiConnectRecord = 0;
  connectRecord = 0;
@ -3155,6 +3155,94 @@ void Dbdih::toCopyFragLab(Signal* signal,
  TakeOverRecordPtr takeOverPtr;
  RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);

  /**
   * Inform starting node that TakeOver is about to start
   */
  Uint32 nodeId = takeOverPtr.p->toStartingNode;

  Uint32 version = getNodeInfo(nodeId).m_version;
  if (ndb_check_prep_copy_frag_version(version))
  {
    jam();
    TabRecordPtr tabPtr;
    tabPtr.i = takeOverPtr.p->toCurrentTabref;
    ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);

    FragmentstorePtr fragPtr;
    getFragstore(tabPtr.p, takeOverPtr.p->toCurrentFragid, fragPtr);
    Uint32 nodes[MAX_REPLICAS];
    extractNodeInfo(fragPtr.p, nodes);

    PrepareCopyFragReq* req= (PrepareCopyFragReq*)signal->getDataPtrSend();
    req->senderRef = reference();
    req->senderData = takeOverPtrI;
    req->tableId = takeOverPtr.p->toCurrentTabref;
    req->fragId = takeOverPtr.p->toCurrentFragid;
    req->copyNodeId = nodes[0]; // Src
    req->startingNodeId = takeOverPtr.p->toStartingNode; // Dst
    Uint32 ref = calcLqhBlockRef(takeOverPtr.p->toStartingNode);

    sendSignal(ref, GSN_PREPARE_COPY_FRAG_REQ, signal,
               PrepareCopyFragReq::SignalLength, JBB);

    takeOverPtr.p->toMasterStatus = TakeOverRecord::PREPARE_COPY;
    return;
  }

  takeOverPtr.p->maxPage = RNIL;
  toStartCopyFrag(signal, takeOverPtr);
}

void
Dbdih::execPREPARE_COPY_FRAG_REF(Signal* signal)
{
  jamEntry();
  PrepareCopyFragRef ref = *(PrepareCopyFragRef*)signal->getDataPtr();

  TakeOverRecordPtr takeOverPtr;
  RETURN_IF_TAKE_OVER_INTERRUPTED(ref.senderData, takeOverPtr);

  ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::PREPARE_COPY);

  /**
   * Treat this as copy frag ref
   */
  CopyFragRef * cfref = (CopyFragRef*)signal->getDataPtrSend();
  cfref->userPtr = ref.senderData;
  cfref->startingNodeId = ref.startingNodeId;
  cfref->errorCode = ref.errorCode;
  cfref->tableId = ref.tableId;
  cfref->fragId = ref.fragId;
  cfref->sendingNodeId = ref.copyNodeId;
  takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_FRAG;
  execCOPY_FRAGREF(signal);
}

void
Dbdih::execPREPARE_COPY_FRAG_CONF(Signal* signal)
{
  PrepareCopyFragConf conf = *(PrepareCopyFragConf*)signal->getDataPtr();

  TakeOverRecordPtr takeOverPtr;
  RETURN_IF_TAKE_OVER_INTERRUPTED(conf.senderData, takeOverPtr);

  Uint32 version = getNodeInfo(refToNode(conf.senderRef)).m_version;
  if (ndb_check_prep_copy_frag_version(version) >= 2)
  {
    jam();
    takeOverPtr.p->maxPage = conf.maxPageNo;
  }
  else
  {
    jam();
    takeOverPtr.p->maxPage = RNIL;
  }
  toStartCopyFrag(signal, takeOverPtr);
}

void
Dbdih::toStartCopyFrag(Signal* signal, TakeOverRecordPtr takeOverPtr)
{
  CreateReplicaRecordPtr createReplicaPtr;
  createReplicaPtr.i = 0;
  ptrAss(createReplicaPtr, createReplicaRecord);
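Read together, the three routines above add a handshake in front of the copy-fragment phase. A condensed view of the signal flow (summarizing the code, not adding behavior):

// Master DIH (toCopyFragLab)           Starting node LQH
//   PREPARE_COPY_FRAG_REQ  --------->  (prepare fragment for copy)
//   <---------  PREPARE_COPY_FRAG_CONF { ..., maxPageNo }
// execPREPARE_COPY_FRAG_CONF:
//   maxPage = maxPageNo if the peer supports it (version check >= 2),
//   else RNIL; then toStartCopyFrag() -> CREATE_FRAGREQ ... and finally
//   COPY_FRAGREQ with maxPage appended after nodeList (see the
//   execCREATE_FRAGCONF hunk below).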
@ -3178,8 +3266,8 @@ void Dbdih::toCopyFragLab(Signal* signal,
  createReplicaPtr.p->hotSpareUse = true;
  createReplicaPtr.p->dataNodeId = takeOverPtr.p->toStartingNode;

  prepareSendCreateFragReq(signal, takeOverPtrI);
}//Dbdih::toCopyFragLab()
  prepareSendCreateFragReq(signal, takeOverPtr.i);
}//Dbdih::toStartCopy()

void Dbdih::prepareSendCreateFragReq(Signal* signal, Uint32 takeOverPtrI)
{
@ -3412,10 +3500,12 @@ void Dbdih::execCREATE_FRAGCONF(Signal* signal)
    copyFragReq->schemaVersion = tabPtr.p->schemaVersion;
    copyFragReq->distributionKey = fragPtr.p->distributionKey;
    copyFragReq->gci = gci;
    copyFragReq->nodeCount = extractNodeInfo(fragPtr.p,
                                             copyFragReq->nodeList);
    Uint32 len = copyFragReq->nodeCount =
      extractNodeInfo(fragPtr.p,
                      copyFragReq->nodeList);
    copyFragReq->nodeList[len] = takeOverPtr.p->maxPage;
    sendSignal(ref, GSN_COPY_FRAGREQ, signal,
               CopyFragReq::SignalLength + copyFragReq->nodeCount, JBB);
               CopyFragReq::SignalLength + len, JBB);
  } else {
    ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COMMIT_CREATE);
    jam();
@ -4576,13 +4666,22 @@ void Dbdih::checkTakeOverInMasterStartNodeFailure(Signal* signal,
    ok = true;
    jam();
    //-----------------------------------------------------------------------
    // The starting node will discover the problem. We will receive either
    // The copying node will discover the problem. We will receive either
    // COPY_FRAGREQ or COPY_FRAGCONF and then we can release the take over
    // record and end the process. If the copying node should also die then
    // we will try to send prepare create fragment and will then discover
    // that the starting node has failed.
    //-----------------------------------------------------------------------
    break;
  case TakeOverRecord::PREPARE_COPY:
    ok = true;
    jam();
    /**
     * We're waiting for the starting node...which just died...
     * endTakeOver
     */
    endTakeOver(takeOverPtr.i);
    break;
  case TakeOverRecord::COPY_ACTIVE:
    ok = true;
    jam();
@ -5069,6 +5168,18 @@ void Dbdih::startRemoveFailedNode(Signal* signal, NodeRecordPtr failedNodePtr)
    return;
  }

  /**
   * If node has not completed LCP
   * we need to remove it as undo might not be complete
   * bug#31257
   */
  failedNodePtr.p->m_remove_node_from_table_lcp_id = RNIL;
  if (c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.isWaitingFor(failedNodePtr.i))
  {
    jam();
    failedNodePtr.p->m_remove_node_from_table_lcp_id = SYSFILE->latestLCP_ID;
  }

  jam();
  signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
  signal->theData[1] = failedNodePtr.i;
@ -5710,6 +5821,11 @@ void Dbdih::removeNodeFromTable(Signal* signal,
    return;
  }//if

  NodeRecordPtr nodePtr;
  nodePtr.i = nodeId;
  ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
  const Uint32 lcpId = nodePtr.p->m_remove_node_from_table_lcp_id;

  /**
   * For each fragment
   */
@ -5717,7 +5833,6 @@ void Dbdih::removeNodeFromTable(Signal* signal,
  Uint32 noOfRemovedLcpReplicas = 0;  // No of replicas in LCP removed
  Uint32 noOfRemainingLcpReplicas = 0;// No of replicas in LCP remaining

  //const Uint32 lcpId = SYSFILE->latestLCP_ID;
  const bool lcpOngoingFlag = (tabPtr.p->tabLcpStatus== TabRecord::TLS_ACTIVE);
  const bool unlogged = (tabPtr.p->tabStorage != TabRecord::ST_NORMAL);

@ -5752,6 +5867,23 @@ void Dbdih::removeNodeFromTable(Signal* signal,
        noOfRemovedLcpReplicas ++;
        replicaPtr.p->lcpOngoingFlag = false;
      }

      if (lcpId != RNIL)
      {
        jam();
        Uint32 lcpNo = prevLcpNo(replicaPtr.p->nextLcp);
        if (replicaPtr.p->lcpStatus[lcpNo] == ZVALID &&
            replicaPtr.p->lcpId[lcpNo] == SYSFILE->latestLCP_ID)
        {
          jam();
          replicaPtr.p->lcpStatus[lcpNo] = ZINVALID;
          replicaPtr.p->lcpId[lcpNo] = 0;
          replicaPtr.p->nextLcp = lcpNo;
          ndbout_c("REMOVING lcp: %u from table: %u frag: %u node: %u",
                   SYSFILE->latestLCP_ID,
                   tabPtr.i, fragNo, nodeId);
        }
      }
    }
  }
  if (!found)
@ -10898,6 +11030,8 @@ void Dbdih::execLCP_COMPLETE_REP(Signal* signal)
{
  jamEntry();

  CRASH_INSERTION(7191);

#if 0
  g_eventLogger.info("LCP_COMPLETE_REP");
  printLCP_COMPLETE_REP(stdout,
@ -13657,6 +13791,7 @@ void Dbdih::setLcpActiveStatusStart(Signal* signal)
      // It must be taken over with the copy fragment process after a system
      // crash. We indicate this by setting the active status to TAKE_OVER.
      /*-------------------------------------------------------------------*/
      c_lcpState.m_participatingLQH.set(nodePtr.i);
      nodePtr.p->activeStatus = Sysfile::NS_TakeOver;
      //break; // Fall through
    case Sysfile::NS_TakeOver:{
@ -13699,6 +13834,7 @@ void Dbdih::setLcpActiveStatusStart(Signal* signal)
      break;
    case Sysfile::NS_ActiveMissed_2:
      jam();
      CRASH_INSERTION(7192);
      if ((nodePtr.p->nodeStatus == NodeRecord::ALIVE) &&
          (!nodePtr.p->copyCompleted)) {
        jam();
@ -2025,7 +2025,6 @@ public:
    Uint8 reclenAiLqhkey;
    Uint8 m_offset_current_keybuf;
    Uint8 replicaType;
    Uint8 simpleRead;
    Uint8 seqNoReplica;
    Uint8 tcNodeFailrec;
    Uint8 m_disk_table;
@ -2145,6 +2144,7 @@ private:
  void execSTORED_PROCCONF(Signal* signal);
  void execSTORED_PROCREF(Signal* signal);
  void execCOPY_FRAGREQ(Signal* signal);
  void execPREPARE_COPY_FRAG_REQ(Signal* signal);
  void execUPDATE_FRAG_DIST_KEY_ORD(Signal*);
  void execCOPY_ACTIVEREQ(Signal* signal);
  void execCOPY_STATEREQ(Signal* signal);
@ -310,6 +310,9 @@ Dblqh::Dblqh(Block_context& ctx):
  addRecSignal(GSN_UPDATE_FRAG_DIST_KEY_ORD,
               &Dblqh::execUPDATE_FRAG_DIST_KEY_ORD);

  addRecSignal(GSN_PREPARE_COPY_FRAG_REQ,
               &Dblqh::execPREPARE_COPY_FRAG_REQ);

  initData();

#ifdef VM_TRACE
@ -3496,7 +3496,6 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
  regTcPtr->dirtyOp = LqhKeyReq::getDirtyFlag(Treqinfo);
  regTcPtr->opExec = LqhKeyReq::getInterpretedFlag(Treqinfo);
  regTcPtr->opSimple = LqhKeyReq::getSimpleFlag(Treqinfo);
  regTcPtr->simpleRead = op == ZREAD && regTcPtr->opSimple;
  regTcPtr->seqNoReplica = LqhKeyReq::getSeqNoReplica(Treqinfo);
  UintR TreclenAiLqhkey = LqhKeyReq::getAIInLqhKeyReq(Treqinfo);
  regTcPtr->apiVersionNo = 0;
@ -3513,9 +3512,15 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
    regTcPtr->lockType =
      op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? ZINSERT : (Operation_t) op;
  }

  if (regTcPtr->dirtyOp)
  {
    ndbrequire(regTcPtr->opSimple);
  }

  CRASH_INSERTION2(5041, regTcPtr->simpleRead &&
                   refToNode(signal->senderBlockRef()) != cownNodeid);
  CRASH_INSERTION2(5041, (op == ZREAD &&
                          (regTcPtr->opSimple || regTcPtr->dirtyOp) &&
                          refToNode(signal->senderBlockRef()) != cownNodeid));

  regTcPtr->reclenAiLqhkey = TreclenAiLqhkey;
  regTcPtr->currReclenAi = TreclenAiLqhkey;
@ -3665,6 +3670,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
    {
      ndbout_c("fragptr.p->fragStatus: %d",
               fragptr.p->fragStatus);
      CRASH_INSERTION(5046);
    }
    ndbassert(fragptr.p->fragStatus == Fragrecord::ACTIVE_CREATION);
    fragptr.p->m_copy_started_state = Fragrecord::AC_NR_COPY;
@ -3687,8 +3693,8 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
  Uint8 TdistKey = LqhKeyReq::getDistributionKey(TtotReclenAi);
  if ((tfragDistKey != TdistKey) &&
      (regTcPtr->seqNoReplica == 0) &&
      (regTcPtr->dirtyOp == ZFALSE) &&
      (regTcPtr->simpleRead == ZFALSE)) {
      (regTcPtr->dirtyOp == ZFALSE))
  {
    /* ----------------------------------------------------------------------
     * WE HAVE DIFFERENT OPINION THAN THE DIH THAT STARTED THE TRANSACTION.
     * THE REASON COULD BE THAT THIS IS AN OLD DISTRIBUTION WHICH IS NO LONGER
@@ -4778,7 +4784,18 @@ void Dblqh::tupkeyConfLab(Signal* signal)

TRACE_OP(regTcPtr, "TUPKEYCONF");

if (regTcPtr->simpleRead) {
if (readLen != 0)
{
jam();

/* SET BIT 15 IN REQINFO */
LqhKeyReq::setApplicationAddressFlag(regTcPtr->reqinfo, 1);
regTcPtr->readlenAi = readLen;
}//if

if (regTcPtr->operation == ZREAD &&
(regTcPtr->opSimple || regTcPtr->dirtyOp))
{
jam();
/* ----------------------------------------------------------------------
* THE OPERATION IS A SIMPLE READ.

@@ -4792,14 +4809,6 @@ void Dblqh::tupkeyConfLab(Signal* signal)
commitContinueAfterBlockedLab(signal);
return;
}//if
if (readLen != 0)
{
jam();

/* SET BIT 15 IN REQINFO */
LqhKeyReq::setApplicationAddressFlag(regTcPtr->reqinfo, 1);
regTcPtr->readlenAi = readLen;
}//if
regTcPtr->totSendlenAi = writeLen;
ndbrequire(regTcPtr->totSendlenAi == regTcPtr->currTupAiLen);
@@ -5178,12 +5187,15 @@ void Dblqh::packLqhkeyreqLab(Signal* signal)
/* */
/* ------------------------------------------------------------------------- */
sendLqhkeyconfTc(signal, regTcPtr->tcBlockref);
if (regTcPtr->dirtyOp != ZTRUE) {
if (! (regTcPtr->dirtyOp ||
(regTcPtr->operation == ZREAD && regTcPtr->opSimple)))
{
jam();
regTcPtr->transactionState = TcConnectionrec::PREPARED;
releaseOprec(signal);
} else {
jam();

/*************************************************************>*/
/* DIRTY WRITES ARE USED IN TWO SITUATIONS. THE FIRST */
/* SITUATION IS WHEN THEY ARE USED TO UPDATE COUNTERS AND*/
@@ -6406,8 +6418,8 @@ void Dblqh::commitContinueAfterBlockedLab(Signal* signal)
Ptr<TcConnectionrec> regTcPtr = tcConnectptr;
Ptr<Fragrecord> regFragptr = fragptr;
Uint32 operation = regTcPtr.p->operation;
Uint32 simpleRead = regTcPtr.p->simpleRead;
Uint32 dirtyOp = regTcPtr.p->dirtyOp;
Uint32 opSimple = regTcPtr.p->opSimple;
if (regTcPtr.p->activeCreat != Fragrecord::AC_IGNORED) {
if (operation != ZREAD) {
TupCommitReq * const tupCommitReq =

@@ -6465,20 +6477,29 @@ void Dblqh::commitContinueAfterBlockedLab(Signal* signal)
EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1);
}

if (simpleRead) {
if (dirtyOp)
{
jam();
/* ------------------------------------------------------------------------- */
/*THE OPERATION WAS A SIMPLE READ THUS THE COMMIT PHASE IS ONLY NEEDED TO */
/*RELEASE THE LOCKS. AT THIS POINT IN THE CODE THE LOCKS ARE RELEASED AND WE */
/*ARE IN A POSITION TO SEND LQHKEYCONF TO TC. WE WILL ALSO RELEASE ALL */
/*RESOURCES BELONGING TO THIS OPERATION SINCE NO MORE WORK WILL BE */
/*PERFORMED. */
/* ------------------------------------------------------------------------- */
/**
* The dirtyRead does not send anything but TRANSID_AI from LDM
*/
fragptr = regFragptr;
tcConnectptr = regTcPtr;
cleanUp(signal);
return;
}//if
}

/**
* The simpleRead will send a LQHKEYCONF
* but have already released the locks
*/
if (opSimple)
{
fragptr = regFragptr;
tcConnectptr = regTcPtr;
packLqhkeyreqLab(signal);
return;
}
}
}//if
jamEntry();
@@ -7088,7 +7109,7 @@ void Dblqh::abortStateHandlerLab(Signal* signal)
/* ------------------------------------------------------------------------- */
return;
}//if
if (regTcPtr->simpleRead) {
if (regTcPtr->opSimple) {
jam();
/* ------------------------------------------------------------------------- */
/*A SIMPLE READ IS CURRENTLY RELEASING THE LOCKS OR WAITING FOR ACCESS TO */

@@ -7356,7 +7377,8 @@ void Dblqh::continueAbortLab(Signal* signal)
void Dblqh::continueAfterLogAbortWriteLab(Signal* signal)
{
TcConnectionrec * const regTcPtr = tcConnectptr.p;
if (regTcPtr->simpleRead) {
if (regTcPtr->operation == ZREAD && regTcPtr->dirtyOp)
{
jam();
TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend();
@@ -10062,6 +10084,86 @@ Dblqh::calculateHash(Uint32 tableId, const Uint32* src)
return md5_hash(Tmp, keyLen);
}//Dblqh::calculateHash()

/**
* PREPARE COPY FRAG REQ
*/
void
Dblqh::execPREPARE_COPY_FRAG_REQ(Signal* signal)
{
jamEntry();
PrepareCopyFragReq req = *(PrepareCopyFragReq*)signal->getDataPtr();

CRASH_INSERTION(5045);

tabptr.i = req.tableId;
ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);

Uint32 max_page = RNIL;

if (getOwnNodeId() != req.startingNodeId)
{
jam();
/**
* This is currently dead code...
* but is provided so we can impl. a better scan+delete on
* starting node wo/ having to change running node
*/
ndbrequire(getOwnNodeId() == req.copyNodeId);
c_tup->get_frag_info(req.tableId, req.fragId, &max_page);

PrepareCopyFragConf* conf = (PrepareCopyFragConf*)signal->getDataPtrSend();
conf->senderData = req.senderData;
conf->senderRef = reference();
conf->tableId = req.tableId;
conf->fragId = req.fragId;
conf->copyNodeId = req.copyNodeId;
conf->startingNodeId = req.startingNodeId;
conf->maxPageNo = max_page;
sendSignal(req.senderRef, GSN_PREPARE_COPY_FRAG_CONF,
signal, PrepareCopyFragConf::SignalLength, JBB);

return;
}

if (! DictTabInfo::isOrderedIndex(tabptr.p->tableType))
{
jam();
ndbrequire(getFragmentrec(signal, req.fragId));

/**
*
*/
if (cstartType == NodeState::ST_SYSTEM_RESTART)
{
jam();
signal->theData[0] = fragptr.p->tabRef;
signal->theData[1] = fragptr.p->fragId;
sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
}

/**
*
*/
fragptr.p->m_copy_started_state = Fragrecord::AC_IGNORED;
fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION;
fragptr.p->logFlag = Fragrecord::STATE_FALSE;

c_tup->get_frag_info(req.tableId, req.fragId, &max_page);
}

PrepareCopyFragConf* conf = (PrepareCopyFragConf*)signal->getDataPtrSend();
conf->senderData = req.senderData;
conf->senderRef = reference();
conf->tableId = req.tableId;
conf->fragId = req.fragId;
conf->copyNodeId = req.copyNodeId;
conf->startingNodeId = req.startingNodeId;
conf->maxPageNo = max_page;
sendSignal(req.senderRef, GSN_PREPARE_COPY_FRAG_CONF,
signal, PrepareCopyFragConf::SignalLength, JBB);
}
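The handler above is a plain request/confirm round trip: every PREPARE_COPY_FRAG_REQ is answered with a PREPARE_COPY_FRAG_CONF that echoes the request identifiers back and adds maxPageNo. A minimal standalone sketch of that contract, using simplified stand-in structs rather than the real NDB signal classes:

// Sketch only: simplified stand-ins for the signal layouts shown above.
#include <cstdint>

struct PrepareCopyFragReqS {          // hypothetical plain-struct mirror
  uint32_t senderData, senderRef;
  uint32_t tableId, fragId;
  uint32_t copyNodeId, startingNodeId;
};

struct PrepareCopyFragConfS {
  uint32_t senderData, senderRef;
  uint32_t tableId, fragId;
  uint32_t copyNodeId, startingNodeId;
  uint32_t maxPageNo;                 // sentinel when no page count is known
};

const uint32_t RNIL_SENTINEL = 0xFFFFFF00;  // assumption: stand-in for RNIL

// Echo the request into the confirm, attaching the fragment's page count.
PrepareCopyFragConfS make_conf(const PrepareCopyFragReqS& req,
                               uint32_t max_page /* RNIL_SENTINEL if n/a */)
{
  PrepareCopyFragConfS conf;
  conf.senderData = req.senderData;
  conf.senderRef = req.senderRef;
  conf.tableId = req.tableId;
  conf.fragId = req.fragId;
  conf.copyNodeId = req.copyNodeId;
  conf.startingNodeId = req.startingNodeId;
  conf.maxPageNo = max_page;
  return conf;
}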
/* *************************************** */
/* COPY_FRAGREQ: Start copying a fragment */
/* *************************************** */

@@ -10097,6 +10199,13 @@ void Dblqh::execCOPY_FRAGREQ(Signal* signal)
for (i = 0; i<nodeCount; i++)
nodemask.set(copyFragReq->nodeList[i]);
}
Uint32 maxPage = copyFragReq->nodeList[nodeCount];
Uint32 version = getNodeInfo(refToNode(userRef)).m_version;
if (ndb_check_prep_copy_frag_version(version) < 2)
{
jam();
maxPage = RNIL;
}

if (DictTabInfo::isOrderedIndex(tabptr.p->tableType)) {
jam();

@@ -10172,14 +10281,15 @@ void Dblqh::execCOPY_FRAGREQ(Signal* signal)
req->requestInfo = 0;
AccScanReq::setLockMode(req->requestInfo, 0);
AccScanReq::setReadCommittedFlag(req->requestInfo, 0);
AccScanReq::setNRScanFlag(req->requestInfo, gci ? 1 : 0);
AccScanReq::setNRScanFlag(req->requestInfo, 1);
AccScanReq::setNoDiskScanFlag(req->requestInfo, 1);

req->transId1 = tcConnectptr.p->transid[0];
req->transId2 = tcConnectptr.p->transid[1];
req->savePointId = tcConnectptr.p->savePointId;
req->maxPage = maxPage;
sendSignal(scanptr.p->scanBlockref, GSN_ACC_SCANREQ, signal,
AccScanReq::SignalLength, JBB);
AccScanReq::SignalLength + 1, JBB);

if (! nodemask.isclear())
{

@@ -14084,11 +14194,16 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal)
fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION;
}

c_tup->disk_restart_mark_no_lcp(tabptr.i, fragId);
c_tup->disk_restart_lcp_id(tabptr.i, fragId, RNIL);
jamEntry();

return;
}//if
}
else
{
jam();
c_tup->disk_restart_lcp_id(tabptr.i, fragId, lcpId);
jamEntry();
}

c_lcpId = (c_lcpId == 0 ? lcpId : c_lcpId);
c_lcpId = (c_lcpId < lcpId ? c_lcpId : lcpId);

@@ -19022,7 +19137,6 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
ndbout << " operation = " << tcRec.p->operation<<endl;
ndbout << " tcNodeFailrec = " << tcRec.p->tcNodeFailrec
<< " seqNoReplica = " << tcRec.p->seqNoReplica
<< " simpleRead = " << tcRec.p->simpleRead
<< endl;
ndbout << " replicaType = " << tcRec.p->replicaType
<< " reclenAiLqhkey = " << tcRec.p->reclenAiLqhkey
@@ -786,6 +786,7 @@ public:
UintR apiConnect; /* POINTER TO API CONNECT RECORD */
UintR nextTcConnect; /* NEXT TC RECORD*/
Uint8 dirtyOp;
Uint8 opSimple;
Uint8 lastReplicaNo; /* NUMBER OF THE LAST REPLICA IN THE OPERATION */
Uint8 noOfNodes; /* TOTAL NUMBER OF NODES IN OPERATION */
Uint8 operation; /* OPERATION TYPE */

@@ -886,13 +887,8 @@ public:
Uint8 opExec;

Uint8 unused;
Uint8 unused1;

/**
* IS THE OPERATION A SIMPLE TRANSACTION
* 0 = NO, 1 = YES
*/
Uint8 opSimple;

//---------------------------------------------------
// Second 16 byte cache line in second 64 byte cache
// line. Diverse use.

@@ -1464,7 +1460,7 @@ private:
void releaseAttrinfo();
void releaseGcp(Signal* signal);
void releaseKeys();
void releaseSimpleRead(Signal*, ApiConnectRecordPtr, TcConnectRecord*);
void releaseDirtyRead(Signal*, ApiConnectRecordPtr, TcConnectRecord*);
void releaseDirtyWrite(Signal* signal);
void releaseTcCon();
void releaseTcConnectFail(Signal* signal);

@@ -1620,7 +1616,7 @@ private:
void startphase1x010Lab(Signal* signal);

void lqhKeyConf_checkTransactionState(Signal * signal,
ApiConnectRecord * const regApiPtr);
Ptr<ApiConnectRecord> regApiPtr);

void checkDropTab(Signal* signal);
@@ -20,6 +20,7 @@
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <my_sys.h>
#include <ndb_rand.h>

#include <signaldata/EventReport.hpp>
#include <signaldata/TcKeyReq.hpp>

@@ -2790,9 +2791,9 @@ void Dbtc::execTCKEYREQ(Signal* signal)
Uint8 TNoDiskFlag = TcKeyReq::getNoDiskFlag(Treqinfo);
Uint8 TexecuteFlag = TexecFlag;

regCachePtr->opSimple = TSimpleFlag;
regCachePtr->opExec = TInterpretedFlag;
regTcPtr->dirtyOp = TDirtyFlag;
regTcPtr->opSimple = TSimpleFlag;
regCachePtr->opExec = TInterpretedFlag;
regCachePtr->distributionKeyIndicator = TDistrKeyFlag;
regCachePtr->m_no_disk_flag = TNoDiskFlag;
@@ -3246,9 +3247,10 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
LqhKeyReq::setScanTakeOverFlag(tslrAttrLen, regCachePtr->scanTakeOverInd);

Tdata10 = 0;
sig0 = regCachePtr->opSimple;
sig0 = regTcPtr->opSimple;
sig1 = regTcPtr->operation;
bool simpleRead = (sig1 == ZREAD && sig0 == ZTRUE);
sig2 = regTcPtr->dirtyOp;
bool dirtyRead = (sig1 == ZREAD && sig2 == ZTRUE);
LqhKeyReq::setKeyLen(Tdata10, regCachePtr->keylen);
LqhKeyReq::setLastReplicaNo(Tdata10, regTcPtr->lastReplicaNo);
if (unlikely(version < NDBD_ROWID_VERSION))

@@ -3261,7 +3263,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
// Indicate Application Reference is present in bit 15
/* ---------------------------------------------------------------------- */
LqhKeyReq::setApplicationAddressFlag(Tdata10, 1);
LqhKeyReq::setDirtyFlag(Tdata10, regTcPtr->dirtyOp);
LqhKeyReq::setDirtyFlag(Tdata10, sig2);
LqhKeyReq::setInterpretedFlag(Tdata10, regCachePtr->opExec);
LqhKeyReq::setSimpleFlag(Tdata10, sig0);
LqhKeyReq::setOperation(Tdata10, sig1);

@@ -3322,7 +3324,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
sig5 = regTcPtr->clientData;
sig6 = regCachePtr->scanInfo;

if (! simpleRead)
if (! dirtyRead)
{
regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[0]);
regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[1]);
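The two booleans computed above encode the read classes that the rest of the patch keeps apart: a simple read is ZREAD with the simple flag set, a dirty read is ZREAD with the dirty flag set. A compact sketch of the same classification, with hypothetical enum values standing in for the Z* constants:

// Sketch only: hypothetical constants standing in for ZREAD/ZTRUE above.
#include <cstdint>

enum Operation : uint32_t { OP_READ = 0, OP_UPDATE = 1, OP_INSERT = 2 };

struct OpFlags {
  uint32_t operation;  // sig1 in the code above
  uint32_t opSimple;   // sig0: 1 = simple flag set
  uint32_t dirtyOp;    // sig2: 1 = dirty flag set
};

// simple read: committed read that takes a lock and releases it at LQH
inline bool is_simple_read(const OpFlags& f)
{ return f.operation == OP_READ && f.opSimple == 1; }

// dirty read: read committed without ever taking a lock
inline bool is_dirty_read(const OpFlags& f)
{ return f.operation == OP_READ && f.dirtyOp == 1; }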
@@ -3395,7 +3397,6 @@ void Dbtc::packLqhkeyreq040Lab(Signal* signal,
BlockReference TBRef)
{
TcConnectRecord * const regTcPtr = tcConnectptr.p;
CacheRecord * const regCachePtr = cachePtr.p;
#ifdef ERROR_INSERT
ApiConnectRecord * const regApiPtr = apiConnectptr.p;
if (ERROR_INSERTED(8009)) {

@@ -3420,8 +3421,8 @@ void Dbtc::packLqhkeyreq040Lab(Signal* signal,
if (anAttrBufIndex == RNIL) {
UintR TtcTimer = ctcTimer;
UintR Tread = (regTcPtr->operation == ZREAD);
UintR Tsimple = (regCachePtr->opSimple == ZTRUE);
UintR Tboth = Tread & Tsimple;
UintR Tdirty = (regTcPtr->dirtyOp == ZTRUE);
UintR Tboth = Tread & Tdirty;
setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
jam();
/*--------------------------------------------------------------------

@@ -3430,7 +3431,7 @@ void Dbtc::packLqhkeyreq040Lab(Signal* signal,
releaseAttrinfo();
if (Tboth) {
jam();
releaseSimpleRead(signal, apiConnectptr, tcConnectptr.p);
releaseDirtyRead(signal, apiConnectptr, tcConnectptr.p);
return;
}//if
regTcPtr->tcConnectstate = OS_OPERATING;

@@ -3490,11 +3491,11 @@ void Dbtc::releaseAttrinfo()
}//Dbtc::releaseAttrinfo()

/* ========================================================================= */
/* ------- RELEASE ALL RECORDS CONNECTED TO A SIMPLE OPERATION ------- */
/* ------- RELEASE ALL RECORDS CONNECTED TO A DIRTY OPERATION ------- */
/* ========================================================================= */
void Dbtc::releaseSimpleRead(Signal* signal,
ApiConnectRecordPtr regApiPtr,
TcConnectRecord* regTcPtr)
void Dbtc::releaseDirtyRead(Signal* signal,
ApiConnectRecordPtr regApiPtr,
TcConnectRecord* regTcPtr)
{
Uint32 Ttckeyrec = regApiPtr.p->tckeyrec;
Uint32 TclientData = regTcPtr->clientData;
@@ -3504,7 +3505,7 @@ void Dbtc::releaseSimpleRead(Signal* signal,
ConnectionState state = regApiPtr.p->apiConnectstate;

regApiPtr.p->tcSendArray[Ttckeyrec] = TclientData;
regApiPtr.p->tcSendArray[Ttckeyrec + 1] = TcKeyConf::SimpleReadBit | Tnode;
regApiPtr.p->tcSendArray[Ttckeyrec + 1] = TcKeyConf::DirtyReadBit | Tnode;
regApiPtr.p->tckeyrec = Ttckeyrec + 2;

unlinkReadyTcCon(signal);

@@ -3534,8 +3535,8 @@ void Dbtc::releaseSimpleRead(Signal* signal,
/**
* Emulate LQHKEYCONF
*/
lqhKeyConf_checkTransactionState(signal, regApiPtr.p);
}//Dbtc::releaseSimpleRead()
lqhKeyConf_checkTransactionState(signal, regApiPtr);
}//Dbtc::releaseDirtyRead()

/* ------------------------------------------------------------------------- */
/* ------- CHECK IF ALL TC CONNECTIONS ARE COMPLETED ------- */
@@ -3717,12 +3718,13 @@ void Dbtc::execLQHKEYCONF(Signal* signal)
TCKEY_abort(signal, 29);
return;
}//if
ApiConnectRecord * const regApiPtr =
&localApiConnectRecord[TapiConnectptrIndex];
Ptr<ApiConnectRecord> regApiPtr;
regApiPtr.i = TapiConnectptrIndex;
regApiPtr.p = &localApiConnectRecord[TapiConnectptrIndex];
apiConnectptr.i = TapiConnectptrIndex;
apiConnectptr.p = regApiPtr;
compare_transid1 = regApiPtr->transid[0] ^ Ttrans1;
compare_transid2 = regApiPtr->transid[1] ^ Ttrans2;
apiConnectptr.p = regApiPtr.p;
compare_transid1 = regApiPtr.p->transid[0] ^ Ttrans1;
compare_transid2 = regApiPtr.p->transid[1] ^ Ttrans2;
compare_transid1 = compare_transid1 | compare_transid2;
if (compare_transid1 != 0) {
warningReport(signal, 24);

@@ -3734,25 +3736,25 @@ void Dbtc::execLQHKEYCONF(Signal* signal)
systemErrorLab(signal, __LINE__);
}//if
if (ERROR_INSERTED(8003)) {
if (regApiPtr->apiConnectstate == CS_STARTED) {
if (regApiPtr.p->apiConnectstate == CS_STARTED) {
CLEAR_ERROR_INSERT_VALUE;
return;
}//if
}//if
if (ERROR_INSERTED(8004)) {
if (regApiPtr->apiConnectstate == CS_RECEIVING) {
if (regApiPtr.p->apiConnectstate == CS_RECEIVING) {
CLEAR_ERROR_INSERT_VALUE;
return;
}//if
}//if
if (ERROR_INSERTED(8005)) {
if (regApiPtr->apiConnectstate == CS_REC_COMMITTING) {
if (regApiPtr.p->apiConnectstate == CS_REC_COMMITTING) {
CLEAR_ERROR_INSERT_VALUE;
return;
}//if
}//if
if (ERROR_INSERTED(8006)) {
if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
if (regApiPtr.p->apiConnectstate == CS_START_COMMITTING) {
CLEAR_ERROR_INSERT_VALUE;
return;
}//if

@@ -3767,10 +3769,12 @@ void Dbtc::execLQHKEYCONF(Signal* signal)
regTcPtr->lastLqhNodeId = refToNode(tlastLqhBlockref);
regTcPtr->noFiredTriggers = noFired;

UintR Ttckeyrec = (UintR)regApiPtr->tckeyrec;
UintR Ttckeyrec = (UintR)regApiPtr.p->tckeyrec;
UintR TclientData = regTcPtr->clientData;
UintR TdirtyOp = regTcPtr->dirtyOp;
ConnectionState TapiConnectstate = regApiPtr->apiConnectstate;
Uint32 TopSimple = regTcPtr->opSimple;
Uint32 Toperation = regTcPtr->operation;
ConnectionState TapiConnectstate = regApiPtr.p->apiConnectstate;
if (Ttckeyrec > (ZTCOPCONF_SIZE - 2)) {
TCKEY_abort(signal, 30);
return;
@@ -3795,23 +3799,34 @@ void Dbtc::execLQHKEYCONF(Signal* signal)
* since they will enter execLQHKEYCONF a second time
* Skip counting internally generated TcKeyReq
*/
regApiPtr->tcSendArray[Ttckeyrec] = TclientData;
regApiPtr->tcSendArray[Ttckeyrec + 1] = treadlenAi;
regApiPtr->tckeyrec = Ttckeyrec + 2;
regApiPtr.p->tcSendArray[Ttckeyrec] = TclientData;
regApiPtr.p->tcSendArray[Ttckeyrec + 1] = treadlenAi;
regApiPtr.p->tckeyrec = Ttckeyrec + 2;
}//if
}//if
if (TdirtyOp == ZTRUE) {
UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec;
if (TdirtyOp == ZTRUE)
{
UintR Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
jam();
releaseDirtyWrite(signal);
regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec - 1;
} else {
regApiPtr.p->lqhkeyreqrec = Tlqhkeyreqrec - 1;
}
else if (Toperation == ZREAD && TopSimple)
{
UintR Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
jam();
unlinkReadyTcCon(signal);
releaseTcCon();
regApiPtr.p->lqhkeyreqrec = Tlqhkeyreqrec - 1;
}
else
{
jam();
if (noFired == 0) {
jam();
// No triggers to execute
UintR Tlqhkeyconfrec = regApiPtr->lqhkeyconfrec;
regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec + 1;
UintR Tlqhkeyconfrec = regApiPtr.p->lqhkeyconfrec;
regApiPtr.p->lqhkeyconfrec = Tlqhkeyconfrec + 1;
regTcPtr->tcConnectstate = OS_PREPARED;
}
}//if

@@ -3841,21 +3856,18 @@ void Dbtc::execLQHKEYCONF(Signal* signal)
jam();
if (regTcPtr->isIndexOp) {
jam();
setupIndexOpReturn(regApiPtr, regTcPtr);
setupIndexOpReturn(regApiPtr.p, regTcPtr);
}
lqhKeyConf_checkTransactionState(signal, regApiPtr);
} else {
// We have fired triggers
jam();
saveTriggeringOpState(signal, regTcPtr);
if (regTcPtr->noReceivedTriggers == noFired) {
ApiConnectRecordPtr transPtr;

if (regTcPtr->noReceivedTriggers == noFired)
{
// We have received all data
jam();
transPtr.i = TapiConnectptrIndex;
transPtr.p = regApiPtr;
executeTriggers(signal, &transPtr);
executeTriggers(signal, &regApiPtr);
}
// else wait for more trigger data
}
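Throughout this hunk the raw ApiConnectRecord pointer is replaced by a Ptr<ApiConnectRecord> that carries the pool index (.i) alongside the resolved pointer (.p), so both can be handed on together (e.g. to executeTriggers). A minimal sketch of that pattern, with a simplified Ptr template assumed here rather than the real kernel one:

// Sketch only: simplified stand-in for the kernel's Ptr<T> template.
template <typename T>
struct PtrS {
  unsigned i;  // index of the record inside its pool
  T* p;        // resolved pointer to the record
};

struct ApiConnectRecordS { unsigned transid[2]; };  // hypothetical record

// Resolve index -> (i, p) pair once, then pass the pair around together.
PtrS<ApiConnectRecordS> resolve(ApiConnectRecordS* pool, unsigned index)
{
  PtrS<ApiConnectRecordS> ptr;
  ptr.i = index;        // keep the index for later lookups and signals
  ptr.p = &pool[index]; // keep the pointer for direct field access
  return ptr;
}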
@@ -3879,7 +3891,7 @@ void Dbtc::setupIndexOpReturn(ApiConnectRecord* regApiPtr,
*/
void
Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
ApiConnectRecord * const apiConnectPtrP)
Ptr<ApiConnectRecord> regApiPtr)
{
/*---------------------------------------------------------------*/
/* IF THE COMMIT FLAG IS SET IN SIGNAL TCKEYREQ THEN DBTC HAS TO */

@@ -3890,9 +3902,9 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
/* FOR ALL OPERATIONS, AND THEN WAIT FOR THE API TO CONCLUDE THE */
/* TRANSACTION */
/*---------------------------------------------------------------*/
ConnectionState TapiConnectstate = apiConnectPtrP->apiConnectstate;
UintR Tlqhkeyconfrec = apiConnectPtrP->lqhkeyconfrec;
UintR Tlqhkeyreqrec = apiConnectPtrP->lqhkeyreqrec;
ConnectionState TapiConnectstate = regApiPtr.p->apiConnectstate;
UintR Tlqhkeyconfrec = regApiPtr.p->lqhkeyconfrec;
UintR Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
int TnoOfOutStanding = Tlqhkeyreqrec - Tlqhkeyconfrec;

switch (TapiConnectstate) {

@@ -3902,11 +3914,11 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
diverify010Lab(signal);
return;
} else if (TnoOfOutStanding > 0) {
if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
if (regApiPtr.p->tckeyrec == ZTCOPCONF_SIZE) {
jam();
sendtckeyconf(signal, 0);
return;
} else if (apiConnectPtrP->indexOpReturn) {
} else if (regApiPtr.p->indexOpReturn) {
jam();
sendtckeyconf(signal, 0);
return;

@@ -3925,11 +3937,11 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
sendtckeyconf(signal, 2);
return;
} else {
if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
if (regApiPtr.p->tckeyrec == ZTCOPCONF_SIZE) {
jam();
sendtckeyconf(signal, 0);
return;
} else if (apiConnectPtrP->indexOpReturn) {
} else if (regApiPtr.p->indexOpReturn) {
jam();
sendtckeyconf(signal, 0);
return;

@@ -3939,11 +3951,11 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
return;
case CS_REC_COMMITTING:
if (TnoOfOutStanding > 0) {
if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
if (regApiPtr.p->tckeyrec == ZTCOPCONF_SIZE) {
jam();
sendtckeyconf(signal, 0);
return;
} else if (apiConnectPtrP->indexOpReturn) {
} else if (regApiPtr.p->indexOpReturn) {
jam();
sendtckeyconf(signal, 0);
return;

@@ -3960,7 +3972,7 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
/* CONSISTING OF DIRTY WRITES AND ALL OF THOSE WERE */
/* COMPLETED. ENSURE TCKEYREC IS ZERO TO PREVENT ERRORS. */
/*---------------------------------------------------------------*/
apiConnectPtrP->tckeyrec = 0;
regApiPtr.p->tckeyrec = 0;
return;
default:
TCKEY_abort(signal, 46);
@@ -4218,34 +4230,46 @@ void Dbtc::diverify010Lab(Signal* signal)
jam();
systemErrorLab(signal, __LINE__);
}//if
if (TfirstfreeApiConnectCopy != RNIL) {
seizeApiConnectCopy(signal);
regApiPtr->apiConnectstate = CS_PREPARE_TO_COMMIT;
/*-----------------------------------------------------------------------
* WE COME HERE ONLY IF THE TRANSACTION IS PREPARED ON ALL TC CONNECTIONS.
* THUS WE CAN START THE COMMIT PHASE BY SENDING DIVERIFY ON ALL TC
* CONNECTIONS AND THEN WHEN ALL DIVERIFYCONF HAVE BEEN RECEIVED THE
* COMMIT MESSAGE CAN BE SENT TO ALL INVOLVED PARTS.
*-----------------------------------------------------------------------*/
EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal, 1);
if (signal->theData[2] == 0) {
execDIVERIFYCONF(signal);

if (regApiPtr->lqhkeyreqrec)
{
if (TfirstfreeApiConnectCopy != RNIL) {
seizeApiConnectCopy(signal);
regApiPtr->apiConnectstate = CS_PREPARE_TO_COMMIT;
/*-----------------------------------------------------------------------
* WE COME HERE ONLY IF THE TRANSACTION IS PREPARED ON ALL TC CONNECTIONS
* THUS WE CAN START THE COMMIT PHASE BY SENDING DIVERIFY ON ALL TC
* CONNECTIONS AND THEN WHEN ALL DIVERIFYCONF HAVE BEEN RECEIVED THE
* COMMIT MESSAGE CAN BE SENT TO ALL INVOLVED PARTS.
*---------------------------------------------------------------------*/
EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal, 1);
if (signal->theData[2] == 0) {
execDIVERIFYCONF(signal);
}
return;
} else {
/*-----------------------------------------------------------------------
* There were no free copy connections available. We must abort the
* transaction since otherwise we will have a problem with the report
* to the application.
* This should more or less not happen but if it happens we do
* not want to crash and we do not want to create code to handle it
* properly since it is difficult to test it and will be complex to
* handle a problem more or less not occurring.
*---------------------------------------------------------------------*/
terrorCode = ZSEIZE_API_COPY_ERROR;
abortErrorLab(signal);
return;
}
return;
} else {
/*-----------------------------------------------------------------------
* There were no free copy connections available. We must abort the
* transaction since otherwise we will have a problem with the report
* to the application.
* This should more or less not happen but if it happens we do not want to
* crash and we do not want to create code to handle it properly since
* it is difficult to test it and will be complex to handle a problem
* more or less not occurring.
*-----------------------------------------------------------------------*/
terrorCode = ZSEIZE_API_COPY_ERROR;
abortErrorLab(signal);
return;
}//if
}
else
{
jam();
sendtckeyconf(signal, 1);
regApiPtr->apiConnectstate = CS_CONNECTED;
regApiPtr->m_transaction_nodes.clear();
setApiConTimer(apiConnectptr.i, 0,__LINE__);
}
}//Dbtc::diverify010Lab()

/* ------------------------------------------------------------------------- */
@@ -5260,16 +5284,8 @@ void Dbtc::execLQHKEYREF(Signal* signal)
regApiPtr->lqhkeyreqrec--;
if (regApiPtr->lqhkeyconfrec == regApiPtr->lqhkeyreqrec) {
if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
if(regApiPtr->lqhkeyconfrec) {
jam();
diverify010Lab(signal);
} else {
jam();
sendtckeyconf(signal, 1);
regApiPtr->apiConnectstate = CS_CONNECTED;
regApiPtr->m_transaction_nodes.clear();
setApiConTimer(apiConnectptr.i, 0,__LINE__);
}
jam();
diverify010Lab(signal);
return;
} else if (regApiPtr->tckeyrec > 0 || regApiPtr->m_exec_flag) {
jam();

@@ -6278,7 +6294,7 @@ void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr)
jam();
if (api_timer != 0) {
Uint32 error= ZTIME_OUT_ERROR;
time_out_value= time_out_param + (api_con_ptr & mask_value);
time_out_value= time_out_param + (ndb_rand() & mask_value);
if (unlikely(old_mask_value)) // abort during single user mode
{
apiConnectptr.i = api_con_ptr;
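The change above replaces a deterministic per-connection offset with a random one: adding (ndb_rand() & mask_value) to the base timeout spreads the expiry times of the api connect timers so they do not all fire in the same scan round. Assuming mask_value is one less than a power of two, the effect can be sketched as:

// Sketch only: illustrates the timeout spreading, not the real Dbtc code.
#include <cstdint>
#include <cstdlib>

// mask_value is assumed to be 2^k - 1, so (rand & mask) is a uniform
// offset in [0, mask_value] added on top of the configured timeout.
uint32_t spread_timeout(uint32_t time_out_param, uint32_t mask_value)
{
  uint32_t jitter = static_cast<uint32_t>(std::rand()) & mask_value;
  return time_out_param + jitter;
}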
@@ -6481,6 +6497,7 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr, Uint32 errCode)
return;
case CS_WAIT_COMMIT_CONF:
jam();
CRASH_INSERTION(8053);
tcConnectptr.i = apiConnectptr.p->currentTcConnect;
ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
arrGuard(apiConnectptr.p->currentReplicaNo, MAX_REPLICAS);
@@ -518,6 +518,7 @@ typedef Ptr<Fragoperrec> FragoperrecPtr;
Uint32 m_savePointId;
Uint32 m_scanGCI;
};
Uint32 m_endPage;
// lock waited for or obtained and not yet passed to LQH
Uint32 m_accLockOp;

@@ -641,6 +642,8 @@ typedef Ptr<Fragoperrec> FragoperrecPtr;
*/
Page_request_list::Head m_page_requests[MAX_FREE_LIST];

DLList<Page>::Head m_unmap_pages;

/**
* Current extent
*/

@@ -702,7 +705,8 @@ struct Fragrecord {

DLList<ScanOp>::Head m_scanList;

enum { UC_LCP = 1, UC_CREATE = 2 };
enum { UC_LCP = 1, UC_CREATE = 2, UC_SET_LCP = 3 };
Uint32 m_restore_lcp_id;
Uint32 m_undo_complete;
Uint32 m_tablespace_id;
Uint32 m_logfile_group_id;

@@ -1573,6 +1577,8 @@ public:

void nr_delete_page_callback(Signal*, Uint32 op, Uint32 page);
void nr_delete_log_buffer_callback(Signal*, Uint32 op, Uint32 page);

bool get_frag_info(Uint32 tableId, Uint32 fragId, Uint32* maxPage);
private:
BLOCK_DEFINES(Dbtup);

@@ -2830,7 +2836,7 @@ private:
public:
int disk_page_load_hook(Uint32 page_id);

void disk_page_unmap_callback(Uint32 page_id, Uint32 dirty_count);
void disk_page_unmap_callback(Uint32 when, Uint32 page, Uint32 dirty_count);

int disk_restart_alloc_extent(Uint32 tableId, Uint32 fragId,
const Local_key* key, Uint32 pages);

@@ -2851,11 +2857,11 @@ public:
Local_key m_key;
};

void disk_restart_mark_no_lcp(Uint32 table, Uint32 frag);
void disk_restart_lcp_id(Uint32 table, Uint32 frag, Uint32 lcpId);

private:
void disk_restart_undo_next(Signal*);
void disk_restart_undo_lcp(Uint32, Uint32, Uint32 flag);
void disk_restart_undo_lcp(Uint32, Uint32, Uint32 flag, Uint32 lcpId);
void disk_restart_undo_callback(Signal* signal, Uint32, Uint32);
void disk_restart_undo_alloc(Apply_undo*);
void disk_restart_undo_update(Apply_undo*);
@@ -903,8 +903,10 @@ Dbtup::disk_page_set_dirty(PagePtr pagePtr)
}

void
Dbtup::disk_page_unmap_callback(Uint32 page_id, Uint32 dirty_count)
Dbtup::disk_page_unmap_callback(Uint32 when,
Uint32 page_id, Uint32 dirty_count)
{
jamEntry();
Ptr<GlobalPage> gpage;
m_global_page_pool.getPtr(gpage, page_id);
PagePtr pagePtr;

@@ -918,17 +920,9 @@ Dbtup::disk_page_unmap_callback(Uint32 page_id, Uint32 dirty_count)
{
return ;
}

Local_key key;
key.m_page_no = pagePtr.p->m_page_no;
key.m_file_no = pagePtr.p->m_file_no;

Uint32 idx = pagePtr.p->list_index;

ndbassert((idx & 0x8000) == 0);

if (DBG_DISK)
ndbout << "disk_page_unmap_callback " << key << endl;

Ptr<Tablerec> tabPtr;
tabPtr.i= pagePtr.p->m_table_id;
ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
@@ -938,26 +932,89 @@ Dbtup::disk_page_unmap_callback(Uint32 page_id, Uint32 dirty_count)

Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info;

if (dirty_count == 0)
if (when == 0)
{
Uint32 free = pagePtr.p->free_space;
Uint32 used = pagePtr.p->uncommitted_used_space;
ddassert(free >= used);
ddassert(alloc.calc_page_free_bits(free - used) == idx);
/**
* Before pageout
*/
jam();

if (DBG_DISK)
{
Local_key key;
key.m_page_no = pagePtr.p->m_page_no;
key.m_file_no = pagePtr.p->m_file_no;
ndbout << "disk_page_unmap_callback(before) " << key
<< " cnt: " << dirty_count << " " << (idx & ~0x8000) << endl;
}

ndbassert((idx & 0x8000) == 0);

ArrayPool<Page> *pool= (ArrayPool<Page>*)&m_global_page_pool;
LocalDLList<Page> list(*pool, alloc.m_dirty_pages[idx]);
LocalDLList<Page> list2(*pool, alloc.m_unmap_pages);
list.remove(pagePtr);
list2.add(pagePtr);

if (dirty_count == 0)
{
jam();
pagePtr.p->list_index = idx | 0x8000;

Local_key key;
key.m_page_no = pagePtr.p->m_page_no;
key.m_file_no = pagePtr.p->m_file_no;

Uint32 free = pagePtr.p->free_space;
Uint32 used = pagePtr.p->uncommitted_used_space;
ddassert(free >= used);
ddassert(alloc.calc_page_free_bits(free - used) == idx);

Tablespace_client tsman(0, c_tsman,
fragPtr.p->fragTableId,
fragPtr.p->fragmentId,
fragPtr.p->m_tablespace_id);

tsman.unmap_page(&key, idx);
jamEntry();
}
}
else if (when == 1)
{
/**
* After page out
*/
jam();

Local_key key;
key.m_page_no = pagePtr.p->m_page_no;
key.m_file_no = pagePtr.p->m_file_no;
Uint32 real_free = pagePtr.p->free_space;

if (DBG_DISK)
{
ndbout << "disk_page_unmap_callback(after) " << key
<< " cnt: " << dirty_count << " " << (idx & ~0x8000) << endl;
}

ArrayPool<Page> *pool= (ArrayPool<Page>*)&m_global_page_pool;
LocalDLList<Page> list(*pool, alloc.m_unmap_pages);
list.remove(pagePtr);

Tablespace_client tsman(0, c_tsman,
fragPtr.p->fragTableId,
fragPtr.p->fragmentId,
fragPtr.p->m_tablespace_id);

tsman.unmap_page(&key, idx);
jamEntry();
pagePtr.p->list_index = idx | 0x8000;
if (DBG_DISK && alloc.calc_page_free_bits(real_free) != (idx & ~0x8000))
{
ndbout << key
<< " calc: " << alloc.calc_page_free_bits(real_free)
<< " idx: " << (idx & ~0x8000)
<< endl;
}
tsman.update_page_free_bits(&key, alloc.calc_page_free_bits(real_free));
}

ArrayPool<Page> *pool= (ArrayPool<Page>*)&m_global_page_pool;
LocalDLList<Page> list(*pool, alloc.m_dirty_pages[idx]);
list.remove(pagePtr);
}

void
@@ -969,8 +1026,6 @@ Dbtup::disk_page_alloc(Signal* signal,
Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;

Uint64 lsn;
Uint32 old_free = pagePtr.p->free_space;
Uint32 old_bits= alloc.calc_page_free_bits(old_free);
if (tabPtrP->m_attributes[DD].m_no_of_varsize == 0)
{
ddassert(pagePtr.p->uncommitted_used_space > 0);

@@ -988,20 +1043,6 @@ Dbtup::disk_page_alloc(Signal* signal,

lsn= disk_page_undo_alloc(pagePtr.p, key, sz, gci, logfile_group_id);
}

Uint32 new_free = pagePtr.p->free_space;
Uint32 new_bits= alloc.calc_page_free_bits(new_free);

if (old_bits != new_bits)
{
Tablespace_client tsman(signal, c_tsman,
fragPtrP->fragTableId,
fragPtrP->fragmentId,
fragPtrP->m_tablespace_id);

tsman.update_page_free_bits(key, new_bits, lsn);
jamEntry();
}
}

void

@@ -1016,7 +1057,6 @@ Dbtup::disk_page_free(Signal *signal,
Uint32 logfile_group_id= fragPtrP->m_logfile_group_id;
Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;
Uint32 old_free= pagePtr.p->free_space;
Uint32 old_bits= alloc.calc_page_free_bits(old_free);

Uint32 sz;
Uint64 lsn;

@@ -1043,19 +1083,7 @@ Dbtup::disk_page_free(Signal *signal,
}

Uint32 new_free = pagePtr.p->free_space;
Uint32 new_bits = alloc.calc_page_free_bits(new_free);

if (old_bits != new_bits)
{
Tablespace_client tsman(signal, c_tsman,
fragPtrP->fragTableId,
fragPtrP->fragmentId,
fragPtrP->m_tablespace_id);

tsman.update_page_free_bits(key, new_bits, lsn);
jamEntry();
}

Uint32 ext = pagePtr.p->m_extent_info_ptr;
Uint32 used = pagePtr.p->uncommitted_used_space;
Uint32 old_idx = pagePtr.p->list_index;
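Both hunks above stop recomputing and publishing the page free bits on every alloc/free; the bits summarize a page's free space in a few coarse buckets kept by the tablespace manager, so they only need refreshing when the bucket actually changes (here deferred to the unmap callback). A sketch of such a bucketing function, with the bucket thresholds purely illustrative assumptions (the real calc_page_free_bits is not part of this diff):

// Sketch only: hypothetical bucketing; thresholds are illustrative.
#include <cstdint>

// Map free words on a page to a 2-bit "free bits" bucket so the
// tablespace manager can find pages with enough room cheaply.
uint32_t calc_page_free_bits_sketch(uint32_t free_words)
{
  if (free_words == 0) return 0;     // full page
  if (free_words < 32) return 1;     // nearly full
  if (free_words < 256) return 2;    // partly free
  return 3;                          // mostly empty
}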
@@ -1341,15 +1369,23 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
case File_formats::Undofile::UNDO_LCP_FIRST:
case File_formats::Undofile::UNDO_LCP:
{
jam();
ndbrequire(len == 3);
Uint32 lcp = ptr[0];
Uint32 tableId = ptr[1] >> 16;
Uint32 fragId = ptr[1] & 0xFFFF;
disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_LCP);
disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_LCP, lcp);
disk_restart_undo_next(signal);

if (DBG_UNDO)
{
ndbout_c("UNDO LCP %u (%u, %u)", lcp, tableId, fragId);
}
return;
}
case File_formats::Undofile::UNDO_TUP_ALLOC:
{
jam();
Disk_undo::Alloc* rec= (Disk_undo::Alloc*)ptr;
preq.m_page.m_page_no = rec->m_page_no;
preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16;

@@ -1358,6 +1394,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
}
case File_formats::Undofile::UNDO_TUP_UPDATE:
{
jam();
Disk_undo::Update* rec= (Disk_undo::Update*)ptr;
preq.m_page.m_page_no = rec->m_page_no;
preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16;

@@ -1366,6 +1403,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
}
case File_formats::Undofile::UNDO_TUP_FREE:
{
jam();
Disk_undo::Free* rec= (Disk_undo::Free*)ptr;
preq.m_page.m_page_no = rec->m_page_no;
preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16;

@@ -1377,6 +1415,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
*
*/
{
jam();
Disk_undo::Create* rec= (Disk_undo::Create*)ptr;
Ptr<Tablerec> tabPtr;
tabPtr.i= rec->m_table;

@@ -1384,12 +1423,34 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
for(Uint32 i = 0; i<MAX_FRAG_PER_NODE; i++)
if (tabPtr.p->fragrec[i] != RNIL)
disk_restart_undo_lcp(tabPtr.i, tabPtr.p->fragid[i],
Fragrecord::UC_CREATE);
Fragrecord::UC_CREATE, 0);
disk_restart_undo_next(signal);

if (DBG_UNDO)
{
ndbout_c("UNDO CREATE (%u)", tabPtr.i);
}
return;
}
case File_formats::Undofile::UNDO_TUP_DROP:
{
jam();
Disk_undo::Drop* rec = (Disk_undo::Drop*)ptr;
Ptr<Tablerec> tabPtr;
tabPtr.i= rec->m_table;
ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
for(Uint32 i = 0; i<MAX_FRAG_PER_NODE; i++)
if (tabPtr.p->fragrec[i] != RNIL)
disk_restart_undo_lcp(tabPtr.i, tabPtr.p->fragid[i],
Fragrecord::UC_CREATE, 0);
disk_restart_undo_next(signal);

if (DBG_UNDO)
{
ndbout_c("UNDO DROP (%u)", tabPtr.i);
}
return;
}
case File_formats::Undofile::UNDO_TUP_ALLOC_EXTENT:
jam();
case File_formats::Undofile::UNDO_TUP_FREE_EXTENT:

@@ -1398,6 +1459,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
return;

case File_formats::Undofile::UNDO_END:
jam();
f_undo_done = true;
return;
default:
@@ -1431,14 +1493,32 @@ Dbtup::disk_restart_undo_next(Signal* signal)
}

void
Dbtup::disk_restart_mark_no_lcp(Uint32 tableId, Uint32 fragId)
Dbtup::disk_restart_lcp_id(Uint32 tableId, Uint32 fragId, Uint32 lcpId)
{
jamEntry();
disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_CREATE);

if (lcpId == RNIL)
{
disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_CREATE, 0);
if (DBG_UNDO)
{
ndbout_c("mark_no_lcp (%u, %u)", tableId, fragId);
}
}
else
{
disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_SET_LCP, lcpId);
if (DBG_UNDO)
{
ndbout_c("mark_no_lcp (%u, %u)", tableId, fragId);
}

}
}

void
Dbtup::disk_restart_undo_lcp(Uint32 tableId, Uint32 fragId, Uint32 flag)
Dbtup::disk_restart_undo_lcp(Uint32 tableId, Uint32 fragId, Uint32 flag,
Uint32 lcpId)
{
Ptr<Tablerec> tabPtr;
tabPtr.i= tableId;
@@ -1446,11 +1526,43 @@ Dbtup::disk_restart_undo_lcp(Uint32 tableId, Uint32 fragId, Uint32 flag)

if (tabPtr.p->tableStatus == DEFINED)
{
jam();
FragrecordPtr fragPtr;
getFragmentrec(fragPtr, fragId, tabPtr.p);
if (!fragPtr.isNull())
{
fragPtr.p->m_undo_complete |= flag;
jam();
switch(flag){
case Fragrecord::UC_CREATE:
jam();
fragPtr.p->m_undo_complete |= flag;
return;
case Fragrecord::UC_LCP:
jam();
if (fragPtr.p->m_undo_complete == 0 &&
fragPtr.p->m_restore_lcp_id == lcpId)
{
jam();
fragPtr.p->m_undo_complete |= flag;
if (DBG_UNDO)
ndbout_c("table: %u fragment: %u lcp: %u -> done",
tableId, fragId, lcpId);
}
return;
case Fragrecord::UC_SET_LCP:
{
jam();
if (DBG_UNDO)
ndbout_c("table: %u fragment: %u restore to lcp: %u",
tableId, fragId, lcpId);
ndbrequire(fragPtr.p->m_undo_complete == 0);
ndbrequire(fragPtr.p->m_restore_lcp_id == RNIL);
fragPtr.p->m_restore_lcp_id = lcpId;
return;
}
}
jamLine(flag);
ndbrequire(false);
}
}
}
@@ -1474,6 +1586,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
pagePtr.p->nextList != RNIL ||
pagePtr.p->prevList != RNIL)
{
jam();
update = true;
pagePtr.p->list_index |= 0x8000;
pagePtr.p->nextList = pagePtr.p->prevList = RNIL;

@@ -1484,6 +1597,9 @@ Dbtup::disk_restart_undo_callback(Signal* signal,

if (tableId >= cnoOfTablerec)
{
jam();
if (DBG_UNDO)
ndbout_c("UNDO table> %u", tableId);
disk_restart_undo_next(signal);
return;
}

@@ -1492,6 +1608,9 @@ Dbtup::disk_restart_undo_callback(Signal* signal,

if (undo->m_table_ptr.p->tableStatus != DEFINED)
{
jam();
if (DBG_UNDO)
ndbout_c("UNDO !defined (%u) ", tableId);
disk_restart_undo_next(signal);
return;
}

@@ -1499,19 +1618,25 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
getFragmentrec(undo->m_fragment_ptr, fragId, undo->m_table_ptr.p);
if(undo->m_fragment_ptr.isNull())
{
jam();
if (DBG_UNDO)
ndbout_c("UNDO fragment null %u/%u", tableId, fragId);
disk_restart_undo_next(signal);
return;
}

if (undo->m_fragment_ptr.p->m_undo_complete)
{
jam();
if (DBG_UNDO)
ndbout_c("UNDO undo complete %u/%u", tableId, fragId);
disk_restart_undo_next(signal);
return;
}

Local_key key;
key.m_page_no = pagePtr.p->m_page_no;
key.m_file_no = pagePtr.p->m_file_no;
Local_key key = undo->m_key;
// key.m_page_no = pagePtr.p->m_page_no;
// key.m_file_no = pagePtr.p->m_file_no;

Uint64 lsn = 0;
lsn += pagePtr.p->m_page_header.m_page_lsn_hi; lsn <<= 32;
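The two statements above assemble a 64-bit log sequence number from the page header's 32-bit hi/lo words; the undo record is then applied only when undo->m_lsn <= lsn, i.e. when the page already contains the effects being rolled back. The composition itself is just:

// Sketch only: composing the 64-bit page LSN from its two header words.
#include <cstdint>

uint64_t page_lsn(uint32_t lsn_hi, uint32_t lsn_lo)
{
  uint64_t lsn = lsn_hi;   // high word first
  lsn <<= 32;              // shift into the upper half
  lsn += lsn_lo;           // add the low word
  return lsn;
}

// Apply an undo record only if the page is at or beyond that record.
bool should_apply(uint64_t undo_lsn, uint64_t current_page_lsn)
{ return undo_lsn <= current_page_lsn; }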
@@ -1521,6 +1646,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,

if (undo->m_lsn <= lsn)
{
jam();
if (DBG_UNDO)
{
ndbout << "apply: " << undo->m_lsn << "(" << lsn << " )"

@@ -1535,12 +1661,15 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
*/
switch(undo->m_type){
case File_formats::Undofile::UNDO_TUP_ALLOC:
jam();
disk_restart_undo_alloc(undo);
break;
case File_formats::Undofile::UNDO_TUP_UPDATE:
jam();
disk_restart_undo_update(undo);
break;
case File_formats::Undofile::UNDO_TUP_FREE:
jam();
disk_restart_undo_free(undo);
break;
default:

@@ -1555,14 +1684,17 @@ Dbtup::disk_restart_undo_callback(Signal* signal,

m_pgman.update_lsn(undo->m_key, lsn);
jamEntry();

disk_restart_undo_page_bits(signal, undo);
}
else if (DBG_UNDO)
{
jam();
ndbout << "ignore: " << undo->m_lsn << "(" << lsn << " )"
<< key << " type: " << undo->m_type << endl;
<< key << " type: " << undo->m_type
<< " tab: " << tableId << endl;
}

disk_restart_undo_page_bits(signal, undo);
disk_restart_undo_next(signal);
}
@@ -1637,16 +1769,12 @@ Dbtup::disk_restart_undo_page_bits(Signal* signal, Apply_undo* undo)
Uint32 new_bits = alloc.calc_page_free_bits(free);
pageP->list_index = 0x8000 | new_bits;

Uint64 lsn = 0;
lsn += pageP->m_page_header.m_page_lsn_hi; lsn <<= 32;
lsn += pageP->m_page_header.m_page_lsn_lo;

Tablespace_client tsman(signal, c_tsman,
fragPtrP->fragTableId,
fragPtrP->fragmentId,
fragPtrP->m_tablespace_id);

tsman.restart_undo_page_free_bits(&undo->m_key, new_bits, undo->m_lsn, lsn);
tsman.restart_undo_page_free_bits(&undo->m_key, new_bits);
jamEntry();
}

@@ -1683,6 +1811,7 @@ Dbtup::disk_restart_alloc_extent(Uint32 tableId, Uint32 fragId,

if (alloc.m_curr_extent_info_ptr_i != RNIL)
{
jam();
Ptr<Extent_info> old;
c_extent_pool.getPtr(old, alloc.m_curr_extent_info_ptr_i);
ndbassert(old.p->m_free_matrix_pos == RNIL);

@@ -1709,6 +1838,7 @@ void
Dbtup::disk_restart_page_bits(Uint32 tableId, Uint32 fragId,
const Local_key*, Uint32 bits)
{
jam();
TablerecPtr tabPtr;
FragrecordPtr fragPtr;
tabPtr.i = tableId;
@@ -1957,9 +1957,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
Uint32 TdataForUpdate[3];
Uint32 Tlen;

AttributeHeader& ah= AttributeHeader::init(&TdataForUpdate[0],
TattrId,
TattrNoOfWords << 2);
AttributeHeader ah(TattrId, TattrNoOfWords << 2);
TdataForUpdate[0]= ah.m_value;
TdataForUpdate[1]= TregMemBuffer[theRegister + 2];
TdataForUpdate[2]= TregMemBuffer[theRegister + 3];
Tlen= TattrNoOfWords + 1;

@@ -1975,6 +1974,7 @@ int Dbtup::interpreterNextLab(Signal* signal,
// Write a NULL value into the attribute
/* --------------------------------------------------------- */
ah.setNULL();
TdataForUpdate[0]= ah.m_value;
Tlen= 1;
}
int TnoDataRW= updateAttributes(req_struct,
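The refactor above swaps AttributeHeader::init(ptr, id, size), which wrote into caller memory and returned a reference, for a plain value constructor whose packed word is then copied out via ah.m_value. A sketch of a value-type header like that, with the bit layout assumed purely for illustration (the real AttributeHeader layout may differ):

// Sketch only: hypothetical bit layout, not the real AttributeHeader.
#include <cstdint>

class AttributeHeaderS {
public:
  uint32_t m_value;
  // assumption: id in the high 16 bits, data size (words) in low 15 bits
  AttributeHeaderS(uint32_t attr_id, uint32_t data_size)
    : m_value((attr_id << 16) | (data_size & 0x7FFF)) {}
  void setNULL() { m_value |= 0x8000; }              // hypothetical NULL bit
  uint32_t getDataSize() const { return m_value & 0x7FFF; }
};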
@@ -143,6 +143,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
regFragPtr.p->m_lcp_scan_op = RNIL;
regFragPtr.p->m_lcp_keep_list = RNIL;
regFragPtr.p->m_var_page_chunks = RNIL;
regFragPtr.p->m_restore_lcp_id = RNIL;

if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId ||
ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) {

@@ -674,11 +675,11 @@ Dbtup::undo_createtable_callback(Signal* signal, Uint32 opPtrI, Uint32 unused)
switch(ret){
case 0:
return;
case -1:
warningEvent("Failed to sync log for create of table: %u", regTabPtr.i);
default:
ndbout_c("ret: %d", ret);
ndbrequire(false);
execute(signal, req.m_callback, regFragPtr.p->m_logfile_group_id);
}

}

void
@@ -959,8 +960,6 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId,
return;
}

#if NOT_YET_UNDO_DROP_TABLE
#error "This code is complete, but I prefer not to enable it until I need it"
if (logfile_group_id != RNIL)
{
Callback cb;

@@ -968,8 +967,15 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId,
cb.m_callbackFunction =
safe_cast(&Dbtup::drop_table_log_buffer_callback);
Uint32 sz= sizeof(Disk_undo::Drop) >> 2;
(void) c_lgman->alloc_log_space(logfile_group_id, sz);

int r0 = c_lgman->alloc_log_space(logfile_group_id, sz);
if (r0)
{
jam();
warningEvent("Failed to alloc log space for drop table: %u",
tabPtr.i);
goto done;
}

Logfile_client lgman(this, c_lgman, logfile_group_id);
int res= lgman.get_log_buffer(signal, sz, &cb);
switch(res){

@@ -977,15 +983,18 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId,
jam();
return;
case -1:
ndbrequire("NOT YET IMPLEMENTED" == 0);
warningEvent("Failed to get log buffer for drop table: %u",
tabPtr.i);
c_lgman->free_log_space(logfile_group_id, sz);
goto done;
break;
default:
execute(signal, cb, logfile_group_id);
return;
}
}
#endif


done:
drop_table_logsync_callback(signal, tabPtr.i, RNIL);
}
@@ -997,7 +1006,20 @@ Dbtup::drop_fragment_unmap_pages(Signal *signal,
{
if (tabPtr.p->m_no_of_disk_attributes)
{
jam();
Disk_alloc_info& alloc_info= fragPtr.p->m_disk_alloc_info;

if (!alloc_info.m_unmap_pages.isEmpty())
{
jam();
ndbout_c("waiting for unmap pages");
signal->theData[0] = ZUNMAP_PAGES;
signal->theData[1] = tabPtr.i;
signal->theData[2] = fragPtr.i;
signal->theData[3] = pos;
sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
return;
}
while(alloc_info.m_dirty_pages[pos].isEmpty() && pos < MAX_FREE_LIST)
pos++;
@@ -1164,9 +1186,10 @@ Dbtup::drop_table_log_buffer_callback(Signal* signal, Uint32 tablePtrI,
switch(ret){
case 0:
return;
case -1:
warningEvent("Failed to sync log for drop of table: %u", tablePtrI);
default:
ndbout_c("ret: %d", ret);
ndbrequire(false);
execute(signal, req.m_callback, logfile_group_id);
}
}
@@ -1441,3 +1464,22 @@ Dbtup::complete_restore_lcp(Uint32 tableId, Uint32 fragId)
tabDesc += 2;
}
}

bool
Dbtup::get_frag_info(Uint32 tableId, Uint32 fragId, Uint32* maxPage)
{
jamEntry();
TablerecPtr tabPtr;
tabPtr.i= tableId;
ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);

FragrecordPtr fragPtr;
getFragmentrec(fragPtr, fragId, tabPtr.p);

if (maxPage)
{
* maxPage = fragPtr.p->noOfPages;
}

return true;
}
@@ -818,9 +818,7 @@ Dbtup::checkUpdateOfPrimaryKey(KeyReqStruct* req_struct,
Tablerec* const regTabPtr)
{
Uint32 keyReadBuffer[MAX_KEY_SIZE_IN_WORDS];
Uint32 attributeHeader;
TableDescriptor* attr_descr = req_struct->attr_descr;
AttributeHeader* ahOut = (AttributeHeader*)&attributeHeader;
AttributeHeader ahIn(*updateBuffer);
Uint32 attributeId = ahIn.getAttributeId();
Uint32 attrDescriptorIndex = attributeId << ZAD_LOG_SIZE;

@@ -843,7 +841,7 @@ Dbtup::checkUpdateOfPrimaryKey(KeyReqStruct* req_struct,

ReadFunction f = regTabPtr->readFunctionArray[attributeId];

AttributeHeader::init(&attributeHeader, attributeId, 0);
AttributeHeader attributeHeader(attributeId, 0);
req_struct->out_buf_index = 0;
req_struct->max_read = MAX_KEY_SIZE_IN_WORDS;
req_struct->attr_descriptor = attrDescriptor;

@@ -852,12 +850,12 @@ Dbtup::checkUpdateOfPrimaryKey(KeyReqStruct* req_struct,
req_struct->xfrm_flag = true;
ndbrequire((this->*f)(&keyReadBuffer[0],
req_struct,
ahOut,
&attributeHeader,
attributeOffset));
req_struct->xfrm_flag = tmp;

ndbrequire(req_struct->out_buf_index == ahOut->getDataSize());
if (ahIn.getDataSize() != ahOut->getDataSize()) {
ndbrequire(req_struct->out_buf_index == attributeHeader.getDataSize());
if (ahIn.getDataSize() != attributeHeader.getDataSize()) {
jam();
return true;
}
@@ -95,7 +95,23 @@ Dbtup::execACC_SCANREQ(Signal* signal)
}
}

bits |= AccScanReq::getNRScanFlag(req->requestInfo) ? ScanOp::SCAN_NR : 0;
if (AccScanReq::getNRScanFlag(req->requestInfo))
{
jam();
bits |= ScanOp::SCAN_NR;
scanPtr.p->m_endPage = req->maxPage;
if (req->maxPage != RNIL && req->maxPage > frag.noOfPages)
{
ndbout_c("%u %u endPage: %u (noOfPages: %u)",
tablePtr.i, fragId,
req->maxPage, fragPtr.p->noOfPages);
}
}
else
{
jam();
scanPtr.p->m_endPage = RNIL;
}

// set up scan op
new (scanPtr.p) ScanOp();
@@ -540,7 +556,7 @@ Dbtup::scanFirst(Signal*, ScanOpPtr scanPtr)
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
Fragrecord& frag = *fragPtr.p;
// in the future should not pre-allocate pages
if (frag.noOfPages == 0) {
if (frag.noOfPages == 0 && ((bits & ScanOp::SCAN_NR) == 0)) {
jam();
scan.m_state = ScanOp::Last;
return;

@@ -632,11 +648,23 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
key.m_page_no++;
if (key.m_page_no >= frag.noOfPages) {
jam();

if ((bits & ScanOp::SCAN_NR) && (scan.m_endPage != RNIL))
{
jam();
if (key.m_page_no < scan.m_endPage)
{
jam();
ndbout_c("scanning page %u", key.m_page_no);
goto cont;
}
}
// no more pages, scan ends
pos.m_get = ScanPos::Get_undef;
scan.m_state = ScanOp::Last;
return true;
}
cont:
key.m_page_idx = 0;
pos.m_get = ScanPos::Get_page_mm;
// clear cached value
@@ -649,7 +677,13 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
{
if (pos.m_realpid_mm == RNIL) {
jam();
pos.m_realpid_mm = getRealpid(fragPtr.p, key.m_page_no);
if (key.m_page_no < frag.noOfPages)
pos.m_realpid_mm = getRealpid(fragPtr.p, key.m_page_no);
else
{
ndbassert(bits & ScanOp::SCAN_NR);
goto nopage;
}
}
PagePtr pagePtr;
c_page_pool.getPtr(pagePtr, pos.m_realpid_mm);

@@ -657,9 +691,18 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
if (pagePtr.p->page_state == ZEMPTY_MM) {
// skip empty page
jam();
pos.m_get = ScanPos::Get_next_page_mm;
break; // incr loop count
if (! (bits & ScanOp::SCAN_NR))
{
pos.m_get = ScanPos::Get_next_page_mm;
break; // incr loop count
}
else
{
jam();
pos.m_realpid_mm = RNIL;
}
}
nopage:
pos.m_page = pagePtr.p;
pos.m_get = ScanPos::Get_tuple;
}

@@ -772,7 +815,7 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
uncommitted = committed = ~(unsigned)0;
int ret = tsman.get_page_free_bits(&key, &uncommitted, &committed);
ndbrequire(ret == 0);
if (committed == 0) {
if (committed == 0 && uncommitted == 0) {
// skip empty page
jam();
pos.m_get = ScanPos::Get_next_page_dd;
@ -820,11 +863,11 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
|
||||
{
|
||||
pos.m_get = ScanPos::Get_next_tuple_fs;
|
||||
th = (Tuple_header*)&page->m_data[key.m_page_idx];
|
||||
thbits = th->m_header_bits;
|
||||
|
||||
if (likely(! (bits & ScanOp::SCAN_NR)))
|
||||
{
|
||||
jam();
|
||||
thbits = th->m_header_bits;
|
||||
if (! (thbits & Tuple_header::FREE))
|
||||
{
|
||||
goto found_tuple;
|
||||
@ -832,7 +875,15 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
|
||||
}
|
||||
else
|
||||
{
|
||||
if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI)
|
||||
if (pos.m_realpid_mm == RNIL)
|
||||
{
|
||||
jam();
|
||||
foundGCI = 0;
|
||||
goto found_deleted_rowid;
|
||||
}
|
||||
thbits = th->m_header_bits;
|
||||
if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI ||
|
||||
foundGCI == 0)
|
||||
{
|
||||
if (! (thbits & Tuple_header::FREE))
|
||||
{
|
||||
@ -904,7 +955,8 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
|
||||
|
||||
Fix_page *mmpage = (Fix_page*)c_page_pool.getPtr(pos.m_realpid_mm);
|
||||
th = (Tuple_header*)(mmpage->m_data + key_mm.m_page_idx);
|
||||
if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI)
|
||||
if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI ||
|
||||
foundGCI == 0)
|
||||
{
|
||||
if (! (thbits & Tuple_header::FREE))
|
||||
break;
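
/*
 * Editor's note (not part of the commit): in the Dbtup scan hunks above,
 * a node-recovery scan (ScanOp::SCAN_NR) keeps walking page numbers up to
 * scan.m_endPage even past frag.noOfPages, so deleted rowids can still be
 * reported to the starting node. A simplified sketch of that loop bound,
 * with hypothetical names, assuming RNIL marks "no limit recorded":
 */
#include <cstdint>
const uint32_t RNIL_SKETCH = 0xFFFFFF00;

// Returns true while the scan should still visit page `pageNo`.
static bool scan_keeps_going(uint32_t pageNo, uint32_t noOfPages,
                             uint32_t endPage, bool nrScan) {
  if (pageNo < noOfPages)
    return true;                     // page physically exists
  // NR scans may continue into the range that existed before the crash
  return nrScan && endPage != RNIL_SKETCH && pageNo < endPage;
}
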

@@ -1169,9 +1169,7 @@ DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
/**************************************************************
* Attribute found - store in mapping (AttributeId, Position)
**************************************************************/
AttributeHeader & attrMap =
AttributeHeader::init(attrMappingIt.data,
attrDesc.AttributeId, // 1. Store AttrId
AttributeHeader attrMap(attrDesc.AttributeId, // 1. Store AttrId
0);

if (attrDesc.AttributeKeyFlag) {
@@ -1200,6 +1198,7 @@ DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
return;
}
}
*(attrMappingIt.data) = attrMap.m_value;
#if 0
ndbout << "BEFORE: attrLength: " << attrLength << endl;
#endif

@@ -2701,8 +2701,16 @@ Lgman::execute_undo_record(Signal* signal)
Uint32 lcp = * (ptr - len + 1);
if(m_latest_lcp && lcp > m_latest_lcp)
{
// Just ignore
break;
if (0)
{
const Uint32 * base = ptr - len + 1;
Uint32 lcp = base[0];
Uint32 tableId = base[1] >> 16;
Uint32 fragId = base[1] & 0xFFFF;

ndbout_c("NOT! ignoring lcp: %u tab: %u frag: %u",
lcp, tableId, fragId);
}
}

if(m_latest_lcp == 0 ||

@@ -238,6 +238,13 @@ Pgman::execCONTINUEB(Signal* signal)
}
else
{
if (ERROR_INSERTED(11007))
{
ndbout << "No more writes..." << endl;
SET_ERROR_INSERT_VALUE(11008);
signal->theData[0] = 9999;
sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 10000, 1);
}
signal->theData[0] = m_end_lcp_req.senderData;
sendSignal(m_end_lcp_req.senderRef, GSN_END_LCP_CONF, signal, 1, JBB);
}
@@ -493,6 +500,11 @@ Pgman::release_page_entry(Ptr<Page_entry>& ptr)

if (! (state & Page_entry::LOCKED))
ndbrequire(! (state & Page_entry::REQUEST));

if (ptr.p->m_copy_page_i != RNIL)
{
m_global_page_pool.release(ptr.p->m_copy_page_i);
}

set_page_state(ptr, 0);
m_page_hashlist.remove(ptr);
@@ -1142,7 +1154,8 @@ Pgman::process_cleanup(Signal* signal)
#ifdef VM_TRACE
debugOut << "PGMAN: " << ptr << " : process_cleanup" << endl;
#endif
c_tup->disk_page_unmap_callback(ptr.p->m_real_page_i,
c_tup->disk_page_unmap_callback(0,
ptr.p->m_real_page_i,
ptr.p->m_dirty_count);
pageout(signal, ptr);
max_count--;
@@ -1180,6 +1193,11 @@ Pgman::move_cleanup_ptr(Ptr<Page_entry> ptr)
void
Pgman::execLCP_FRAG_ORD(Signal* signal)
{
if (ERROR_INSERTED(11008))
{
ndbout_c("Ignore LCP_FRAG_ORD");
return;
}
LcpFragOrd* ord = (LcpFragOrd*)signal->getDataPtr();
ndbrequire(ord->lcpId >= m_last_lcp_complete + 1 || m_last_lcp_complete == 0);
m_last_lcp = ord->lcpId;
@@ -1196,6 +1214,12 @@ Pgman::execLCP_FRAG_ORD(Signal* signal)
void
Pgman::execEND_LCP_REQ(Signal* signal)
{
if (ERROR_INSERTED(11008))
{
ndbout_c("Ignore END_LCP");
return;
}

EndLcpReq* req = (EndLcpReq*)signal->getDataPtr();
m_end_lcp_req = *req;

@@ -1274,7 +1298,8 @@ Pgman::process_lcp(Signal* signal)
{
DBG_LCP(" pageout()" << endl);
ptr.p->m_state |= Page_entry::LCP;
c_tup->disk_page_unmap_callback(ptr.p->m_real_page_i,
c_tup->disk_page_unmap_callback(0,
ptr.p->m_real_page_i,
ptr.p->m_dirty_count);
pageout(signal, ptr);
}
@@ -1301,6 +1326,13 @@ Pgman::process_lcp(Signal* signal)
}
else
{
if (ERROR_INSERTED(11007))
{
ndbout << "No more writes..." << endl;
signal->theData[0] = 9999;
sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 10000, 1);
SET_ERROR_INSERT_VALUE(11008);
}
signal->theData[0] = m_end_lcp_req.senderData;
sendSignal(m_end_lcp_req.senderRef, GSN_END_LCP_CONF, signal, 1, JBB);
}
@@ -1489,6 +1521,10 @@ Pgman::fswriteconf(Signal* signal, Ptr<Page_entry> ptr)
Page_state state = ptr.p->m_state;
ndbrequire(state & Page_entry::PAGEOUT);

c_tup->disk_page_unmap_callback(1,
ptr.p->m_real_page_i,
ptr.p->m_dirty_count);

state &= ~ Page_entry::PAGEOUT;
state &= ~ Page_entry::EMPTY;
state &= ~ Page_entry::DIRTY;
@@ -1588,8 +1624,11 @@ Pgman::fswritereq(Signal* signal, Ptr<Page_entry> ptr)
}
#endif

sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal,
FsReadWriteReq::FixedLength + 1, JBA);
if (!ERROR_INSERTED(11008))
{
sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal,
FsReadWriteReq::FixedLength + 1, JBA);
}
}

void
@@ -1739,7 +1778,7 @@ Pgman::get_page(Signal* signal, Ptr<Page_entry> ptr, Page_request page_req)
#endif

state |= Page_entry::REQUEST;
if (only_request && req_flags & Page_request::EMPTY_PAGE)
if (only_request && (req_flags & Page_request::EMPTY_PAGE))
{
state |= Page_entry::EMPTY;
}
@@ -2401,7 +2440,8 @@ Pgman::execDUMP_STATE_ORD(Signal* signal)
if (pl_hash.find(ptr, key))
{
ndbout << "pageout " << ptr << endl;
c_tup->disk_page_unmap_callback(ptr.p->m_real_page_i,
c_tup->disk_page_unmap_callback(0,
ptr.p->m_real_page_i,
ptr.p->m_dirty_count);
pageout(signal, ptr);
}
@@ -2452,6 +2492,16 @@ Pgman::execDUMP_STATE_ORD(Signal* signal)
{
SET_ERROR_INSERT_VALUE(11006);
}

if (signal->theData[0] == 11007)
{
SET_ERROR_INSERT_VALUE(11007);
}

if (signal->theData[0] == 11008)
{
SET_ERROR_INSERT_VALUE(11008);
}
}

// page cache client
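
/*
 * Editor's note (not part of the commit): the Pgman hunks above gate the
 * new "stop writing" behaviour behind ERROR_INSERTED(11007)/(11008)
 * fault-injection points, armed from a DUMP command as in the last hunk.
 * A minimal sketch of that pattern using a plain global; the real macros
 * live in the NDB kernel, so names here are hypothetical:
 */
static unsigned g_error_insert_value = 0;
#define SET_ERROR_INSERT_VALUE_SKETCH(v) (g_error_insert_value = (v))
#define ERROR_INSERTED_SKETCH(v) (g_error_insert_value == (v))

static bool should_send_fswrite_sketch() {
  // With error 11008 armed, file writes are suppressed so a test can
  // observe an LCP that never completes and exercise the recovery path.
  return !ERROR_INSERTED_SKETCH(11008);
}
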

@@ -3655,6 +3655,8 @@ Suma::execSUB_GCP_COMPLETE_REP(Signal* signal)

if(m_gcp_complete_rep_count && !c_subscriber_nodes.isclear())
{
CRASH_INSERTION(13033);

NodeReceiverGroup rg(API_CLUSTERMGR, c_subscriber_nodes);
sendSignal(rg, GSN_SUB_GCP_COMPLETE_REP, signal,
SubGcpCompleteRep::SignalLength, JBB);
@@ -3674,8 +3676,8 @@ Suma::execSUB_GCP_COMPLETE_REP(Signal* signal)
{
if(m_active_buckets.get(i))
continue;

if(c_buckets[i].m_buffer_tail != RNIL)

if (!c_subscriber_nodes.isclear())
{
//Uint32* dst;
get_buffer_ptr(signal, i, gci, 0);

@@ -299,7 +299,7 @@ Tsman::execDUMP_STATE_ORD(Signal* signal){
Uint32 new_bits = curr_bits ^ rand();
Local_key key = chunks[chunk].start_page;
key.m_page_no += page;
ndbrequire(update_page_free_bits(signal, &key, new_bits, 0) == 0);
ndbrequire(update_page_free_bits(signal, &key, new_bits) == 0);
}
}
}
@@ -366,6 +366,20 @@ Tsman::execCREATE_FILEGROUP_REQ(Signal* signal){
CreateFilegroupImplRef::SignalLength, JBB);
}

NdbOut&
operator<<(NdbOut& out, const File_formats::Datafile::Extent_header & obj)
{
out << "table: " << obj.m_table
<< " fragment: " << obj.m_fragment_id << " ";
for(Uint32 i = 0; i<32; i++)
{
char t[2];
BaseString::snprintf(t, sizeof(t), "%x", obj.get_free_bits(i));
out << t;
}
return out;
}

void
Tsman::execDROP_FILEGROUP_REQ(Signal* signal){
jamEntry();
@@ -1590,8 +1604,7 @@ Tsman::execFREE_EXTENT_REQ(Signal* signal)
int
Tsman::update_page_free_bits(Signal* signal,
Local_key *key,
unsigned committed_bits,
Uint64 lsn)
unsigned committed_bits)
{
jamEntry();

@@ -1626,6 +1639,18 @@ Tsman::update_page_free_bits(Signal* signal,
File_formats::Datafile::Extent_header* header =
page->get_header(val.m_extent_no, val.m_extent_size);

if (header->m_table == RNIL)
{
ndbout << "update page free bits page: " << *key
<< " " << *header << endl;
}

if (0)
{
ndbout << "update page free bits page(" << committed_bits << ") "
<< *key << " " << *header << endl;
}

ndbrequire(header->m_table != RNIL);

Uint32 page_no_in_extent = calc_page_no_in_extent(key->m_page_no, &val);
@@ -1637,7 +1662,7 @@ Tsman::update_page_free_bits(Signal* signal,
Uint32 src = header->get_free_bits(page_no_in_extent) & UNCOMMITTED_MASK;
header->update_free_bits(page_no_in_extent, src | committed_bits);

m_page_cache_client.update_lsn(preq.m_page, lsn);
m_page_cache_client.update_lsn(preq.m_page, 0);

return 0;
}
@@ -1725,6 +1750,11 @@ Tsman::unmap_page(Signal* signal, Local_key *key, Uint32 uncommitted_bits)
File_formats::Datafile::Extent_header* header =
page->get_header(val.m_extent_no, val.m_extent_size);

if (header->m_table == RNIL)
{
ndbout << "trying to unmap page: " << *key
<< " " << *header << endl;
}
ndbrequire(header->m_table != RNIL);

Uint32 page_no_in_extent = calc_page_no_in_extent(key->m_page_no, &val);
@@ -1746,9 +1776,7 @@ Tsman::restart_undo_page_free_bits(Signal* signal,
Uint32 tableId,
Uint32 fragId,
Local_key *key,
unsigned bits,
Uint64 undo_lsn,
Uint64 page_lsn)
unsigned bits)
{
jamEntry();

@@ -1782,21 +1810,7 @@ Tsman::restart_undo_page_free_bits(Signal* signal,
(File_formats::Datafile::Extent_page*)ptr_p;
File_formats::Datafile::Extent_header* header =
page->get_header(val.m_extent_no, val.m_extent_size);

Uint64 lsn = 0;
lsn += page->m_page_header.m_page_lsn_hi; lsn <<= 32;
lsn += page->m_page_header.m_page_lsn_lo;

if (undo_lsn > lsn && undo_lsn > page_lsn)
{
if (DBG_UNDO)
ndbout << "tsman: ignore " << undo_lsn << "(" << lsn << ", "
<< page_lsn << ") "
<< *key << " "
<< " -> " << bits << endl;
return 0;
}

if (header->m_table == RNIL)
{
if (DBG_UNDO)
@@ -1815,7 +1829,7 @@ Tsman::restart_undo_page_free_bits(Signal* signal,
*/
if (DBG_UNDO)
{
ndbout << "tsman: apply " << undo_lsn << "(" << lsn << ") "
ndbout << "tsman: apply "
<< *key << " " << (src & COMMITTED_MASK)
<< " -> " << bits << endl;
}
@@ -1863,7 +1877,7 @@ Tsman::execALLOC_PAGE_REQ(Signal* signal)
/**
* Handling of unmapped extent header pages is not implemented
*/
int flags = 0;
int flags = Page_cache_client::DIRTY_REQ;
int real_page_id;
Uint32 page_no;
Uint32 src_bits;

@@ -209,12 +209,12 @@ private:
void load_extent_page_callback(Signal*, Uint32, Uint32);
void create_file_ref(Signal*, Ptr<Tablespace>, Ptr<Datafile>,
Uint32,Uint32,Uint32);
int update_page_free_bits(Signal*, Local_key*, unsigned committed_bits,
Uint64 lsn);
int update_page_free_bits(Signal*, Local_key*, unsigned committed_bits);

int get_page_free_bits(Signal*, Local_key*, unsigned*, unsigned*);
int unmap_page(Signal*, Local_key*, unsigned uncommitted_bits);
int restart_undo_page_free_bits(Signal*, Uint32, Uint32, Local_key*,
unsigned committed_bits, Uint64, Uint64);
unsigned committed_bits);

int alloc_extent(Signal* signal, Uint32 tablespace, Local_key* key);
int alloc_page_from_extent(Signal*, Uint32, Local_key*, Uint32 bits);
@@ -320,7 +320,7 @@ public:
/**
* Update page free bits
*/
int update_page_free_bits(Local_key*, unsigned bits, Uint64 lsn);
int update_page_free_bits(Local_key*, unsigned bits);

/**
* Get page free bits
@@ -336,8 +336,7 @@ public:
/**
* Undo handling of page bits
*/
int restart_undo_page_free_bits(Local_key*, unsigned bits,
Uint64 lsn, Uint64 page_lsn);
int restart_undo_page_free_bits(Local_key*, unsigned bits);

/**
* Get tablespace info
@@ -417,10 +416,9 @@ Tablespace_client::free_extent(Local_key* key, Uint64 lsn)
inline
int
Tablespace_client::update_page_free_bits(Local_key *key,
unsigned committed_bits,
Uint64 lsn)
unsigned committed_bits)
{
return m_tsman->update_page_free_bits(m_signal, key, committed_bits, lsn);
return m_tsman->update_page_free_bits(m_signal, key, committed_bits);
}

inline
@@ -442,17 +440,13 @@ Tablespace_client::unmap_page(Local_key *key, unsigned uncommitted_bits)
inline
int
Tablespace_client::restart_undo_page_free_bits(Local_key* key,
unsigned committed_bits,
Uint64 lsn,
Uint64 page_lsn)
unsigned committed_bits)
{
return m_tsman->restart_undo_page_free_bits(m_signal,
m_table_id,
m_fragment_id,
key,
committed_bits,
lsn,
page_lsn);
committed_bits);
}

#endif
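
/*
 * Editor's note (not part of the commit): the Tsman hunks above drop the
 * LSN parameters and keep only the free-bits bookkeeping, where the
 * uncommitted half of a page's free-space nibble is preserved while the
 * committed half is overwritten. A sketch of that read-modify-write,
 * assuming the same COMMITTED/UNCOMMITTED mask convention; the mask
 * values below are illustrative, not the real NDB constants:
 */
typedef unsigned int Uint32;
enum { COMMITTED_MASK_SKETCH = 0x3, UNCOMMITTED_MASK_SKETCH = 0xC };

static Uint32 merge_free_bits_sketch(Uint32 current, Uint32 committed_bits) {
  Uint32 src = current & UNCOMMITTED_MASK_SKETCH; // keep uncommitted part
  return src | (committed_bits & COMMITTED_MASK_SKETCH);
}
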

@@ -269,8 +269,8 @@ static const char* helpText =
"CLUSTERLOG TOGGLE [<severity>] ... Toggle severity filter on/off\n"
"CLUSTERLOG INFO Print cluster log information\n"
"<id> START Start data node (started with -n)\n"
"<id> RESTART [-n] [-i] Restart data or management server node\n"
"<id> STOP Stop data or management server node\n"
"<id> RESTART [-n] [-i] [-a] Restart data or management server node\n"
"<id> STOP [-a] Stop data or management server node\n"
"ENTER SINGLE USER MODE <id> Enter single user mode\n"
"EXIT SINGLE USER MODE Exit single user mode\n"
"<id> STATUS Print status\n"
@@ -434,7 +434,7 @@ static const char* helpTextRestart =
" NDB Cluster -- Management Client -- Help for RESTART command\n"
"---------------------------------------------------------------------------\n"
"RESTART Restart data or management server node\n\n"
"<id> RESTART [-n] [-i] \n"
"<id> RESTART [-n] [-i] [-a]\n"
" Restart the data or management node <id>(or All data nodes).\n\n"
" -n (--nostart) restarts the node but does not\n"
" make it join the cluster. Use '<id> START' to\n"
@@ -445,6 +445,7 @@ static const char* helpTextRestart =
" in the same node group during start up.\n\n"
" Consult the documentation before using -i.\n\n"
" INCORRECT USE OF -i WILL CAUSE DATA LOSS!\n"
" -a Aborts the node, not syncing GCP.\n"
;

static const char* helpTextStop =
@@ -452,10 +453,11 @@ static const char* helpTextStop =
" NDB Cluster -- Management Client -- Help for STOP command\n"
"---------------------------------------------------------------------------\n"
"STOP Stop data or management server node\n\n"
"<id> STOP Stop the data or management server node <id>.\n\n"
"<id> STOP [-a] Stop the data or management server node <id>.\n\n"
" ALL STOP will just stop all data nodes.\n\n"
" If you desire to also shut down management servers,\n"
" use SHUTDOWN instead.\n"
" use SHUTDOWN instead.\n"
" -a Aborts the node, not syncing GCP.\n"
;

static const char* helpTextEnterSingleUserMode =

@@ -234,10 +234,10 @@ MgmtSrvr::startEventLog()
}
}

void
MgmtSrvr::stopEventLog()
void
MgmtSrvr::stopEventLog()
{
// Nothing yet
g_eventLogger.close();
}

bool

@@ -1141,7 +1141,7 @@ NdbBlob::readTableParts(char* buf, Uint32 part, Uint32 count)
* table tuple does not fully protect blob parts since DBTUP
* commits each tuple separately.
*/
tOp->readTuple() == -1 ||
tOp->readTuple(NdbOperation::LM_SimpleRead) == -1 ||
setPartKeyValue(tOp, part + n) == -1 ||
tOp->getValue((Uint32)3, buf) == NULL) {
setErrorCode(tOp);

@@ -85,6 +85,9 @@ int NdbIndexOperation::readTuple(NdbOperation::LockMode lm)
case LM_CommittedRead:
return readTuple();
break;
case LM_SimpleRead:
return readTuple();
break;
default:
return -1;
};

@@ -429,3 +429,9 @@ NdbOperation::getTable() const
{
return m_currentTable;
}

NdbTransaction*
NdbOperation::getNdbTransaction()
{
return theNdbCon;
}

@@ -131,6 +131,8 @@ NdbOperation::readTuple(NdbOperation::LockMode lm)
case LM_CommittedRead:
return committedRead();
break;
case LM_SimpleRead:
return simpleRead();
default:
return -1;
};
@@ -185,24 +187,22 @@ NdbOperation::readTupleExclusive()
int
NdbOperation::simpleRead()
{
/**
* Currently/still disabled
*/
return readTuple();
#if 0
NdbTransaction* tNdbCon = theNdbCon;
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
theOperationType = ReadRequest;
theSimpleIndicator = 1;
theDirtyIndicator = 0;
theErrorLine = tErrorLine++;
theLockMode = LM_Read;
theLockMode = LM_SimpleRead;
m_abortOption = AO_IgnoreError;
tNdbCon->theSimpleState = 0;
return 0;
} else {
setErrorCode(4200);
return -1;
}//if
#endif
}//NdbOperation::simpleRead()

/*****************************************************************************
@@ -338,28 +338,32 @@ NdbOperation::setReadLockMode(LockMode lockMode)
{
/* We only support changing lock mode for read operations at this time. */
assert(theOperationType == ReadRequest || theOperationType == ReadExclusive);
switch (lockMode)
{
case LM_CommittedRead:
theOperationType= ReadRequest;
theSimpleIndicator= 1;
theDirtyIndicator= 1;
break;
case LM_Read:
theNdbCon->theSimpleState= 0;
theOperationType= ReadRequest;
theSimpleIndicator= 0;
theDirtyIndicator= 0;
break;
case LM_Exclusive:
theNdbCon->theSimpleState= 0;
theOperationType= ReadExclusive;
theSimpleIndicator= 0;
theDirtyIndicator= 0;
break;
default:
/* Not supported / invalid. */
assert(false);
switch (lockMode) {
case LM_CommittedRead: /* TODO, check theNdbCon->theSimpleState */
theOperationType= ReadRequest;
theSimpleIndicator= 1;
theDirtyIndicator= 1;
break;
case LM_SimpleRead: /* TODO, check theNdbCon->theSimpleState */
theOperationType= ReadRequest;
theSimpleIndicator= 1;
theDirtyIndicator= 0;
break;
case LM_Read:
theNdbCon->theSimpleState= 0;
theOperationType= ReadRequest;
theSimpleIndicator= 0;
theDirtyIndicator= 0;
break;
case LM_Exclusive:
theNdbCon->theSimpleState= 0;
theOperationType= ReadExclusive;
theSimpleIndicator= 0;
theDirtyIndicator= 0;
break;
default:
/* Not supported / invalid. */
assert(false);
}
theLockMode= lockMode;
}
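
/*
 * Editor's note (not part of the commit): setReadLockMode() above maps
 * each LockMode onto the simple/dirty indicator flags; LM_SimpleRead is
 * the new case (simple = 1, dirty = 0). A condensed sketch of the same
 * mapping; the case numbering is illustrative, not the real enum values:
 */
struct ReadFlagsSketch { bool simple; bool dirty; bool exclusive; };

static ReadFlagsSketch flags_for_lock_mode_sketch(int lm) {
  // 0=LM_Read, 1=LM_Exclusive, 2=LM_CommittedRead, 3=LM_SimpleRead
  switch (lm) {
  case 2:  return { true,  true,  false }; // committed (dirty) read
  case 3:  return { true,  false, false }; // simple read: no dirty flag
  case 1:  return { false, false, true  }; // exclusive read
  default: return { false, false, false }; // plain shared read
  }
}
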

@@ -404,9 +408,8 @@ NdbOperation::getValue_impl(const NdbColumnImpl* tAttrInfo, char* aValue)
return NULL;
}//if
}//if
Uint32 ah;
AttributeHeader::init(&ah, tAttrInfo->m_attrId, 0);
if (insertATTRINFO(ah) != -1) {
AttributeHeader ah(tAttrInfo->m_attrId, 0);
if (insertATTRINFO(ah.m_value) != -1) {
// Insert Attribute Id into ATTRINFO part.

/************************************************************************
@@ -532,12 +535,11 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
tAttrId = tAttrInfo->m_attrId;
m_no_disk_flag &= (tAttrInfo->m_storageType == NDB_STORAGETYPE_DISK ? 0:1);
const char *aValue = aValuePassed;
Uint32 ahValue;
if (aValue == NULL) {
if (tAttrInfo->m_nullable) {
AttributeHeader& ah = AttributeHeader::init(&ahValue, tAttrId, 0);
AttributeHeader ah(tAttrId, 0);
ah.setNULL();
insertATTRINFO(ahValue);
insertATTRINFO(ah.m_value);
// Insert Attribute Id with the value
// NULL into ATTRINFO part.
DBUG_RETURN(0);
@@ -573,8 +575,8 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,

// Excluding bits in last word
const Uint32 sizeInWords = sizeInBytes / 4;
(void) AttributeHeader::init(&ahValue, tAttrId, sizeInBytes);
insertATTRINFO( ahValue );
AttributeHeader ah(tAttrId, sizeInBytes);
insertATTRINFO( ah.m_value );

/***********************************************************************
* Check if the pointer of the value passed is aligned on a 4 byte boundary.

@@ -175,12 +175,11 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr,
Uint8 tInterpretIndicator = theInterpretIndicator;
Uint8 tNoDisk = m_no_disk_flag;

//-------------------------------------------------------------
// Simple state is set if start and commit is set and it is
// a read request. Otherwise it is set to zero.
//-------------------------------------------------------------
/**
* A dirty read, can not abort the transaction
*/
Uint8 tReadInd = (theOperationType == ReadRequest);
Uint8 tSimpleState = tReadInd & tSimpleIndicator;
Uint8 tDirtyState = tReadInd & tDirtyIndicator;

tcKeyReq->transId1 = tTransId1;
tcKeyReq->transId2 = tTransId2;
@@ -206,8 +205,8 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr,
tcKeyReq->setOperationType(tReqInfo, tOperationType);
tcKeyReq->setKeyLength(tReqInfo, tTupKeyLen);

// A simple read is always ignore error
abortOption = tSimpleState ? (Uint8) AO_IgnoreError : (Uint8) abortOption;
// A dirty read is always ignore error
abortOption = tDirtyState ? (Uint8) AO_IgnoreError : (Uint8) abortOption;
tcKeyReq->setAbortOption(tReqInfo, abortOption);
m_abortOption = abortOption;

@@ -549,8 +548,8 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal)
theStatus = Finished;
theReceiver.m_received_result_length = ~0;

// not simple read
if(! (theOperationType == ReadRequest && theSimpleIndicator))
// not dirty read
if(! (theOperationType == ReadRequest && theDirtyIndicator))
{
theNdbCon->OpCompleteFailure(this);
return -1;

@@ -283,7 +283,7 @@ NdbReceiver::execTRANSID_AI(const Uint32* aDataPtr, Uint32 aLength)
Uint32 tmp = m_received_result_length + aLength;
m_received_result_length = tmp;

return (tmp == exp || (exp > TcKeyConf::SimpleReadBit) ? 1 : 0);
return (tmp == exp || (exp > TcKeyConf::DirtyReadBit) ? 1 : 0);
}

int

@@ -14,11 +14,15 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

#include <NdbScanFilter.hpp>
#include <Ndb.hpp>
#include <NdbOperation.hpp>
#include "NdbDictionaryImpl.hpp"
#include <Vector.hpp>
#include <NdbOut.hpp>
#include <Interpreter.hpp>
#include <signaldata/AttrInfo.hpp>
#include "NdbApiSignal.hpp"
#include "NdbUtil.hpp"

#ifdef VM_TRACE
#include <NdbEnv.h>
@@ -52,14 +56,37 @@ public:

int cond_col_const(Interpreter::BinaryCondition, Uint32 attrId,
const void * value, Uint32 len);

bool m_abort_on_too_large;

NdbOperation::OperationStatus m_initial_op_status;
Uint32 m_initial_AI_size;
Uint32 m_max_size;

Uint32 get_size() {
assert(m_operation->theTotalCurrAI_Len >= m_initial_AI_size);
return m_operation->theTotalCurrAI_Len - m_initial_AI_size;
}
bool check_size() {
if (get_size() <= m_max_size)
return true;
handle_filter_too_large();
return false;
}
void handle_filter_too_large();

NdbError m_error;
};

const Uint32 LabelExit = ~0;

NdbScanFilter::NdbScanFilter(class NdbOperation * op)
NdbScanFilter::NdbScanFilter(class NdbOperation * op,
bool abort_on_too_large,
Uint32 max_size)
: m_impl(* new NdbScanFilterImpl())
{
DBUG_ENTER("NdbScanFilter::NdbScanFilter");
m_impl.m_current.m_group = (NdbScanFilter::Group)0;
m_impl.m_current.m_popCount = 0;
m_impl.m_current.m_ownLabel = 0;
@@ -69,6 +96,21 @@ NdbScanFilter::NdbScanFilter(class NdbOperation * op)
m_impl.m_latestAttrib = ~0;
m_impl.m_operation = op;
m_impl.m_negative = 0;

DBUG_PRINT("info", ("op status: %d tot AI: %u in curr: %u",
op->theStatus,
op->theTotalCurrAI_Len, op->theAI_LenInCurrAI));

m_impl.m_abort_on_too_large = abort_on_too_large;

m_impl.m_initial_op_status = op->theStatus;
m_impl.m_initial_AI_size = op->theTotalCurrAI_Len;
if (max_size > NDB_MAX_SCANFILTER_SIZE_IN_WORDS)
max_size = NDB_MAX_SCANFILTER_SIZE_IN_WORDS;
m_impl.m_max_size = max_size;

m_impl.m_error.code = 0;
DBUG_VOID_RETURN;
}

NdbScanFilter::~NdbScanFilter(){
@@ -200,30 +242,38 @@ NdbScanFilter::end(){
switch(tmp.m_group){
case NdbScanFilter::AND:
if(tmp.m_trueLabel == (Uint32)~0){
m_impl.m_operation->interpret_exit_ok();
if (m_impl.m_operation->interpret_exit_ok() == -1)
return -1;
} else {
m_impl.m_operation->branch_label(tmp.m_trueLabel);
if (m_impl.m_operation->branch_label(tmp.m_trueLabel) == -1)
return -1;
}
break;
case NdbScanFilter::NAND:
if(tmp.m_trueLabel == (Uint32)~0){
m_impl.m_operation->interpret_exit_nok();
if (m_impl.m_operation->interpret_exit_nok() == -1)
return -1;
} else {
m_impl.m_operation->branch_label(tmp.m_falseLabel);
if (m_impl.m_operation->branch_label(tmp.m_falseLabel) == -1)
return -1;
}
break;
case NdbScanFilter::OR:
if(tmp.m_falseLabel == (Uint32)~0){
m_impl.m_operation->interpret_exit_nok();
if (m_impl.m_operation->interpret_exit_nok() == -1)
return -1;
} else {
m_impl.m_operation->branch_label(tmp.m_falseLabel);
if (m_impl.m_operation->branch_label(tmp.m_falseLabel) == -1)
return -1;
}
break;
case NdbScanFilter::NOR:
if(tmp.m_falseLabel == (Uint32)~0){
m_impl.m_operation->interpret_exit_ok();
if (m_impl.m_operation->interpret_exit_ok() == -1)
return -1;
} else {
m_impl.m_operation->branch_label(tmp.m_trueLabel);
if (m_impl.m_operation->branch_label(tmp.m_trueLabel) == -1)
return -1;
}
break;
default:
@@ -231,24 +281,29 @@ NdbScanFilter::end(){
return -1;
}

m_impl.m_operation->def_label(tmp.m_ownLabel);
if (m_impl.m_operation->def_label(tmp.m_ownLabel) == -1)
return -1;

if(m_impl.m_stack.size() == 0){
switch(tmp.m_group){
case NdbScanFilter::AND:
case NdbScanFilter::NOR:
m_impl.m_operation->interpret_exit_nok();
if (m_impl.m_operation->interpret_exit_nok() == -1)
return -1;
break;
case NdbScanFilter::OR:
case NdbScanFilter::NAND:
m_impl.m_operation->interpret_exit_ok();
if (m_impl.m_operation->interpret_exit_ok() == -1)
return -1;
break;
default:
m_impl.m_operation->setErrorCodeAbort(4260);
return -1;
}
}

if (!m_impl.check_size())
return -1;
return 0;
}

@@ -261,10 +316,16 @@ NdbScanFilter::istrue(){
}

if(m_impl.m_current.m_trueLabel == (Uint32)~0){
return m_impl.m_operation->interpret_exit_ok();
if (m_impl.m_operation->interpret_exit_ok() == -1)
return -1;
} else {
return m_impl.m_operation->branch_label(m_impl.m_current.m_trueLabel);
if (m_impl.m_operation->branch_label(m_impl.m_current.m_trueLabel) == -1)
return -1;
}

if (!m_impl.check_size())
return -1;
return 0;
}

int
@@ -276,12 +337,22 @@ NdbScanFilter::isfalse(){
}

if(m_impl.m_current.m_falseLabel == (Uint32)~0){
return m_impl.m_operation->interpret_exit_nok();
if (m_impl.m_operation->interpret_exit_nok() == -1)
return -1;
} else {
return m_impl.m_operation->branch_label(m_impl.m_current.m_falseLabel);
if (m_impl.m_operation->branch_label(m_impl.m_current.m_falseLabel) == -1)
return -1;
}

if (!m_impl.check_size())
return -1;
return 0;
}

NdbOperation *
NdbScanFilter::getNdbOperation(){
return m_impl.m_operation;
}

#define action(x, y, z)

@@ -330,7 +401,11 @@ NdbScanFilterImpl::cond_col(Interpreter::UnaryCondition op, Uint32 AttrId){
}

Branch1 branch = table2[op].m_branches[m_current.m_group];
(m_operation->* branch)(AttrId, m_current.m_ownLabel);
if ((m_operation->* branch)(AttrId, m_current.m_ownLabel) == -1)
return -1;

if (!check_size())
return -1;
return 0;
}

@@ -463,8 +538,12 @@ NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op,
return -1;
}

int ret = (m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel);
return ret;
if ((m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel) == -1)
return -1;

if (!check_size())
return -1;
return 0;
}

int
@@ -490,7 +569,130 @@ NdbScanFilter::cmp(BinaryCondition cond, int ColId,
return m_impl.cond_col_const(Interpreter::NOT_LIKE, ColId, val, len);
}
return -1;
}
}

void
NdbScanFilterImpl::handle_filter_too_large()
{
DBUG_ENTER("NdbScanFilterImpl::handle_filter_too_large");

NdbOperation* const op = m_operation;
m_error.code = NdbScanFilter::FilterTooLarge;
if (m_abort_on_too_large)
op->setErrorCodeAbort(m_error.code);

/*
* Possible interpreted parts at this point are:
*
* 1. initial read
* 2. interpreted program
*
* It is assumed that NdbScanFilter has created all of 2
* so that we don't have to save interpreter state.
*/

const Uint32 size = get_size();
assert(size != 0);

// new ATTRINFO size
const Uint32 new_size = m_initial_AI_size;

// find last signal for new size
assert(op->theFirstATTRINFO != NULL);
NdbApiSignal* lastSignal = op->theFirstATTRINFO;
Uint32 n = 0;
while (n + AttrInfo::DataLength < new_size) {
lastSignal = lastSignal->next();
assert(lastSignal != NULL);
n += AttrInfo::DataLength;
}
assert(n < size);

// release remaining signals
NdbApiSignal* tSignal = lastSignal->next();
op->theNdb->releaseSignalsInList(&tSignal);
lastSignal->next(NULL);

// length of lastSignal
const Uint32 new_curr = AttrInfo::HeaderLength + new_size - n;
assert(new_curr <= 25);

DBUG_PRINT("info", ("op status: %d->%d tot AI: %u->%u in curr: %u->%u",
op->theStatus, m_initial_op_status,
op->theTotalCurrAI_Len, new_size,
op->theAI_LenInCurrAI, new_curr));

// reset op state
op->theStatus = m_initial_op_status;

// reset interpreter state to initial

NdbBranch* tBranch = op->theFirstBranch;
while (tBranch != NULL) {
NdbBranch* tmp = tBranch;
tBranch = tBranch->theNext;
op->theNdb->releaseNdbBranch(tmp);
}
op->theFirstBranch = NULL;
op->theLastBranch = NULL;

NdbLabel* tLabel = op->theFirstLabel;
while (tLabel != NULL) {
NdbLabel* tmp = tLabel;
tLabel = tLabel->theNext;
op->theNdb->releaseNdbLabel(tmp);
}
op->theFirstLabel = NULL;
op->theLastLabel = NULL;

NdbCall* tCall = op->theFirstCall;
while (tCall != NULL) {
NdbCall* tmp = tCall;
tCall = tCall->theNext;
op->theNdb->releaseNdbCall(tmp);
}
op->theFirstCall = NULL;
op->theLastCall = NULL;

NdbSubroutine* tSubroutine = op->theFirstSubroutine;
while (tSubroutine != NULL) {
NdbSubroutine* tmp = tSubroutine;
tSubroutine = tSubroutine->theNext;
op->theNdb->releaseNdbSubroutine(tmp);
}
op->theFirstSubroutine = NULL;
op->theLastSubroutine = NULL;

op->theNoOfLabels = 0;
op->theNoOfSubroutines = 0;

// reset AI size
op->theTotalCurrAI_Len = new_size;
op->theAI_LenInCurrAI = new_curr;

// reset signal pointers
op->theCurrentATTRINFO = lastSignal;
op->theATTRINFOptr = &lastSignal->getDataPtrSend()[new_curr];

// interpreter sizes are set later somewhere

DBUG_VOID_RETURN;
}

static void
update(const NdbError & _err){
NdbError & error = (NdbError &) _err;
ndberror_struct ndberror = (ndberror_struct)error;
ndberror_update(&ndberror);
error = NdbError(ndberror);
}

const NdbError &
NdbScanFilter::getNdbError() const
{
update(m_impl.m_error);
return m_impl.m_error;
}

#if 0
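
/*
 * Editor's note (not part of the commit): the NdbScanFilter changes above
 * snapshot the operation's ATTRINFO length at construction time and, after
 * every emitted instruction, compare the growth against a clamped cap,
 * rolling the operation back via handle_filter_too_large() on overflow.
 * A minimal sketch of that guard, with hypothetical names:
 */
typedef unsigned int Uint32;

struct SizeGuardSketch {
  Uint32 initial;   // snapshot taken when the filter is created
  Uint32 max_words; // clamped to the protocol limit by the caller
  // Returns false when the program has outgrown the cap and the caller
  // should roll the operation back to its initial state. Assumes
  // current_total >= initial, as the real code asserts.
  bool check(Uint32 current_total) const {
    return (current_total - initial) <= max_words;
  }
};
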

@@ -237,6 +237,7 @@ NdbScanOperation::setReadLockMode(LockMode lockMode)
lockHoldMode= false;
readCommitted= true;
break;
case LM_SimpleRead:
case LM_Read:
lockExcl= false;
lockHoldMode= true;
@@ -872,6 +873,10 @@ NdbScanOperation::doSendScan(int aProcessorId)
// sending it. This could not be done in openScan because
// we created the ATTRINFO signals after the SCAN_TABREQ signal.
ScanTabReq * const req = CAST_PTR(ScanTabReq, tSignal->getDataPtrSend());
if (unlikely(theTotalCurrAI_Len > ScanTabReq::MaxTotalAttrInfo)) {
setErrorCode(4257);
return -1;
}
req->attrLenKeyLen = (tupKeyLen << 16) | theTotalCurrAI_Len;
Uint32 tmp = req->requestInfo;
ScanTabReq::setDistributionKeyFlag(tmp, theDistrKeyIndicator_);

@@ -1787,8 +1787,8 @@ from other transactions.
const Uint32 tAttrInfoLen = *tPtr++;
if (tOp && tOp->checkMagicNumber()) {
Uint32 done = tOp->execTCOPCONF(tAttrInfoLen);
if(tAttrInfoLen > TcKeyConf::SimpleReadBit){
Uint32 node = tAttrInfoLen & (~TcKeyConf::SimpleReadBit);
if(tAttrInfoLen > TcKeyConf::DirtyReadBit){
Uint32 node = tAttrInfoLen & (~TcKeyConf::DirtyReadBit);
NdbNodeBitmask::set(m_db_nodes, node);
if(NdbNodeBitmask::get(m_failed_db_nodes, node) && !done)
{
@@ -2182,7 +2182,7 @@ NdbTransaction::report_node_failure(Uint32 id){
* 4) X X
*/
NdbOperation* tmp = theFirstExecOpInList;
const Uint32 len = TcKeyConf::SimpleReadBit | id;
const Uint32 len = TcKeyConf::DirtyReadBit | id;
Uint32 tNoComp = theNoOfOpCompleted;
Uint32 tNoSent = theNoOfOpSent;
Uint32 count = 0;
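
/*
 * Editor's note (not part of the commit): the receiver/transaction hunks
 * above rename SimpleReadBit to DirtyReadBit; the bit is ORed with a node
 * id into a single length word, so any value above the bit means "dirty
 * read answered by node (value & ~bit)". A sketch of that encoding; the
 * bit value below is illustrative, not the real TcKeyConf constant:
 */
typedef unsigned int Uint32;
const Uint32 DIRTY_READ_BIT_SKETCH = 0x80000000;

static Uint32 encode_dirty_read(Uint32 nodeId) {
  return DIRTY_READ_BIT_SKETCH | nodeId;
}
static bool is_dirty_read(Uint32 len) {
  return len > DIRTY_READ_BIT_SKETCH;
}
static Uint32 dirty_read_node(Uint32 len) {
  return len & ~DIRTY_READ_BIT_SKETCH;
}
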

@@ -624,6 +624,7 @@ ErrorBundle ErrorCodes[] = {
{ 4273, DMEC, IE, "No blob table in dict cache" },
{ 4274, DMEC, IE, "Corrupted main table PK in blob operation" },
{ 4275, DMEC, AE, "The blob method is incompatible with operation type or lock mode" },
{ 4294, DMEC, AE, "Scan filter is too large, discarded" },

{ NO_CONTACT_WITH_PROCESS, DMEC, AE,
"No contact with the process (dead ?)."},

@@ -36,6 +36,16 @@ public:
int updateValue = 0,
bool abort = false);

int loadTableStartFrom(Ndb*,
int startFrom,
int records,
int batch = 512,
bool allowConstraintViolation = true,
int doSleep = 0,
bool oneTrans = false,
int updateValue = 0,
bool abort = false);

int scanReadRecords(Ndb*,
int records,
int abort = 0,
@@ -56,6 +66,11 @@ public:
int batchsize = 1,
NdbOperation::LockMode = NdbOperation::LM_Read);

int scanUpdateRecords(Ndb*, NdbScanOperation::ScanFlag,
int records,
int abort = 0,
int parallelism = 0);

int scanUpdateRecords(Ndb*,
int records,
int abort = 0,
@@ -90,9 +105,12 @@ public:
int records,
int percentToLock = 1,
int lockTime = 1000);

int fillTable(Ndb*,
int batch=512);

int fillTableStartFrom(Ndb*, int startFrom, int batch=512);

/**
* Reading using UniqHashIndex with key = pk
*/

@@ -29,6 +29,11 @@ public:

int closeTransaction(Ndb*);

int clearTable(Ndb*,
NdbScanOperation::ScanFlag,
int records = 0,
int parallelism = 0);

int clearTable(Ndb*,
int records = 0,
int parallelism = 0);

@@ -136,31 +136,13 @@ int runPkRead(NDBT_Context* ctx, NDBT_Step* step){
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
int batchSize = ctx->getProperty("BatchSize", 1);
int lm = ctx->getProperty("LockMode", NdbOperation::LM_Read);
int i = 0;
HugoTransactions hugoTrans(*ctx->getTab());
while (i<loops) {
g_info << i << ": ";
if (hugoTrans.pkReadRecords(GETNDB(step), records, batchSize) != NDBT_OK){
g_info << endl;
return NDBT_FAILED;
}
i++;
}
g_info << endl;
return NDBT_OK;
}

int runPkDirtyRead(NDBT_Context* ctx, NDBT_Step* step){
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
int batchSize = ctx->getProperty("BatchSize", 1);
int i = 0;
bool dirty = true;
HugoTransactions hugoTrans(*ctx->getTab());
while (i<loops) {
g_info << i << ": ";
if (hugoTrans.pkReadRecords(GETNDB(step), records, batchSize,
NdbOperation::LM_CommittedRead) != NDBT_OK){
if (hugoTrans.pkReadRecords(GETNDB(step), records, batchSize,
(NdbOperation::LockMode)lm) != NDBT_OK){
g_info << endl;
return NDBT_FAILED;
}
@@ -1552,14 +1534,23 @@ TESTCASE("PkInsert",
}
TESTCASE("PkRead",
"Verify that we can insert, read and delete from this table using PK"){
TC_PROPERTY("LockMode", NdbOperation::LM_Read);
INITIALIZER(runLoadTable);
STEP(runPkRead);
FINALIZER(runClearTable);
}
TESTCASE("PkDirtyRead",
"Verify that we can insert, dirty read and delete from this table using PK"){
TC_PROPERTY("LockMode", NdbOperation::LM_Dirty);
INITIALIZER(runLoadTable);
STEP(runPkDirtyRead);
STEP(runPkRead);
FINALIZER(runClearTable);
}
TESTCASE("PkSimpleRead",
"Verify that we can insert, simple read and delete from this table using PK"){
TC_PROPERTY("LockMode", NdbOperation::LM_SimpleRead);
INITIALIZER(runLoadTable);
STEP(runPkRead);
FINALIZER(runClearTable);
}
TESTCASE("PkUpdate",

@@ -684,7 +684,7 @@ int runTestFragmentTypes(NDBT_Context* ctx, NDBT_Step* step){
CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0);
CHECK(count == records);
CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0);
CHECK(count == (records/2));

@@ -862,7 +862,7 @@ int runPkSizes(NDBT_Context* ctx, NDBT_Step* step){
CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0);
CHECK(count == records);
CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0);
CHECK(count == (records/2));
CHECK(utilTrans.clearTable(pNdb, records) == 0);
@@ -2706,7 +2706,262 @@ runDictRestart(NDBT_Context* ctx, NDBT_Step* step)
return NDBT_OK;
}

int
runBug29501(NDBT_Context* ctx, NDBT_Step* step) {
NdbRestarter res;
NdbDictionary::LogfileGroup lg;
lg.setName("DEFAULT-LG");
lg.setUndoBufferSize(8*1024*1024);

if (res.getNumDbNodes() < 2)
return NDBT_OK;

Ndb* pNdb = GETNDB(step);
NdbDictionary::Dictionary* pDict = pNdb->getDictionary();

int node = res.getRandomNotMasterNodeId(rand());
res.restartOneDbNode(node, true, true, false);

if(pDict->createLogfileGroup(lg) != 0){
g_err << "Failed to create logfilegroup:"
<< endl << pDict->getNdbError() << endl;
return NDBT_FAILED;
}

NdbDictionary::Undofile uf;
uf.setPath("undofile01.dat");
uf.setSize(5*1024*1024);
uf.setLogfileGroup("DEFAULT-LG");

if(pDict->createUndofile(uf) != 0){
g_err << "Failed to create undofile:"
<< endl << pDict->getNdbError() << endl;
return NDBT_FAILED;
}

res.waitNodesNoStart(&node, 1);
res.startNodes(&node, 1);

if (res.waitClusterStarted()){
g_err << "Node restart failed"
<< endl << pDict->getNdbError() << endl;
return NDBT_FAILED;
}

if (pDict->dropLogfileGroup(pDict->getLogfileGroup(lg.getName())) != 0){
g_err << "Drop of LFG Failed"
<< endl << pDict->getNdbError() << endl;
return NDBT_FAILED;
}

return NDBT_OK;
}

int
runDropDDObjects(NDBT_Context* ctx, NDBT_Step* step){
//Purpose is to drop all tables, data files, Table spaces and LFG's
Uint32 i = 0;

Ndb* pNdb = GETNDB(step);
NdbDictionary::Dictionary* pDict = pNdb->getDictionary();

NdbDictionary::Dictionary::List list;
if (pDict->listObjects(list) == -1)
return NDBT_FAILED;

//Search the list and drop all tables found
const char * tableFound = 0;
for (i = 0; i < list.count; i++){
switch(list.elements[i].type){
case NdbDictionary::Object::UserTable:
tableFound = list.elements[i].name;
if(tableFound != 0){
if(pDict->dropTable(tableFound) != 0){
g_err << "Failed to drop table: " << pDict->getNdbError() << endl;
return NDBT_FAILED;
}
}
tableFound = 0;
break;
default:
break;
}
}

//Search the list and drop all data file found
const char * dfFound = 0;
for (i = 0; i < list.count; i++){
switch(list.elements[i].type){
case NdbDictionary::Object::Datafile:
dfFound = list.elements[i].name;
if(dfFound != 0){
if(pDict->dropDatafile(pDict->getDatafile(0, dfFound)) != 0){
g_err << "Failed to drop datafile: " << pDict->getNdbError() << endl;
return NDBT_FAILED;
}
}
dfFound = 0;
break;
default:
break;
}
}

//Search the list and drop all Table Spaces Found
const char * tsFound = 0;
for (i = 0; i <list.count; i++){
switch(list.elements[i].type){
case NdbDictionary::Object::Tablespace:
tsFound = list.elements[i].name;
if(tsFound != 0){
if(pDict->dropTablespace(pDict->getTablespace(tsFound)) != 0){
g_err << "Failed to drop tablespace: " << pDict->getNdbError() << endl;
return NDBT_FAILED;
}
}
tsFound = 0;
break;
default:
break;
}
}

//Search the list and drop all LFG Found
//Currently only 1 LGF is supported, but written for future
//when more then one is supported.
const char * lgFound = 0;
for (i = 0; i < list.count; i++){
switch(list.elements[i].type){
case NdbDictionary::Object::LogfileGroup:
lgFound = list.elements[i].name;
if(lgFound != 0){
if (pDict->dropLogfileGroup(pDict->getLogfileGroup(lgFound)) != 0){
g_err << "Failed to drop tablespace: " << pDict->getNdbError() << endl;
return NDBT_FAILED;
}
}
lgFound = 0;
break;
default:
break;
}
}

return NDBT_OK;
}

int
runWaitStarted(NDBT_Context* ctx, NDBT_Step* step){

NdbRestarter restarter;
restarter.waitClusterStarted(300);

NdbSleep_SecSleep(3);
return NDBT_OK;
}

int
testDropDDObjectsSetup(NDBT_Context* ctx, NDBT_Step* step){
//Purpose is to setup to test DropDDObjects
char tsname[256];
char dfname[256];

Ndb* pNdb = GETNDB(step);
NdbDictionary::Dictionary* pDict = pNdb->getDictionary();

NdbDictionary::LogfileGroup lg;
lg.setName("DEFAULT-LG");
lg.setUndoBufferSize(8*1024*1024);

if(pDict->createLogfileGroup(lg) != 0){
g_err << "Failed to create logfilegroup:"
<< endl << pDict->getNdbError() << endl;
return NDBT_FAILED;
}

NdbDictionary::Undofile uf;
uf.setPath("undofile01.dat");
uf.setSize(5*1024*1024);
uf.setLogfileGroup("DEFAULT-LG");

if(pDict->createUndofile(uf) != 0){
g_err << "Failed to create undofile:"
<< endl << pDict->getNdbError() << endl;
return NDBT_FAILED;
}

BaseString::snprintf(tsname, sizeof(tsname), "TS-%u", rand());
BaseString::snprintf(dfname, sizeof(dfname), "%s-%u.dat", tsname, rand());

if (create_tablespace(pDict, lg.getName(), tsname, dfname)){
g_err << "Failed to create undofile:"
<< endl << pDict->getNdbError() << endl;
return NDBT_FAILED;
}

return NDBT_OK;
}

int
DropDDObjectsVerify(NDBT_Context* ctx, NDBT_Step* step){
//Purpose is to verify test DropDDObjects worked
Uint32 i = 0;

Ndb* pNdb = GETNDB(step);
NdbDictionary::Dictionary* pDict = pNdb->getDictionary();

NdbDictionary::Dictionary::List list;
if (pDict->listObjects(list) == -1)
return NDBT_FAILED;

bool ddFound = false;
for (i = 0; i <list.count; i++){
switch(list.elements[i].type){
case NdbDictionary::Object::Tablespace:
ddFound = true;
break;
case NdbDictionary::Object::LogfileGroup:
ddFound = true;
break;
default:
break;
}
if(ddFound == true){
g_err << "DropDDObjects Failed: DD found:"
<< endl;
return NDBT_FAILED;
}
}
return NDBT_OK;
}

NDBT_TESTSUITE(testDict);
TESTCASE("testDropDDObjects",
"* 1. start cluster\n"
"* 2. Create LFG\n"
"* 3. create TS\n"
"* 4. run DropDDObjects\n"
"* 5. Verify DropDDObjectsRestart worked\n"){
INITIALIZER(runWaitStarted);
INITIALIZER(runDropDDObjects);
INITIALIZER(testDropDDObjectsSetup);
STEP(runDropDDObjects);
FINALIZER(DropDDObjectsVerify);
}

TESTCASE("Bug29501",
"* 1. start cluster\n"
"* 2. Restart 1 node -abort -nostart\n"
"* 3. create LFG\n"
"* 4. Restart data node\n"
"* 5. Restart 1 node -nostart\n"
"* 6. Drop LFG\n"){
INITIALIZER(runWaitStarted);
INITIALIZER(runDropDDObjects);
STEP(runBug29501);
FINALIZER(runDropDDObjects);
}
TESTCASE("CreateAndDrop",
"Try to create and drop the table loop number of times\n"){
INITIALIZER(runCreateAndDrop);
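
/*
 * Editor's note (not part of the commit): runDropDDObjects() above makes
 * one pass per object type over Dictionary::listObjects() output, dropping
 * tables first, then datafiles, tablespaces and logfile groups, so that
 * dependents go before the objects they reference. A self-contained sketch
 * of that ordering with hypothetical types and a caller-supplied drop hook:
 */
#include <cstdio>

enum ObjTypeSketch { UserTable, Datafile, Tablespace, LogfileGroup };
struct ObjSketch { ObjTypeSketch type; const char* name; };

static bool drop_all_in_order(ObjSketch* objs, int n,
                              bool (*drop)(const ObjSketch&)) {
  // Dependents first: tables, then datafiles, tablespaces, logfile groups.
  const ObjTypeSketch order[] = { UserTable, Datafile, Tablespace,
                                  LogfileGroup };
  for (ObjTypeSketch t : order)
    for (int i = 0; i < n; i++)
      if (objs[i].type == t && !drop(objs[i])) {
        std::fprintf(stderr, "failed to drop %s\n", objs[i].name);
        return false;
      }
  return true;
}
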
|
||||
|
@ -809,7 +809,7 @@ int runSystemRestart1(NDBT_Context* ctx, NDBT_Step* step){
|
||||
CHECK(hugoTrans.pkUpdateRecords(pNdb, records) == 0);
|
||||
CHECK(utilTrans.verifyIndex(pNdb, idxName, 16, false) == 0);
|
||||
CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
|
||||
CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
|
||||
CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
|
||||
CHECK(utilTrans.verifyIndex(pNdb, idxName, 16, false) == 0);
|
||||
CHECK(utilTrans.clearTable(pNdb, records) == 0);
|
||||
CHECK(hugoTrans.loadTable(pNdb, records, 1) == 0);
|
||||
@ -834,7 +834,7 @@ int runSystemRestart1(NDBT_Context* ctx, NDBT_Step* step){
|
||||
CHECK(utilTrans.verifyIndex(pNdb, idxName, 16, false) == 0);
|
||||
CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
|
||||
CHECK(utilTrans.verifyIndex(pNdb, idxName, 16, false) == 0);
|
||||
CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
|
||||
CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
|
||||
CHECK(utilTrans.verifyIndex(pNdb, idxName, 16, false) == 0);
|
||||
CHECK(utilTrans.clearTable(pNdb, records) == 0);
|
||||
CHECK(hugoTrans.loadTable(pNdb, records, 1) == 0);
|
||||
|
@ -1762,6 +1762,80 @@ runBug28717(NDBT_Context* ctx, NDBT_Step* step)
|
||||
return NDBT_OK;
|
||||
}
|
||||
|
||||
int
|
||||
runBug31525(NDBT_Context* ctx, NDBT_Step* step)
|
||||
{
|
||||
int result = NDBT_OK;
|
||||
int loops = ctx->getNumLoops();
|
||||
int records = ctx->getNumRecords();
|
||||
Ndb* pNdb = GETNDB(step);
|
||||
NdbRestarter res;
|
||||
|
||||
if (res.getNumDbNodes() < 2)
|
||||
{
|
||||
return NDBT_OK;
|
||||
}
|
||||
|
||||
int nodes[2];
|
||||
nodes[0] = res.getMasterNodeId();
|
||||
nodes[1] = res.getNextMasterNodeId(nodes[0]);
|
||||
|
||||
while (res.getNodeGroup(nodes[0]) != res.getNodeGroup(nodes[1]))
|
||||
{
|
||||
ndbout_c("Restarting %u as it not in same node group as %u",
|
||||
nodes[1], nodes[0]);
|
||||
if (res.restartOneDbNode(nodes[1], false, true, true))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (res.waitNodesNoStart(nodes+1, 1))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (res.startNodes(nodes+1, 1))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (res.waitClusterStarted())
|
||||
return NDBT_FAILED;
|
||||
|
||||
nodes[1] = res.getNextMasterNodeId(nodes[0]);
|
||||
}
|
||||
|
||||
ndbout_c("nodes[0]: %u nodes[1]: %u", nodes[0], nodes[1]);
|
||||
|
||||
int val = DumpStateOrd::DihMinTimeBetweenLCP;
|
||||
if (res.dumpStateAllNodes(&val, 1))
|
||||
return NDBT_FAILED;
|
||||
|
||||
int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
|
||||
if (res.dumpStateAllNodes(val2, 2))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (res.insertErrorInAllNodes(932))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (res.insertErrorInNode(nodes[1], 7192))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (res.insertErrorInNode(nodes[0], 7191))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (res.waitClusterNoStart())
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (res.startAll())
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (res.waitClusterStarted())
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (res.restartOneDbNode(nodes[1], false, false, true))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (res.waitClusterStarted())
|
||||
return NDBT_FAILED;
|
||||
|
||||
return NDBT_OK;
|
||||
}
|
||||
|
||||
NDBT_TESTSUITE(testNodeRestart);
|
||||
TESTCASE("NoLoad",
|
||||
"Test that one node at a time can be stopped and then restarted "\
|
||||
@ -2085,6 +2159,9 @@ TESTCASE("Bug21271",
  STEP(runPkUpdateUntilStopped);
  FINALIZER(runClearTable);
}
TESTCASE("Bug31525", ""){
  INITIALIZER(runBug31525);
}
TESTCASE("Bug24717", ""){
  INITIALIZER(runBug24717);
}

@ -579,7 +579,7 @@ int runScanUpdateUntilStopped(NDBT_Context* ctx, NDBT_Step* step){
    para = myRandom48(239)+1;

  g_info << i << ": ";
  if (hugoTrans.scanUpdateRecords(GETNDB(step), records, 0, para) == NDBT_FAILED){
  if (hugoTrans.scanUpdateRecords(GETNDB(step), 0, 0, para) == NDBT_FAILED){
    return NDBT_FAILED;
  }
  i++;

@ -20,6 +20,7 @@
#include <NdbRestarter.hpp>
#include <Vector.hpp>
#include <signaldata/DumpStateOrd.hpp>
#include <NdbBackup.hpp>

int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){

@ -121,7 +122,7 @@ int runSystemRestart1(NDBT_Context* ctx, NDBT_Step* step){
    CHECK(hugoTrans.loadTable(pNdb, records) == 0);
    CHECK(hugoTrans.pkUpdateRecords(pNdb, records) == 0);
    CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
    CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
    CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
    CHECK(utilTrans.clearTable(pNdb, records) == 0);
    CHECK(hugoTrans.loadTable(pNdb, records) == 0);
    CHECK(utilTrans.clearTable(pNdb, records) == 0);
@ -142,7 +143,7 @@ int runSystemRestart1(NDBT_Context* ctx, NDBT_Step* step){
    CHECK(hugoTrans.loadTable(pNdb, records) == 0);
    CHECK(hugoTrans.pkUpdateRecords(pNdb, records) == 0);
    CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
    CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
    CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
    CHECK(utilTrans.clearTable(pNdb, records) == 0);
    CHECK(hugoTrans.loadTable(pNdb, records) == 0);
    CHECK(utilTrans.clearTable(pNdb, records) == 0);
@ -265,7 +266,7 @@ int runSystemRestart2(NDBT_Context* ctx, NDBT_Step* step){
    CHECK(hugoTrans.loadTable(pNdb, records) == 0);
    CHECK(hugoTrans.pkUpdateRecords(pNdb, records) == 0);
    CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
    CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
    CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
    CHECK(utilTrans.clearTable(pNdb, records) == 0);
    CHECK(hugoTrans.loadTable(pNdb, records) == 0);
    CHECK(utilTrans.clearTable(pNdb, records) == 0);
@ -329,7 +330,7 @@ int runSystemRestartTestUndoLog(NDBT_Context* ctx, NDBT_Step* step){
    CHECK(hugoTrans.loadTable(pNdb, records) == 0);
    CHECK(hugoTrans.pkUpdateRecords(pNdb, records) == 0);
    CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
    CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
    CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
    CHECK(utilTrans.clearTable(pNdb, records) == 0);
    CHECK(hugoTrans.loadTable(pNdb, records) == 0);
    CHECK(utilTrans.clearTable(pNdb, records) == 0);
@ -1293,6 +1294,260 @@ runBug28770(NDBT_Context* ctx, NDBT_Step* step) {
  return result;
}
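
// runStopper: helper step that waits for some other step to set the
// "StopAbort" property, sleeps that many milliseconds, then aborts the
// whole cluster via restartAll(false, true, true) (presumably
// initial=false, nostart=true, abort=true) and loops, ready for the
// next trigger.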
int
runStopper(NDBT_Context* ctx, NDBT_Step* step)
{
  NdbRestarter restarter;
  Uint32 stop = 0;
loop:
  while (!ctx->isTestStopped() &&
         ((stop = ctx->getProperty("StopAbort", Uint32(0))) == 0))
  {
    NdbSleep_MilliSleep(30);
  }

  if (ctx->isTestStopped())
  {
    return NDBT_OK;
  }

  ndbout << "Killing in " << stop << "ms..." << flush;
  NdbSleep_MilliSleep(stop);
  restarter.restartAll(false, true, true);
  ctx->setProperty("StopAbort", Uint32(0));
  goto loop;
}
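
// runSR_DD_1: disk-data system-restart test. Each loop loads rows,
// arranges a crash (one node via the 9999 dump order, which appears to be
// a delayed-kill code with min/max delay arguments, or the whole cluster
// through runStopper when the "ALL" property is set), keeps appending
// rows until the crash hits, restarts, takes a backup and carries the
// surviving row count into the next iteration.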
int runSR_DD_1(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* pNdb = GETNDB(step);
  int result = NDBT_OK;
  Uint32 loops = ctx->getNumLoops();
  int count;
  NdbRestarter restarter;
  NdbBackup backup(GETNDB(step)->getNodeId()+1);
  bool lcploop = ctx->getProperty("LCP", (unsigned)0);
  bool all = ctx->getProperty("ALL", (unsigned)0);

  Uint32 i = 1;
  Uint32 backupId;

  int val[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
  int lcp = DumpStateOrd::DihMinTimeBetweenLCP;

  int startFrom = 0;

  HugoTransactions hugoTrans(*ctx->getTab());
  while(i<=loops && result != NDBT_FAILED)
  {
    if (lcploop)
    {
      CHECK(restarter.dumpStateAllNodes(&lcp, 1) == 0);
    }

    int nodeId = restarter.getDbNodeId(rand() % restarter.getNumDbNodes());
    //CHECK(restarter.dumpStateAllNodes(&val, 1) == 0);

    ndbout << "Loop " << i << "/"<< loops <<" started" << endl;
    ndbout << "Loading records..." << startFrom << endl;
    CHECK(hugoTrans.loadTable(pNdb, startFrom) == 0);

    if (!all)
    {
      ndbout << "Making " << nodeId << " crash" << endl;
      int kill[] = { 9999, 1000, 3000 };
      CHECK(restarter.dumpStateOneNode(nodeId, val, 2) == 0);
      CHECK(restarter.dumpStateOneNode(nodeId, kill, 3) == 0);
    }
    else
    {
      ndbout << "Crashing cluster" << endl;
      ctx->setProperty("StopAbort", 1000 + rand() % (3000 - 1000));
    }
    Uint64 end = NdbTick_CurrentMillisecond() + 4000;
    Uint32 row = startFrom;
    do {
      ndbout << "Loading from " << row << " to " << row + 1000 << endl;
      if (hugoTrans.loadTableStartFrom(pNdb, row, 1000) != 0)
        break;
      row += 1000;
    } while (NdbTick_CurrentMillisecond() < end);

    if (!all)
    {
      ndbout << "Waiting for " << nodeId << " to restart" << endl;
      CHECK(restarter.waitNodesNoStart(&nodeId, 1) == 0);
      ndbout << "Restarting cluster" << endl;
      CHECK(restarter.restartAll(false, true, true) == 0);
    }
    else
    {
      ndbout << "Waiting for cluster to restart" << endl;
    }
    CHECK(restarter.waitClusterNoStart() == 0);
    CHECK(restarter.startAll() == 0);
    CHECK(restarter.waitClusterStarted() == 0);

    ndbout << "Starting backup..." << flush;
    CHECK(backup.start(backupId) == 0);
    ndbout << "done" << endl;

    int cnt = 0;
    CHECK(hugoTrans.selectCount(pNdb, 0, &cnt) == 0);
    ndbout << "Found " << cnt << " records..." << endl;
    ndbout << "Clearing..." << endl;
    CHECK(hugoTrans.clearTable(pNdb,
                               NdbScanOperation::SF_TupScan, cnt) == 0);

    if (cnt > startFrom)
    {
      startFrom = cnt;
    }
    startFrom += 1000;
    i++;
  }

  ndbout << "runSR_DD_1 finished" << endl;
  ctx->stopTest();
  return result;
}
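
// runSR_DD_2: like runSR_DD_1, but instead of growing the table it
// load/clears the full row set repeatedly (for up to ~11 s) while the
// crash is pending, so the restart replays interleaved inserts and
// tuple-scan deletes rather than a monotonically growing table.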
int runSR_DD_2(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* pNdb = GETNDB(step);
  int result = NDBT_OK;
  Uint32 loops = ctx->getNumLoops();
  Uint32 rows = ctx->getNumRecords();
  int count;
  NdbRestarter restarter;
  NdbBackup backup(GETNDB(step)->getNodeId()+1);
  bool lcploop = ctx->getProperty("LCP", (unsigned)0);
  bool all = ctx->getProperty("ALL", (unsigned)0);

  Uint32 i = 1;
  Uint32 backupId;

  int val[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
  int lcp = DumpStateOrd::DihMinTimeBetweenLCP;

  int startFrom = 0;

  HugoTransactions hugoTrans(*ctx->getTab());
  while(i<=loops && result != NDBT_FAILED)
  {
    if (lcploop)
    {
      CHECK(restarter.dumpStateAllNodes(&lcp, 1) == 0);
    }

    int nodeId = restarter.getDbNodeId(rand() % restarter.getNumDbNodes());

    if (!all)
    {
      ndbout << "Making " << nodeId << " crash" << endl;
      int kill[] = { 9999, 3000, 10000 };
      CHECK(restarter.dumpStateOneNode(nodeId, val, 2) == 0);
      CHECK(restarter.dumpStateOneNode(nodeId, kill, 3) == 0);
    }
    else
    {
      ndbout << "Crashing cluster" << endl;
      ctx->setProperty("StopAbort", 1000 + rand() % (3000 - 1000));
    }

    Uint64 end = NdbTick_CurrentMillisecond() + 11000;
    Uint32 row = startFrom;
    do {
      if (hugoTrans.loadTable(pNdb, rows) != 0)
        break;

      if (hugoTrans.clearTable(pNdb, NdbScanOperation::SF_TupScan, rows) != 0)
        break;
    } while (NdbTick_CurrentMillisecond() < end);

    if (!all)
    {
      ndbout << "Waiting for " << nodeId << " to restart" << endl;
      CHECK(restarter.waitNodesNoStart(&nodeId, 1) == 0);
      ndbout << "Restarting cluster" << endl;
      CHECK(restarter.restartAll(false, true, true) == 0);
    }
    else
    {
      ndbout << "Waiting for cluster to restart" << endl;
    }

    CHECK(restarter.waitClusterNoStart() == 0);
    CHECK(restarter.startAll() == 0);
    CHECK(restarter.waitClusterStarted() == 0);

    ndbout << "Starting backup..." << flush;
    CHECK(backup.start(backupId) == 0);
    ndbout << "done" << endl;

    int cnt = 0;
    CHECK(hugoTrans.selectCount(pNdb, 0, &cnt) == 0);
    ndbout << "Found " << cnt << " records..." << endl;
    ndbout << "Clearing..." << endl;
    CHECK(hugoTrans.clearTable(pNdb,
                               NdbScanOperation::SF_TupScan, cnt) == 0);
    i++;
  }

  ndbout << "runSR_DD_2 finished" << endl;
  ctx->stopTest();
  return result;
}
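
// runBug27434: park one node in the no-start state, force three local
// checkpoints on the remaining nodes (DihStartLcpImmediately, observed
// via the CHECKPOINT event stream), then restart everything with error
// insert 5046 on the parked node; that code presumably hits the
// stale-LCP recovery path this bug concerns.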
int
runBug27434(NDBT_Context* ctx, NDBT_Step* step)
{
  int result = NDBT_OK;
  NdbRestarter restarter;
  Ndb* pNdb = GETNDB(step);
  const Uint32 nodeCount = restarter.getNumDbNodes();

  if (nodeCount < 2)
    return NDBT_OK;

  int args[] = { DumpStateOrd::DihMaxTimeBetweenLCP };
  int dump[] = { DumpStateOrd::DihStartLcpImmediately };

  int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_CHECKPOINT, 0 };
  NdbLogEventHandle handle =
    ndb_mgm_create_logevent_handle(restarter.handle, filter);

  struct ndb_logevent event;

  do {
    int node1 = restarter.getDbNodeId(rand() % nodeCount);
    CHECK(restarter.restartOneDbNode(node1, false, true, true) == 0);
    NdbSleep_SecSleep(3);
    CHECK(restarter.waitNodesNoStart(&node1, 1) == 0);

    CHECK(restarter.dumpStateAllNodes(args, 1) == 0);

    for (Uint32 i = 0; i<3; i++)
    {
      CHECK(restarter.dumpStateAllNodes(dump, 1) == 0);
      while(ndb_logevent_get_next(handle, &event, 0) >= 0 &&
            event.type != NDB_LE_LocalCheckpointStarted);
      while(ndb_logevent_get_next(handle, &event, 0) >= 0 &&
            event.type != NDB_LE_LocalCheckpointCompleted);
    }

    restarter.restartAll(false, true, true);
    NdbSleep_SecSleep(3);
    CHECK(restarter.waitClusterNoStart() == 0);
    restarter.insertErrorInNode(node1, 5046);
    restarter.startAll();
    CHECK(restarter.waitClusterStarted() == 0);
  } while(false);

  return result;
}
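
// Wiring note: "ALL" makes runSR_DD_* delegate the crash to runStopper
// (whole-cluster abort) and "LCP" makes them first request frequent
// local checkpoints; the SR_DD_* testcases below enumerate those
// combinations.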
NDBT_TESTSUITE(testSystemRestart);
TESTCASE("SR1",
@ -1474,6 +1729,72 @@ TESTCASE("Bug24664",
  STEP(runBug24664);
  FINALIZER(runClearTable);
}
TESTCASE("Bug27434",
         "")
{
  INITIALIZER(runWaitStarted);
  STEP(runBug27434);
}
TESTCASE("SR_DD_1", "")
{
  TC_PROPERTY("ALL", 1);
  INITIALIZER(runWaitStarted);
  STEP(runStopper);
  STEP(runSR_DD_1);
  FINALIZER(runClearTable);
}
TESTCASE("SR_DD_1b", "")
{
  INITIALIZER(runWaitStarted);
  STEP(runSR_DD_1);
  FINALIZER(runClearTable);
}
TESTCASE("SR_DD_1_LCP", "")
{
  TC_PROPERTY("ALL", 1);
  TC_PROPERTY("LCP", 1);
  INITIALIZER(runWaitStarted);
  STEP(runStopper);
  STEP(runSR_DD_1);
  FINALIZER(runClearTable);
}
TESTCASE("SR_DD_1b_LCP", "")
{
  TC_PROPERTY("LCP", 1);
  INITIALIZER(runWaitStarted);
  STEP(runSR_DD_1);
  FINALIZER(runClearTable);
}
TESTCASE("SR_DD_2", "")
{
  TC_PROPERTY("ALL", 1);
  INITIALIZER(runWaitStarted);
  STEP(runStopper);
  STEP(runSR_DD_2);
  FINALIZER(runClearTable);
}
TESTCASE("SR_DD_2b", "")
{
  INITIALIZER(runWaitStarted);
  STEP(runSR_DD_2);
  FINALIZER(runClearTable);
}
TESTCASE("SR_DD_2_LCP", "")
{
  TC_PROPERTY("ALL", 1);
  TC_PROPERTY("LCP", 1);
  INITIALIZER(runWaitStarted);
  STEP(runStopper);
  STEP(runSR_DD_2);
  FINALIZER(runClearTable);
}
TESTCASE("SR_DD_2b_LCP", "")
{
  TC_PROPERTY("LCP", 1);
  INITIALIZER(runWaitStarted);
  STEP(runSR_DD_2);
  FINALIZER(runClearTable);
}
TESTCASE("Bug29167", "")
{
  INITIALIZER(runWaitStarted);

@ -21,6 +21,7 @@
#include <NdbAutoPtr.hpp>
#include <NdbRestarter.hpp>
#include <NdbRestarts.hpp>
#include <signaldata/DumpStateOrd.hpp>

#define GETNDB(ps) ((NDBT_NdbApiStep*)ps)->getNdb()

@ -1730,7 +1731,7 @@ runScanUpdateUntilStopped(NDBT_Context* ctx, NDBT_Step* step){
  HugoTransactions hugoTrans(*ctx->getTab());
  while (ctx->isTestStopped() == false)
  {
    if (hugoTrans.scanUpdateRecords(GETNDB(step), records, abort,
    if (hugoTrans.scanUpdateRecords(GETNDB(step), 0, abort,
                                    parallelism) == NDBT_FAILED){
      return NDBT_FAILED;
    }
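
// Above, the second argument changes from `records` to 0: a record count
// of 0 tells HugoTransactions::scanUpdateRecords to skip its
// rows-read-equals-records check (it only verifies when records != 0),
// which is needed here since concurrent steps insert and delete rows.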
@ -1758,6 +1759,85 @@ runInsertDeleteUntilStopped(NDBT_Context* ctx, NDBT_Step* step)
  return NDBT_OK;
}

int
runBug31701(NDBT_Context* ctx, NDBT_Step* step)
{
  int result = NDBT_OK;

  NdbRestarter restarter;

  if (restarter.getNumDbNodes() < 2){
    ctx->stopTest();
    return NDBT_OK;
  }
  // This should really wait for applier to start...10s is likely enough
  NdbSleep_SecSleep(10);

  int nodeId = restarter.getDbNodeId(rand() % restarter.getNumDbNodes());

  int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
  if (restarter.dumpStateOneNode(nodeId, val2, 2))
    return NDBT_FAILED;

  restarter.insertErrorInNode(nodeId, 13033);
  if (restarter.waitNodesNoStart(&nodeId, 1))
    return NDBT_FAILED;

  if (restarter.startNodes(&nodeId, 1))
    return NDBT_FAILED;

  if (restarter.waitClusterStarted())
    return NDBT_FAILED;

  int records = ctx->getNumRecords();
  HugoTransactions hugoTrans(*ctx->getTab());

  if(ctx->getPropertyWait("LastGCI", ~(Uint32)0))
  {
    g_err << "FAIL " << __LINE__ << endl;
    return NDBT_FAILED;
  }

  hugoTrans.clearTable(GETNDB(step), 0);

  if (hugoTrans.loadTable(GETNDB(step), 3*records, 1, true, 1) != 0){
    g_err << "FAIL " << __LINE__ << endl;
    return NDBT_FAILED;
  }

  if (hugoTrans.pkDelRecords(GETNDB(step), 3*records, 1, true, 1) != 0){
    g_err << "FAIL " << __LINE__ << endl;
    return NDBT_FAILED;
  }
  if (hugoTrans.loadTable(GETNDB(step), records, 1, true, 1) != 0){
    g_err << "FAIL " << __LINE__ << endl;
    return NDBT_FAILED;
  }
  if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
    g_err << "FAIL " << __LINE__ << endl;
    return NDBT_FAILED;
  }
  if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
    g_err << "FAIL " << __LINE__ << endl;
    return NDBT_FAILED;
  }
  if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
    g_err << "FAIL " << __LINE__ << endl;
    return NDBT_FAILED;
  }

  ctx->setProperty("LastGCI", hugoTrans.m_latest_gci);
  if(ctx->getPropertyWait("LastGCI", ~(Uint32)0))
  {
    g_err << "FAIL " << __LINE__ << endl;
    return NDBT_FAILED;
  }

  ctx->stopTest();
  return NDBT_OK;
}
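
// The "LastGCI" property is the handshake with runEventApplier: this
// step publishes the GCI of its last transaction and then blocks in
// getPropertyWait() until the applier (presumably) resets the property
// once the shadow table has caught up to that GCI.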
NDBT_TESTSUITE(test_event);
TESTCASE("BasicEventOperation",
         "Verify that we can listen to Events"
@ -1887,6 +1967,14 @@ TESTCASE("Bug27169", ""){
  STEP(runRestarterLoop);
  FINALIZER(runDropEvent);
}
TESTCASE("Bug31701", ""){
  INITIALIZER(runCreateEvent);
  INITIALIZER(runCreateShadowTable);
  STEP(runEventApplier);
  STEP(runBug31701);
  FINALIZER(runDropEvent);
  FINALIZER(runDropShadowTable);
}
NDBT_TESTSUITE_END(test_event);

int main(int argc, const char** argv){

@ -63,6 +63,14 @@ max-time: 500
cmd: testBasic
args: -n PkRead

max-time: 500
cmd: testBasic
args: -n PkSimpleRead

max-time: 500
cmd: testBasic
args: -n PkDirtyRead

max-time: 500
cmd: testBasic
args: -n PkUpdate
@ -555,7 +563,7 @@ args: -n Bug25554 T1

max-time: 3000
cmd: testNodeRestart
args: -n Bug25984
args: -n Bug25984 T1

max-time: 1000
cmd: testNodeRestart
@ -575,6 +583,14 @@ args: -n Bug29364 T1

#
# DICT TESTS
max-time: 500
cmd: testDict
args: -n Bug29501 T1

max-time: 500
cmd: testDict
args: -n testDropDDObjects T1

max-time: 1500
cmd: testDict
args: -n CreateAndDrop
@ -864,6 +880,10 @@ max-time: 1000
cmd: testNodeRestart
args: -n Bug27466 T1

max-time: 1500
cmd: testSystemRestart
args: -n Bug27434 T1

max-time: 1000
cmd: test_event
args: -l 10 -n Bug27169 T1
@ -945,3 +965,77 @@ args: -n Bug28804 T1 T3
max-time: 180
cmd: testIndex
args: -n Bug28804_ATTRINFO T1 T3

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_1 D1

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_1b D1

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_1 D2

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_1b D2

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_1_LCP D1

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_1b_LCP D1

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_1_LCP D2

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_1b_LCP D2

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_2 D1

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_2b D1

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_2 D2

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_2b D2

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_2_LCP D1

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_2b_LCP D1

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_2_LCP D2

max-time: 1500
cmd: testSystemRestart
args: -n SR_DD_2b_LCP D2

max-time: 600
cmd: testNodeRestart
args: -n Bug31525 T1

max-time: 300
cmd: test_event
args: -n Bug31701 T1
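
# Entry format reminder: max-time is the per-test timeout in seconds, cmd
# the NDBT test binary, and args the testcase selected with -n plus the
# table to run against; D1/D2 are presumably disk-data tables, T1/T3
# in-memory ones.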
@ -93,6 +93,7 @@ rand_lock_mode:
  case NdbOperation::LM_Read:
  case NdbOperation::LM_Exclusive:
  case NdbOperation::LM_CommittedRead:
  case NdbOperation::LM_SimpleRead:
    if(idx && idx->getType() == NdbDictionary::Index::OrderedIndex &&
       pIndexScanOp == 0)
    {

@ -341,50 +341,14 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,

int
HugoTransactions::scanUpdateRecords(Ndb* pNdb,
                                    int records,
                                    int abortPercent,
                                    int parallelism){
  if(m_defaultScanUpdateMethod == 1){
    return scanUpdateRecords1(pNdb, records, abortPercent, parallelism);
  } else if(m_defaultScanUpdateMethod == 2){
    return scanUpdateRecords2(pNdb, records, abortPercent, parallelism);
  } else {
    return scanUpdateRecords3(pNdb, records, abortPercent, parallelism);
  }
}

// Scan all records exclusive and update
// them one by one
int
HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
                                     int records,
                                     int abortPercent,
                                     int parallelism){
  return scanUpdateRecords3(pNdb, records, abortPercent, 1);
}

// Scan all records exclusive and update
// them batched by asking nextScanResult to
// give us all cached records before fetching new
// records from db
int
HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
                                     int records,
                                     int abortPercent,
                                     int parallelism){
  return scanUpdateRecords3(pNdb, records, abortPercent, parallelism);
}

int
HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
                                     int records,
                                     int abortPercent,
                                     int parallelism){
  int retryAttempt = 0;
                                    NdbScanOperation::ScanFlag flags,
                                    int records,
                                    int abortPercent,
                                    int parallelism){
  int retryAttempt = 0;
  int check, a;
  NdbScanOperation *pOp;

  while (true){
restart:
    if (retryAttempt++ >= m_retryMax){
@ -411,8 +375,9 @@ restart:
      return NDBT_FAILED;
    }

    if( pOp->readTuplesExclusive(parallelism) ) {
      ERR(pTrans->getNdbError());
    if( pOp->readTuples(NdbOperation::LM_Exclusive, flags,
                        parallelism))
    {
      closeTransaction(pNdb);
      return NDBT_FAILED;
    }
@ -429,15 +394,18 @@ restart:
    check = pTrans->execute(NoCommit, AbortOnError);
    if( check == -1 ) {
      const NdbError err = pTrans->getNdbError();
      ERR(err);
      closeTransaction(pNdb);
      if (err.status == NdbError::TemporaryError){
        ERR(err);
        closeTransaction(pNdb);
        NdbSleep_MilliSleep(50);
        retryAttempt++;
        continue;
      }
      ERR(err);
      closeTransaction(pNdb);
      return NDBT_FAILED;
    }

    // Abort after 1-100 or 1-records rows
    int ranVal = rand();
    int abortCount = ranVal % (records == 0 ? 100 : records);
@ -448,74 +416,113 @@ restart:
      abortTrans = true;
    }

    int eof;
    int rows = 0;
    while((check = pOp->nextResult(true)) == 0){
      do {
        rows++;
        NdbOperation* pUp = pOp->updateCurrentTuple();
        if(pUp == 0){
    while((eof = pOp->nextResult(true)) == 0){
      rows++;
      if (calc.verifyRowValues(&row) != 0){
        closeTransaction(pNdb);
        return NDBT_FAILED;
      }

      if (abortCount == rows && abortTrans == true){
        ndbout << "Scan is aborted" << endl;
        g_info << "Scan is aborted" << endl;
        pOp->close();
        if( check == -1 ) {
          ERR(pTrans->getNdbError());
          closeTransaction(pNdb);
          return NDBT_FAILED;
        }
        const int updates = calc.getUpdatesValue(&row) + 1;
        const int r = calc.getIdValue(&row);
        for(a = 0; a<tab.getNoOfColumns(); a++){
          if (tab.getColumn(a)->getPrimaryKey() == false){
            if(setValueForAttr(pUp, a, r, updates ) != 0){
              ERR(pTrans->getNdbError());
              closeTransaction(pNdb);
              return NDBT_FAILED;
            }
          }
        }

        if (rows == abortCount && abortTrans == true){
          g_info << "Scan is aborted" << endl;
          // This scan should be aborted
          closeTransaction(pNdb);
          return NDBT_OK;
        }
      } while((check = pOp->nextResult(false)) == 0);

      if(check != -1){
        check = pTrans->execute(Commit, AbortOnError);
        if(check != -1)
          m_latest_gci = pTrans->getGCI();
        pTrans->restart();
      }

      const NdbError err = pTrans->getNdbError();
      if( check == -1 ) {
        closeTransaction(pNdb);
        ERR(err);
        if (err.status == NdbError::TemporaryError){
          NdbSleep_MilliSleep(50);
          goto restart;
        }
        return NDBT_FAILED;
        return NDBT_OK;
      }
    }

    const NdbError err = pTrans->getNdbError();
    if( check == -1 ) {
      closeTransaction(pNdb);
      ERR(err);
    if (eof == -1) {
      const NdbError err = pTrans->getNdbError();

      if (err.status == NdbError::TemporaryError){
        ERR_INFO(err);
        closeTransaction(pNdb);
        NdbSleep_MilliSleep(50);
        goto restart;
        switch (err.code){
        case 488:
        case 245:
        case 490:
          // Too many active scans, no limit on number of retry attempts
          break;
        default:
          retryAttempt++;
        }
        continue;
      }
      ERR(err);
      closeTransaction(pNdb);
      return NDBT_FAILED;
    }

    closeTransaction(pNdb);

    g_info << rows << " rows have been read" << endl;
    if (records != 0 && rows != records){
      g_err << "Check expected number of records failed" << endl
            << " expected=" << records <<", " << endl
            << " read=" << rows << endl;
      return NDBT_FAILED;
    }

    closeTransaction(pNdb);

    g_info << rows << " rows have been updated" << endl;
    return NDBT_OK;
  }
  return NDBT_FAILED;
}

int
HugoTransactions::scanUpdateRecords(Ndb* pNdb,
                                    int records,
                                    int abortPercent,
                                    int parallelism){

  return scanUpdateRecords(pNdb,
                           (NdbScanOperation::ScanFlag)0,
                           records, abortPercent, parallelism);
}

// Scan all records exclusive and update
// them one by one
int
HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
                                     int records,
                                     int abortPercent,
                                     int parallelism){
  return scanUpdateRecords(pNdb,
                           (NdbScanOperation::ScanFlag)0,
                           records, abortPercent, 1);
}

// Scan all records exclusive and update
// them batched by asking nextScanResult to
// give us all cached records before fetching new
// records from db
int
HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
                                     int records,
                                     int abortPercent,
                                     int parallelism){
  return scanUpdateRecords(pNdb, (NdbScanOperation::ScanFlag)0,
                           records, abortPercent, parallelism);
}

int
HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
                                     int records,
                                     int abortPercent,
                                     int parallelism)
{
  return scanUpdateRecords(pNdb, (NdbScanOperation::ScanFlag)0,
                           records, abortPercent, parallelism);
}
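
// The legacy entry points above (scanUpdateRecords1/2/3 and the flag-less
// overload) now all funnel into the single flag-aware scanUpdateRecords(),
// which takes an NdbScanOperation::ScanFlag such as SF_TupScan in
// addition to the old arguments.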
int
HugoTransactions::loadTable(Ndb* pNdb,
                            int records,
@ -524,7 +531,22 @@ HugoTransactions::loadTable(Ndb* pNdb,
                            int doSleep,
                            bool oneTrans,
                            int value,
                            bool abort){
                            bool abort)
{
  return loadTableStartFrom(pNdb, 0, records, batch, allowConstraintViolation,
                            doSleep, oneTrans, value, abort);
}
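
// loadTable() keeps its old signature and simply delegates with
// startFrom = 0; loadTableStartFrom() below lets callers (e.g. runSR_DD_1)
// append rows starting above the ids already present in the table.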
int
HugoTransactions::loadTableStartFrom(Ndb* pNdb,
                                     int startFrom,
                                     int records,
                                     int batch,
                                     bool allowConstraintViolation,
                                     int doSleep,
                                     bool oneTrans,
                                     int value,
                                     bool abort){
  int check;
  int retryAttempt = 0;
  int retryMax = 5;
@ -543,8 +565,9 @@ HugoTransactions::loadTable(Ndb* pNdb,
           << " -> rows/commit = " << batch << endl;
  }

  Uint32 orgbatch = batch;
  g_info << "|- Inserting records..." << endl;
  for (int c=0 ; c<records ; ){
  for (int c=0 ; c<records; ){
    bool closeTrans = true;

    if(c + batch > records)
@ -578,7 +601,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
      }
    }

    if(pkInsertRecord(pNdb, c, batch, value) != NDBT_OK)
    if(pkInsertRecord(pNdb, c + startFrom, batch, value) != NDBT_OK)
    {
      ERR(pTrans->getNdbError());
      closeTransaction(pNdb);
@ -625,6 +648,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
        ERR(err);
        NdbSleep_MilliSleep(50);
        retryAttempt++;
        batch = 1;
        continue;
        break;

@ -670,7 +694,14 @@ HugoTransactions::loadTable(Ndb* pNdb,

int
HugoTransactions::fillTable(Ndb* pNdb,
                            int batch){
                            int batch){
  return fillTableStartFrom(pNdb, 0, batch);
}

int
HugoTransactions::fillTableStartFrom(Ndb* pNdb,
                                     int startFrom,
                                     int batch){
  int check;
  int retryAttempt = 0;
  int retryMax = 5;
@ -688,7 +719,7 @@ HugoTransactions::fillTable(Ndb* pNdb,
           << " -> rows/commit = " << batch << endl;
  }

  for (int c=0 ; ; ){
  for (int c=startFrom ; ; ){

    if (retryAttempt >= retryMax){
      g_info << "Record " << c << " could not be inserted, has retried "