From 705060f10ccb6a0e3ce5899f7d8c2fdfc2a6a777 Mon Sep 17 00:00:00 2001
From: unknown
Date: Sun, 8 Oct 2006 11:24:53 +0800
Subject: [PATCH 01/18] BUG #21858 Make sure to retry when fclose() returns
 EINTR, which decreases the chance of a memory leak.

ndb/src/common/util/File.cpp:
  Avoid a memory leak when fclose() fails with EINTR.
  Even if the close still fails, an ERROR message is written to the out
  file; this should not affect normal operation.
---
 ndb/src/common/util/File.cpp | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/ndb/src/common/util/File.cpp b/ndb/src/common/util/File.cpp
index 12626f29e7d..00741d3a576 100644
--- a/ndb/src/common/util/File.cpp
+++ b/ndb/src/common/util/File.cpp
@@ -123,13 +123,25 @@ bool
 File_class::close()
 {
   bool rc = true;
+  int retval = 0;
+
   if (m_file != NULL)
   {
     ::fflush(m_file);
-    rc = (::fclose(m_file) == 0 ? true : false);
-    m_file = NULL; // Try again?
+    retval = ::fclose(m_file);
+    while ( (retval != 0) && (errno == EINTR) ){
+      retval = ::fclose(m_file);
+    }
+    if( retval == 0){
+      rc = true;
+    }
+    else {
+      rc = false;
+      ndbout_c("ERROR: Close file error in File.cpp for %s",strerror(errno));
+    }
   }
-
+  m_file = NULL;
+
   return rc;
 }
 

From 14f7ff0025996fc54372526a494f4798d6fae2c2 Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 14 Dec 2006 16:27:14 +0100
Subject: [PATCH 02/18] Removed check for impossible error return

---
 sql/ha_ndbcluster.cc | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index ff5634d291c..149a7c83895 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2956,13 +2956,10 @@ KEY* key_info;
       DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
     }
     else if (type == UNIQUE_INDEX)
-    {
-      error= unique_index_scan(key_info,
-                               start_key->key,
-                               start_key->length,
-                               buf);
-      DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ?
HA_ERR_END_OF_FILE : error); - } + DBUG_RETURN(unique_index_scan(key_info, + start_key->key, + start_key->length, + buf)); break; default: break; From 386b381788be295147edaf3b3353ce8643f4e782 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 15 Dec 2006 09:03:21 +0100 Subject: [PATCH 03/18] bug#19956 Problems with VARCHAR primary key and BLOB fields:added test case --- mysql-test/r/ndb_read_multi_range.result | 14 ++++++++++++++ mysql-test/t/ndb_read_multi_range.test | 15 +++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/mysql-test/r/ndb_read_multi_range.result b/mysql-test/r/ndb_read_multi_range.result index 9941d2b28a3..e2a076ef99f 100644 --- a/mysql-test/r/ndb_read_multi_range.result +++ b/mysql-test/r/ndb_read_multi_range.result @@ -367,3 +367,17 @@ a b c 406994 67 2006-02-27 11:26:46 406995 67 2006-02-28 11:55:00 DROP TABLE t1, t11, t12, t21, t22; +CREATE TABLE t1 (id varchar(255) NOT NULL, +tag int(11) NOT NULL, +doc text NOT NULL, +type varchar(150) NOT NULL, +modified timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, +PRIMARY KEY (id) +) ENGINE=ndbcluster; +INSERT INTO t1 VALUES ('sakila',1,'Some text goes here','text',CURRENT_TIMESTAMP); +SELECT id, tag, doc, type FROM t1 WHERE id IN ('flipper','orka'); +id tag doc type +SELECT id, tag, doc, type FROM t1 WHERE id IN ('flipper','sakila'); +id tag doc type +sakila 1 Some text goes here text +DROP TABLE t1; diff --git a/mysql-test/t/ndb_read_multi_range.test b/mysql-test/t/ndb_read_multi_range.test index 855f7789032..99edab5d23c 100644 --- a/mysql-test/t/ndb_read_multi_range.test +++ b/mysql-test/t/ndb_read_multi_range.test @@ -238,3 +238,18 @@ select * from t12 order by 1,2,3; select * from t21 order by 1,2,3; select * from t22 order by 1,2,3; DROP TABLE t1, t11, t12, t21, t22; + +# bug#19956 +CREATE TABLE t1 (id varchar(255) NOT NULL, + tag int(11) NOT NULL, + doc text NOT NULL, + type varchar(150) NOT NULL, + modified timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY (id) + ) ENGINE=ndbcluster; + +INSERT INTO t1 VALUES ('sakila',1,'Some text goes here','text',CURRENT_TIMESTAMP); +SELECT id, tag, doc, type FROM t1 WHERE id IN ('flipper','orka'); +SELECT id, tag, doc, type FROM t1 WHERE id IN ('flipper','sakila'); + +DROP TABLE t1; From 905b64160bcfc1e9f092699071481deaed6ef37f Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 2 Jan 2007 15:47:58 +0100 Subject: [PATCH 04/18] Added --ndb-use-transactions --- sql/mysqld.cc | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 1dd15398cd1..ce12db1c53a 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -4531,8 +4531,8 @@ enum options_mysqld OPT_LOG_BIN_TRUST_FUNCTION_CREATORS, OPT_SAFE_SHOW_DB, OPT_INNODB_SAFE_BINLOG, OPT_INNODB, OPT_ISAM, - OPT_ENGINE_CONDITION_PUSHDOWN, - OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, + OPT_ENGINE_CONDITION_PUSHDOWN, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, + OPT_NDB_USE_EXACT_COUNT, OPT_NDB_USE_TRANSACTIONS, OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME, OPT_NDB_MGMD, OPT_NDB_NODEID, @@ -5158,6 +5158,17 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &global_system_variables.ndb_use_exact_count, (gptr*) &global_system_variables.ndb_use_exact_count, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb-use-transactions", OPT_NDB_USE_TRANSACTIONS, + "Use transactions for large inserts, if enabled then large " + "inserts will be split into several smaller 
transactions", + (gptr*) &global_system_variables.ndb_use_transactions, + (gptr*) &global_system_variables.ndb_use_transactions, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb_use_transactions", OPT_NDB_USE_TRANSACTIONS, + "same as --ndb-use-transactions.", + (gptr*) &global_system_variables.ndb_use_transactions, + (gptr*) &global_system_variables.ndb_use_transactions, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"ndb-shm", OPT_NDB_SHM, "Use shared memory connections when available.", (gptr*) &opt_ndb_shm, From 7a78769027a1dd2598e41d9e6d6ef97d8ec18e9e Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 3 Jan 2007 06:17:34 +0100 Subject: [PATCH 05/18] ndb - bug#25364 on master node failure during qmgr-commitreq make sure to remove all committed failed nodes from failed/prepfailed arrays ndb/include/kernel/signaldata/DumpStateOrd.hpp: new error code ndb/src/kernel/blocks/ERROR_codes.txt: new error code ndb/src/kernel/blocks/qmgr/Qmgr.hpp: extra error insert variable ndb/src/kernel/blocks/qmgr/QmgrMain.cpp: make sure to remove all committed failed nodes from failed/prepfailed arrays ndb/test/ndbapi/testNodeRestart.cpp: testcase ndb/test/run-test/daily-basic-tests.txt: testcase --- .../kernel/signaldata/DumpStateOrd.hpp | 1 + ndb/src/kernel/blocks/ERROR_codes.txt | 3 + ndb/src/kernel/blocks/qmgr/Qmgr.hpp | 4 ++ ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 72 +++++++++++++++---- ndb/test/ndbapi/testNodeRestart.cpp | 43 +++++++++++ ndb/test/run-test/daily-basic-tests.txt | 4 ++ 6 files changed, 114 insertions(+), 13 deletions(-) diff --git a/ndb/include/kernel/signaldata/DumpStateOrd.hpp b/ndb/include/kernel/signaldata/DumpStateOrd.hpp index a2993ad5d03..04f94aaba58 100644 --- a/ndb/include/kernel/signaldata/DumpStateOrd.hpp +++ b/ndb/include/kernel/signaldata/DumpStateOrd.hpp @@ -68,6 +68,7 @@ public: // 100-105 TUP and ACC // 200-240 UTIL // 300-305 TRIX + QmgrErr935 = 935, NdbfsDumpFileStat = 400, NdbfsDumpAllFiles = 401, NdbfsDumpOpenFiles = 402, diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt index 16f5da8a553..0bcc99a6334 100644 --- a/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/ndb/src/kernel/blocks/ERROR_codes.txt @@ -21,6 +21,9 @@ Crash president when he starts to run in ArbitState 1-9. 
910: Crash new president after node crash +935 : Crash master on node failure (delayed) + and skip sending GSN_COMMIT_FAILREQ to specified node + ERROR CODES FOR TESTING NODE FAILURE, GLOBAL CHECKPOINT HANDLING: ----------------------------------------------------------------- diff --git a/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/ndb/src/kernel/blocks/qmgr/Qmgr.hpp index e728ea81a7d..0c4bdc5d3c1 100644 --- a/ndb/src/kernel/blocks/qmgr/Qmgr.hpp +++ b/ndb/src/kernel/blocks/qmgr/Qmgr.hpp @@ -426,6 +426,10 @@ private: StopReq c_stopReq; bool check_multi_node_shutdown(Signal* signal); + +#ifdef ERROR_INSERT + Uint32 c_error_insert_extra; +#endif }; #endif diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index cc981f37987..66ee7549b9d 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -3110,6 +3110,18 @@ Qmgr::sendCommitFailReq(Signal* signal) for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { jam(); ptrAss(nodePtr, nodeRec); + +#ifdef ERROR_INSERT + if (ERROR_INSERTED(935) && nodePtr.i == c_error_insert_extra) + { + ndbout_c("skipping node %d", c_error_insert_extra); + CLEAR_ERROR_INSERT_VALUE; + signal->theData[0] = 9999; + sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 1000, 1); + continue; + } +#endif + if (nodePtr.p->phase == ZRUNNING) { jam(); nodePtr.p->sendCommitFailReqStatus = Q_ACTIVE; @@ -3180,6 +3192,33 @@ void Qmgr::execPREP_FAILREF(Signal* signal) return; }//Qmgr::execPREP_FAILREF() +static +Uint32 +clear_nodes(Uint32 dstcnt, Uint16 dst[], Uint32 srccnt, const Uint16 src[]) +{ + if (srccnt == 0) + return dstcnt; + + Uint32 pos = 0; + for (Uint32 i = 0; i 0) { - jam(); - guard0 = cnoFailedNodes - 1; - arrGuard(guard0 + cnoCommitFailedNodes, MAX_NDB_NODES); - for (Tj = 0; Tj <= guard0; Tj++) { - jam(); - cfailedNodes[Tj] = cfailedNodes[Tj + cnoCommitFailedNodes]; - }//for - }//if - }//if + + /** + * Remove committed nodes from failed/prepared + */ + cnoFailedNodes = clear_nodes(cnoFailedNodes, + cfailedNodes, + cnoCommitFailedNodes, + ccommitFailedNodes); + cnoPrepFailedNodes = clear_nodes(cnoPrepFailedNodes, + cprepFailedNodes, + cnoCommitFailedNodes, + ccommitFailedNodes); cnoCommitFailedNodes = 0; }//if /**----------------------------------------------------------------------- @@ -4658,6 +4696,14 @@ Qmgr::execDUMP_STATE_ORD(Signal* signal) default: ; }//switch + +#ifdef ERROR_INSERT + if (signal->theData[0] == 935 && signal->getLength() == 2) + { + SET_ERROR_INSERT_VALUE(935); + c_error_insert_extra = signal->theData[1]; + } +#endif }//Qmgr::execDUMP_STATE_ORD() void Qmgr::execSET_VAR_REQ(Signal* signal) diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp index 082013f07cc..c0c5cc5163a 100644 --- a/ndb/test/ndbapi/testNodeRestart.cpp +++ b/ndb/test/ndbapi/testNodeRestart.cpp @@ -955,6 +955,46 @@ int runBug24717(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +int runBug25364(NDBT_Context* ctx, NDBT_Step* step){ + int result = NDBT_OK; + NdbRestarter restarter; + Ndb* pNdb = GETNDB(step); + int loops = ctx->getNumLoops(); + + if (restarter.getNumDbNodes() < 4) + return NDBT_OK; + + int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 }; + + for (; loops; loops --) + { + int master = restarter.getMasterNodeId(); + int victim = restarter.getRandomNodeOtherNodeGroup(master, rand()); + int second = restarter.getRandomNodeSameNodeGroup(victim, rand()); + + int dump[] = { 935, victim } ; + if (restarter.dumpStateOneNode(master, 
dump, 2))
+      return NDBT_FAILED;
+
+    if (restarter.dumpStateOneNode(master, val2, 2))
+      return NDBT_FAILED;
+
+    if (restarter.restartOneDbNode(second, false, true, true))
+      return NDBT_FAILED;
+
+    int nodes[2] = { master, second };
+    if (restarter.waitNodesNoStart(nodes, 2))
+      return NDBT_FAILED;
+
+    restarter.startNodes(nodes, 2);
+
+    if (restarter.waitNodesStarted(nodes, 2))
+      return NDBT_FAILED;
+  }
+
+  return NDBT_OK;
+}
+
 NDBT_TESTSUITE(testNodeRestart);
 TESTCASE("NoLoad",
@@ -1271,6 +1311,9 @@ TESTCASE("Bug20185",
 TESTCASE("Bug24717", ""){
   INITIALIZER(runBug24717);
 }
+TESTCASE("Bug25364", ""){
+  INITIALIZER(runBug25364);
+}
 NDBT_TESTSUITE_END(testNodeRestart);
 
 int main(int argc, const char** argv){
diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt
index a1443970388..41070275935 100644
--- a/ndb/test/run-test/daily-basic-tests.txt
+++ b/ndb/test/run-test/daily-basic-tests.txt
@@ -469,6 +469,10 @@ max-time: 1000
 cmd: testNodeRestart
 args: -n Bug24717 T1
 
+max-time: 1000
+cmd: testNodeRestart
+args: -n Bug25364 T1
+
 # OLD FLEX
 max-time: 500
 cmd: flexBench

From 6385ab6851171129cd1613b9a33968c196f1af00 Mon Sep 17 00:00:00 2001
From: unknown
Date: Wed, 3 Jan 2007 14:56:26 +0100
Subject: [PATCH 06/18] ndb_use_transactions is set from the value of the
 command-line flag

---
 sql/sql_class.cc | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 645ac6e28f3..8dd53262eea 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -290,9 +290,6 @@ void THD::init(void)
                                          variables.date_format);
   variables.datetime_format= date_time_format_copy((THD*) 0,
                                          variables.datetime_format);
-#ifdef HAVE_NDBCLUSTER_DB
-  variables.ndb_use_transactions= 1;
-#endif
   pthread_mutex_unlock(&LOCK_global_system_variables);
   server_status= SERVER_STATUS_AUTOCOMMIT;
   if (variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)

From 82ac37e2dfc4b4c8b9dba64eb613c411a80bb04d Mon Sep 17 00:00:00 2001
From: unknown
Date: Fri, 5 Jan 2007 15:33:39 +0100
Subject: [PATCH 07/18] remove unused error code; corrected previous patch
 removing "if (signal)", which should actually be "if (signum)"

ndb/src/common/debugger/EventLogger.cpp:
  corrected previous patch removing "if (signal)", which should actually be
  "if (signum)"
ndb/src/kernel/blocks/dbtc/Dbtc.hpp:
  remove unused error code
ndb/src/ndbapi/ndberror.c:
  remove unused error code
---
 ndb/src/common/debugger/EventLogger.cpp | 3 ++-
 ndb/src/kernel/blocks/dbtc/Dbtc.hpp     | 1 -
 ndb/src/ndbapi/ndberror.c               | 1 -
 3 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp
index e168c705d47..3efd52808e2 100644
--- a/ndb/src/common/debugger/EventLogger.cpp
+++ b/ndb/src/common/debugger/EventLogger.cpp
@@ -115,7 +115,8 @@ void getTextNDBStopForced(QQQQ) {
   int sphase = theData[4];
   int extra = theData[5];
   getRestartAction(theData[1],action_str);
-  reason_str.appfmt(" Initiated by signal %d.", signum);
+  if (signum)
+    reason_str.appfmt(" Initiated by signal %d.", signum);
   if (error)
   {
     ndbd_exit_classification cl;
diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index c6089113382..d6c4529bb72 100644
--- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -138,7 +138,6 @@
 #define ZNOT_FOUND 626
 #define ZALREADYEXIST 630
 
-#define ZINCONSISTENTHASHINDEX 892
 #define ZNOTUNIQUE 893
 #define ZINVALID_KEY 290
 
diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c
index
8800aedae5a..45248000703 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -208,7 +208,6 @@ ErrorBundle ErrorCodes[] = { /** * Internal errors */ - { 892, IE, "Inconsistent hash index. The index needs to be dropped and recreated" }, { 896, IE, "Tuple corrupted - wrong checksum or column data in invalid format" }, { 901, IE, "Inconsistent ordered index. The index needs to be dropped and recreated" }, { 202, IE, "202" }, From 672998ea5f3f9793f2203b12da1b6affbe40ae13 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 8 Jan 2007 10:38:53 +0100 Subject: [PATCH 08/18] bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index: Added error checking --- mysql-test/r/ndb_index_ordered.result | 9 +++++++++ mysql-test/t/ndb_index_ordered.test | 14 ++++++++++++++ sql/ha_ndbcluster.cc | 10 ++++++++++ 3 files changed, 33 insertions(+) diff --git a/mysql-test/r/ndb_index_ordered.result b/mysql-test/r/ndb_index_ordered.result index 36bac7b0f9d..54458e1b459 100644 --- a/mysql-test/r/ndb_index_ordered.result +++ b/mysql-test/r/ndb_index_ordered.result @@ -658,3 +658,12 @@ insert into t1 (a, c) values (1,'aaa'),(3,'bbb'); select count(*) from t1 where c<'bbb'; count(*) 1 +create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, +CountryCode char(3) NOT NULL, +DishTitle varchar(64) NOT NULL, +calories smallint(5) unsigned DEFAULT NULL, +PRIMARY KEY (DishID) +) ENGINE=ndbcluster; +create index i using hash on nationaldish(countrycode,calories); +ERROR HY000: Can't create table './test/#sql-3c51_2.frm' (errno: 138) +drop table nationaldish; diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test index e6827bdbe12..fa76202c7b7 100644 --- a/mysql-test/t/ndb_index_ordered.test +++ b/mysql-test/t/ndb_index_ordered.test @@ -356,3 +356,17 @@ insert into t1 (a, c) values (1,'aaa'),(3,'bbb'); select count(*) from t1 where c<'bbb'; # End of 4.1 tests + +# bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index + +create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, + CountryCode char(3) NOT NULL, + DishTitle varchar(64) NOT NULL, + calories smallint(5) unsigned DEFAULT NULL, + PRIMARY KEY (DishID) + ) ENGINE=ndbcluster; + +--error ER_CANT_CREATE_TABLE +create index i using hash on nationaldish(countrycode,calories); + +drop table nationaldish; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 149a7c83895..34a3a001b21 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1107,6 +1107,16 @@ int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase) error= create_unique_index(unique_index_name, key_info); break; case ORDERED_INDEX: + if (key_info->algorithm == HA_KEY_ALG_HASH) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_UNSUPPORTED_EXTENSION, + ER(ER_UNSUPPORTED_EXTENSION), + "Ndb does not support non-unique " + "hash based indexes"); + error= HA_ERR_UNSUPPORTED; + break; + } error= create_ordered_index(index_name, key_info); break; default: From 817ed4029e1a99519e4eb4833b2e2818c8f116c5 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 8 Jan 2007 13:53:37 +0100 Subject: [PATCH 09/18] bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index: Changed test since error mesage wasn't predictable --- mysql-test/r/ndb_index_ordered.result | 7 +++---- mysql-test/t/ndb_index_ordered.test | 8 +++----- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git 
a/mysql-test/r/ndb_index_ordered.result b/mysql-test/r/ndb_index_ordered.result index 54458e1b459..b3e55a23073 100644 --- a/mysql-test/r/ndb_index_ordered.result +++ b/mysql-test/r/ndb_index_ordered.result @@ -662,8 +662,7 @@ create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, CountryCode char(3) NOT NULL, DishTitle varchar(64) NOT NULL, calories smallint(5) unsigned DEFAULT NULL, -PRIMARY KEY (DishID) +PRIMARY KEY (DishID), +INDEX i USING HASH (countrycode,calories) ) ENGINE=ndbcluster; -create index i using hash on nationaldish(countrycode,calories); -ERROR HY000: Can't create table './test/#sql-3c51_2.frm' (errno: 138) -drop table nationaldish; +ERROR HY000: Can't create table './test/nationaldish.frm' (errno: 138) diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test index fa76202c7b7..5867140fabb 100644 --- a/mysql-test/t/ndb_index_ordered.test +++ b/mysql-test/t/ndb_index_ordered.test @@ -359,14 +359,12 @@ select count(*) from t1 where c<'bbb'; # bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index +--error ER_CANT_CREATE_TABLE create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, CountryCode char(3) NOT NULL, DishTitle varchar(64) NOT NULL, calories smallint(5) unsigned DEFAULT NULL, - PRIMARY KEY (DishID) + PRIMARY KEY (DishID), + INDEX i USING HASH (countrycode,calories) ) ENGINE=ndbcluster; ---error ER_CANT_CREATE_TABLE -create index i using hash on nationaldish(countrycode,calories); - -drop table nationaldish; From 45b1bdb1b237dde4710501e737b6d4509665a879 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 11 Jan 2007 20:51:16 +0100 Subject: [PATCH 10/18] ndb - bug#25554 fix bug when master failure during nr (recommit against 5.0) ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: recommit against 5.0 ndb/src/kernel/blocks/qmgr/QmgrMain.cpp: recommit against 5.0 ndb/test/ndbapi/testNodeRestart.cpp: recommit against 5.0 ndb/test/run-test/daily-basic-tests.txt: recommit against 5.0 --- ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 3 +- ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 21 +++++++++ ndb/test/ndbapi/testNodeRestart.cpp | 53 +++++++++++++++++++++++ ndb/test/run-test/daily-basic-tests.txt | 4 ++ 4 files changed, 80 insertions(+), 1 deletion(-) diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index cd82b6fc425..5f573d40dfe 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -3555,7 +3555,6 @@ void Dbdih::endTakeOver(Uint32 takeOverPtrI) takeOverPtr.i = takeOverPtrI; ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - releaseTakeOver(takeOverPtrI); if ((takeOverPtr.p->toMasterStatus != TakeOverRecord::IDLE) && (takeOverPtr.p->toMasterStatus != TakeOverRecord::TO_WAIT_START_TAKE_OVER)) { jam(); @@ -3569,6 +3568,7 @@ void Dbdih::endTakeOver(Uint32 takeOverPtrI) }//if setAllowNodeStart(takeOverPtr.p->toStartingNode, true); initTakeOver(takeOverPtr); + releaseTakeOver(takeOverPtrI); }//Dbdih::endTakeOver() void Dbdih::releaseTakeOver(Uint32 takeOverPtrI) @@ -4710,6 +4710,7 @@ void Dbdih::handleTakeOverNewMaster(Signal* signal, Uint32 takeOverPtrI) break; } ndbrequire(ok); + endTakeOver(takeOverPtr.i); }//if }//Dbdih::handleTakeOverNewMaster() diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 66ee7549b9d..c5987ee8a57 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ 
-2847,6 +2847,17 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode, systemErrorLab(signal, __LINE__); return; }//if + + if (getNodeState().startLevel < NodeState::SL_STARTED) + { + jam(); + CRASH_INSERTION(932); + char buf[100]; + BaseString::snprintf(buf, 100, "Node failure during restart"); + progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, buf); + ndbrequire(false); + } + TnoFailedNodes = cnoFailedNodes; failReport(signal, failedNodePtr.i, (UintR)ZTRUE, aFailCause); if (cpresident == getOwnNodeId()) { @@ -2933,6 +2944,16 @@ void Qmgr::execPREP_FAILREQ(Signal* signal) return; }//if + if (getNodeState().startLevel < NodeState::SL_STARTED) + { + jam(); + CRASH_INSERTION(932); + char buf[100]; + BaseString::snprintf(buf, 100, "Node failure during restart"); + progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, buf); + ndbrequire(false); + } + guard0 = cnoPrepFailedNodes - 1; arrGuard(guard0, MAX_NDB_NODES); for (Tindex = 0; Tindex <= guard0; Tindex++) { diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp index c0c5cc5163a..43fb77342b5 100644 --- a/ndb/test/ndbapi/testNodeRestart.cpp +++ b/ndb/test/ndbapi/testNodeRestart.cpp @@ -995,6 +995,56 @@ int runBug25364(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +int runBug25554(NDBT_Context* ctx, NDBT_Step* step){ + + int result = NDBT_OK; + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + NdbRestarter restarter; + + if (restarter.getNumDbNodes() < 4) + return NDBT_OK; + + for (int i = 0; i Date: Wed, 17 Jan 2007 21:15:13 +0100 Subject: [PATCH 11/18] ndb - bug#25686 add support for doing mlockall before mallc instead of after (recommit in 5.0) ndb/include/portlib/NdbMem.h: add support for doing mlockall before mallc instead of after ndb/src/common/portlib/NdbMem.c: add support for doing mlockall before mallc instead of after ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp: add support for doing mlockall before mallc instead of after ndb/src/kernel/vm/Configuration.cpp: add support for doing mlockall before mallc instead of after ndb/src/kernel/vm/Configuration.hpp: add support for doing mlockall before mallc instead of after ndb/src/mgmsrv/ConfigInfo.cpp: add support for doing mlockall before mallc instead of after --- ndb/include/portlib/NdbMem.h | 2 +- ndb/src/common/portlib/NdbMem.c | 10 +++++++++- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 19 +++++++++++++++++-- ndb/src/kernel/vm/Configuration.cpp | 2 +- ndb/src/kernel/vm/Configuration.hpp | 2 +- ndb/src/mgmsrv/ConfigInfo.cpp | 8 ++++---- 6 files changed, 33 insertions(+), 10 deletions(-) diff --git a/ndb/include/portlib/NdbMem.h b/ndb/include/portlib/NdbMem.h index 0f2de80200e..2afb1845112 100644 --- a/ndb/include/portlib/NdbMem.h +++ b/ndb/include/portlib/NdbMem.h @@ -66,7 +66,7 @@ void NdbMem_Free(void* ptr); * NdbMem_MemLockAll * Locks virtual memory in main memory */ -int NdbMem_MemLockAll(void); +int NdbMem_MemLockAll(int); /** * NdbMem_MemUnlockAll diff --git a/ndb/src/common/portlib/NdbMem.c b/ndb/src/common/portlib/NdbMem.c index f964f4d9937..0d2021aaf0a 100644 --- a/ndb/src/common/portlib/NdbMem.c +++ b/ndb/src/common/portlib/NdbMem.c @@ -57,7 +57,15 @@ void NdbMem_Free(void* ptr) } -int NdbMem_MemLockAll(){ +int NdbMem_MemLockAll(int i){ + if (i == 1) + { +#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) && defined (MCL_FUTURE) + return mlockall(MCL_CURRENT | MCL_FUTURE); +#else + return -1; +#endif + } #if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) return mlockall(MCL_CURRENT); #else diff --git 
a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index cd0e471a676..5642a11db81 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -342,9 +342,9 @@ void Cmvmi::execSTTOR(Signal* signal) if (theStartPhase == 1){ jam(); - if(theConfig.lockPagesInMainMemory()) + if(theConfig.lockPagesInMainMemory() == 1) { - int res = NdbMem_MemLockAll(); + int res = NdbMem_MemLockAll(0); if(res != 0){ g_eventLogger.warning("Failed to memlock pages"); warningEvent("Failed to memlock pages"); @@ -788,6 +788,21 @@ Cmvmi::execSTART_ORD(Signal* signal) { if(globalData.theStartLevel == NodeState::SL_CMVMI){ jam(); + + if(theConfig.lockPagesInMainMemory() == 2) + { + int res = NdbMem_MemLockAll(1); + if(res != 0) + { + g_eventLogger.warning("Failed to memlock pages"); + warningEvent("Failed to memlock pages"); + } + else + { + g_eventLogger.info("Locked future allocations"); + } + } + globalData.theStartLevel = NodeState::SL_STARTING; globalData.theRestartFlag = system_started; /** diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp index 49f16dae3dd..cbdd9494fd8 100644 --- a/ndb/src/kernel/vm/Configuration.cpp +++ b/ndb/src/kernel/vm/Configuration.cpp @@ -477,7 +477,7 @@ Configuration::setupConfiguration(){ DBUG_VOID_RETURN; } -bool +Uint32 Configuration::lockPagesInMainMemory() const { return _lockPagesInMainMemory; } diff --git a/ndb/src/kernel/vm/Configuration.hpp b/ndb/src/kernel/vm/Configuration.hpp index 6315209ddbb..13b31ad3538 100644 --- a/ndb/src/kernel/vm/Configuration.hpp +++ b/ndb/src/kernel/vm/Configuration.hpp @@ -37,7 +37,7 @@ public: void setupConfiguration(); void closeConfiguration(bool end_session= true); - bool lockPagesInMainMemory() const; + Uint32 lockPagesInMainMemory() const; int timeBetweenWatchDogCheck() const ; void timeBetweenWatchDogCheck(int value); diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp index ab4f2b413b3..7f89f5c5c49 100644 --- a/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/ndb/src/mgmsrv/ConfigInfo.cpp @@ -564,10 +564,10 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "If set to yes, then NDB Cluster data will not be swapped out to disk", ConfigInfo::CI_USED, true, - ConfigInfo::CI_BOOL, - "false", - "false", - "true" }, + ConfigInfo::CI_INT, + "0", + "1", + "2" }, { CFG_DB_WATCHDOG_INTERVAL, From 3c09c1c3273b86cf357dc3f1fb0d2f12c08b0d48 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 19 Jan 2007 04:36:33 +0100 Subject: [PATCH 12/18] ndb - bug#25711 fix cpu peak in big clusters during unpack of config ndb/src/common/util/ConfigValues.cpp: use bin-search instead of hash (as keys collide too much) --- ndb/src/common/util/ConfigValues.cpp | 208 ++++++++++++++++----------- 1 file changed, 122 insertions(+), 86 deletions(-) diff --git a/ndb/src/common/util/ConfigValues.cpp b/ndb/src/common/util/ConfigValues.cpp index ae4fbfd2f71..49fd6dd9a28 100644 --- a/ndb/src/common/util/ConfigValues.cpp +++ b/ndb/src/common/util/ConfigValues.cpp @@ -34,7 +34,7 @@ static const char Magic[] = { 'N', 'D', 'B', 'C', 'O', 'N', 'F', 'V' }; //#define DEBUG_CV #ifdef DEBUG_CV -#define DEBUG +#define DEBUG if(getenv("CV_DEBUG")) #else #define DEBUG if(0) #endif @@ -202,62 +202,60 @@ ConfigValues::Iterator::set(Uint32 key, const char * value){ static bool findKey(const Uint32 * values, Uint32 sz, Uint32 key, Uint32 * _pos){ - Uint32 pos = hash(key, sz); - Uint32 count = 0; - while((values[pos] & KP_MASK) != key && count < sz){ - pos = nextHash(key, sz, pos, 
++count); + Uint32 lo = 0; + Uint32 hi = sz; + Uint32 pos = (hi + lo) >> 1; + + DEBUG printf("findKey(H'%.8x %d)", key, sz); + + if (sz == 0) + { + DEBUG ndbout_c(" -> false, 0"); + * _pos = 0; + return false; } - if((values[pos] & KP_MASK)== key){ - *_pos = pos; - return true; - } + Uint32 val = 0; + Uint32 oldpos = pos + 1; + while (pos != oldpos) + { + DEBUG printf(" [ %d %d %d ] ", lo, pos, hi); + assert(pos < hi); + assert(pos >= lo); + val = values[2*pos] & KP_MASK; + if (key > val) + { + lo = pos; + } + else if (key < val) + { + hi = pos; + } + else + { + * _pos = 2*pos; + DEBUG ndbout_c(" -> true, %d", pos); + return true; + } + oldpos = pos; + pos = (hi + lo) >> 1; + } + + DEBUG printf(" pos: %d (key %.8x val: %.8x values[pos]: %x) key>val: %d ", + pos, key, val, values[2*pos] & KP_MASK, + key > val); + + pos += (key > val) ? 1 : 0; + + * _pos = 2*pos; + DEBUG ndbout_c(" -> false, %d", pos); return false; } -static -Uint32 -hash(Uint32 key, Uint32 size){ - Uint32 tmp = (key >> 16) ^ (key & 0xFFFF); - return (((tmp << 16) | tmp) % size) << 1; -} - -static -Uint32 -nextHash(Uint32 key, Uint32 size, Uint32 pos, Uint32 count){ - Uint32 p = (pos >> 1); - if((key % size) != 0) - p += key; - else - p += 1; - return (p % size) << 1; -} - -static -Uint32 -directory(Uint32 sz){ - const Uint32 _input = sz; - if((sz & 1) == 0) - sz ++; - - bool prime = false; - while(!prime){ - prime = true; - for(Uint32 n = 3; n*n <= sz; n += 2){ - if((sz % n) == 0){ - prime = false; - sz += 2; - break; - } - } - } - DEBUG printf("directory %d -> %d\n", _input, sz); - return sz; -} ConfigValuesFactory::ConfigValuesFactory(Uint32 keys, Uint32 data){ m_sectionCounter = (1 << KP_SECTION_SHIFT); - m_freeKeys = directory(keys); + m_freeKeys = keys; m_freeData = (data + 7) & ~7; m_currentSection = 0; m_cfg = create(m_freeKeys, m_freeData); @@ -316,11 +314,14 @@ ConfigValuesFactory::expand(Uint32 fk, Uint32 fs){ return ; } + DEBUG printf("[ fk fd ] : [ %d %d ]", m_freeKeys, m_freeData); + m_freeKeys = (m_freeKeys >= fk ? m_cfg->m_size : fk + m_cfg->m_size); m_freeData = (m_freeData >= fs ? 
m_cfg->m_dataSize : fs + m_cfg->m_dataSize); - m_freeKeys = directory(m_freeKeys); m_freeData = (m_freeData + 7) & ~7; - + + DEBUG ndbout_c(" [ %d %d ]", m_freeKeys, m_freeData); + ConfigValues * m_tmp = m_cfg; m_cfg = create(m_freeKeys, m_freeData); put(* m_tmp); @@ -336,7 +337,6 @@ ConfigValuesFactory::shrink(){ m_freeKeys = m_cfg->m_size - m_freeKeys; m_freeData = m_cfg->m_dataSize - m_freeData; - m_freeKeys = directory(m_freeKeys); m_freeData = (m_freeData + 7) & ~7; ConfigValues * m_tmp = m_cfg; @@ -415,52 +415,58 @@ ConfigValuesFactory::put(const ConfigValues::Entry & entry){ } const Uint32 tmp = entry.m_key | m_currentSection; - const Uint32 sz = m_cfg->m_size; - Uint32 pos = hash(tmp, sz); - Uint32 count = 0; - Uint32 val = m_cfg->m_values[pos]; + const Uint32 sz = m_cfg->m_size - m_freeKeys; - while((val & KP_MASK) != tmp && val != CFV_KEY_FREE && count < sz){ - pos = nextHash(tmp, sz, pos, ++count); - val = m_cfg->m_values[pos]; - } - - if((val & KP_MASK) == tmp){ + Uint32 pos; + if (findKey(m_cfg->m_values, sz, tmp, &pos)) + { DEBUG ndbout_c("key %x already found at pos: %d", tmp, pos); return false; } - if(count >= sz){ - pos = hash(tmp, sz); - count = 0; - Uint32 val = m_cfg->m_values[pos]; - - printf("key: %d, (key %% size): %d\n", entry.m_key, (entry.m_key % sz)); - printf("pos: %d", pos); - while((val & KP_MASK) != tmp && val != CFV_KEY_FREE && count < sz){ - pos = nextHash(tmp, sz, pos, ++count); - val = m_cfg->m_values[pos]; - printf(" %d", pos); + DEBUG { + printf("H'before "); + Uint32 prev = 0; + for (Uint32 i = 0; im_values[2*i] & KP_MASK; + ndbout_c("%.8x", val); + assert(val >= prev); + prev = val; } - printf("\n"); - - abort(); - printf("Full\n"); - return false; + } + + if (pos != 2*sz) + { + DEBUG ndbout_c("pos: %d sz: %d", pos, sz); + memmove(m_cfg->m_values + pos + 2, m_cfg->m_values + pos, + 4 * (2*sz - pos)); } - assert(pos < (sz << 1)); Uint32 key = tmp; key |= (entry.m_type << KP_TYPE_SHIFT); m_cfg->m_values[pos] = key; + + DEBUG { + printf("H'after "); + Uint32 prev = 0; + for (Uint32 i = 0; i<=sz; i++) + { + Uint32 val = m_cfg->m_values[2*i] & KP_MASK; + ndbout_c("%.8x", val); + assert(val >= prev); + prev = val; + } + } + switch(entry.m_type){ case ConfigValues::IntType: case ConfigValues::SectionType: m_cfg->m_values[pos+1] = entry.m_int; m_freeKeys--; DEBUG printf("Putting at: %d(%d) (loop = %d) key: %d value: %d\n", - pos, sz, count, + pos, sz, 0, (key >> KP_KEYVAL_SHIFT) & KP_KEYVAL_MASK, entry.m_int); return true; @@ -472,7 +478,7 @@ ConfigValuesFactory::put(const ConfigValues::Entry & entry){ m_freeKeys--; m_freeData -= sizeof(char *); DEBUG printf("Putting at: %d(%d) (loop = %d) key: %d value(%d): %s\n", - pos, sz, count, + pos, sz, 0, (key >> KP_KEYVAL_SHIFT) & KP_KEYVAL_MASK, index, entry.m_string); @@ -485,7 +491,7 @@ ConfigValuesFactory::put(const ConfigValues::Entry & entry){ m_freeKeys--; m_freeData -= 8; DEBUG printf("Putting at: %d(%d) (loop = %d) key: %d value64(%d): %lld\n", - pos, sz, count, + pos, sz, 0, (key >> KP_KEYVAL_SHIFT) & KP_KEYVAL_MASK, index, entry.m_int64); @@ -648,7 +654,9 @@ ConfigValuesFactory::unpack(const void * _src, Uint32 len){ } const char * src = (const char *)_src; - + const char * end = src + len - 4; + src += sizeof(Magic); + { Uint32 len32 = (len >> 2); const Uint32 * tmp = (const Uint32*)_src; @@ -663,9 +671,37 @@ ConfigValuesFactory::unpack(const void * _src, Uint32 len){ } } - const char * end = src + len - 4; - src += sizeof(Magic); - + const char * save = src; + + { + Uint32 keys = 0; + Uint32 data = 0; + 
while(end - src > 4){ + Uint32 tmp = ntohl(* (const Uint32 *)src); src += 4; + keys++; + switch(::getTypeOf(tmp)){ + case ConfigValues::IntType: + case ConfigValues::SectionType: + src += 4; + break; + case ConfigValues::Int64Type: + src += 8; + data += 8; + break; + case ConfigValues::StringType:{ + Uint32 s_len = ntohl(* (const Uint32 *)src); + src += 4 + mod4(s_len); + data += sizeof(char*); + break; + } + default: + break; + } + } + expand(keys, data); + } + + src = save; ConfigValues::Entry entry; while(end - src > 4){ Uint32 tmp = ntohl(* (const Uint32 *)src); src += 4; From 5f9e20de6ccb1d9d2dbdb9640c70d59405f6bf32 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 23 Jan 2007 00:34:00 +0700 Subject: [PATCH 13/18] bug#25746 ndb: 4209 error with 2 VARCHAR primary keys - make sure keys are copied correctly when varchar has 2 length bytes - test case mysql-test/r/ndb_basic.result: bug#25746 ndb: 4209 error with 2 VARCHAR primary keys - test case mysql-test/t/ndb_basic.test: bug#25746 ndb: 4209 error with 2 VARCHAR primary keys - test case sql/ha_ndbcluster.cc: bug#25746 ndb: 4209 error with 2 VARCHAR primary keys - make sure keys are copied correctly when varchar has 2 length bytes --- mysql-test/r/ndb_basic.result | 16 ++++++++++++++++ mysql-test/t/ndb_basic.test | 19 +++++++++++++++++++ sql/ha_ndbcluster.cc | 26 ++++++++++++++++---------- 3 files changed, 51 insertions(+), 10 deletions(-) diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index d2111db24fe..fab10867acd 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -749,3 +749,19 @@ f1 f2 f3 222222 bbbbbb 2 drop table t1; Illegal ndb error code: 1186 +CREATE TABLE t1 ( +a VARBINARY(40) NOT NULL, +b VARCHAR (256) CHARACTER SET UTF8 NOT NULL, +c VARCHAR(256) CHARACTER SET UTF8 NOT NULL, +PRIMARY KEY (b,c)) ENGINE=ndbcluster; +INSERT INTO t1 VALUES +("a","ab","abc"),("b","abc","abcd"),("c","abc","ab"),("d","ab","ab"),("e","abc","abc"); +SELECT * FROM t1 ORDER BY a; +a b c +a ab abc +b abc abcd +c abc ab +d ab ab +e abc abc +DROP TABLE t1; +End of 5.0 tests diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index 6c1a4e44f4b..a1ceddcd183 100644 --- a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -710,3 +710,22 @@ drop table t1; --error 1 --exec $MY_PERROR --ndb 1186 2>&1 +# +# Bug #25746 - VARCHAR UTF8 PK issue +# - prior to bugfix 4209, illegal length parameter would be +# returned in SELECT * + +CREATE TABLE t1 ( +a VARBINARY(40) NOT NULL, +b VARCHAR (256) CHARACTER SET UTF8 NOT NULL, +c VARCHAR(256) CHARACTER SET UTF8 NOT NULL, +PRIMARY KEY (b,c)) ENGINE=ndbcluster; +INSERT INTO t1 VALUES +("a","ab","abc"),("b","abc","abcd"),("c","abc","ab"),("d","ab","ab"),("e","abc","abc"); +SELECT * FROM t1 ORDER BY a; +DROP TABLE t1; + +# End of 5.0 tests +--echo End of 5.0 tests + + diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index f9984b27077..6cff3637cf3 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3144,20 +3144,26 @@ void ha_ndbcluster::position(const byte *record) size_t len = key_part->length; const byte * ptr = record + key_part->offset; Field *field = key_part->field; - if ((field->type() == MYSQL_TYPE_VARCHAR) && - ((Field_varstring*)field)->length_bytes == 1) + if (unlikely(field->type() == MYSQL_TYPE_VARCHAR)) { - /** - * Keys always use 2 bytes length - */ - buff[0] = ptr[0]; - buff[1] = 0; - memcpy(buff+2, ptr + 1, len); - len += 2; + if (((Field_varstring*)field)->length_bytes == 1) + { + /** + * Keys 
always use 2 bytes length + */ + buff[0] = ptr[0]; + buff[1] = 0; + memcpy(buff+2, ptr + 1, len); + } + else + { + memcpy(buff, ptr, len + 2); + } + len += 2; } else { - memcpy(buff, ptr, len); + memcpy(buff, ptr, len); } buff += len; } From 188899cdfc07d58a534c2bdbf09b986ac94adb4e Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 23 Jan 2007 07:12:03 +0700 Subject: [PATCH 14/18] bug#25746 ndb: 4209 error with 2 VARCHAR primary keys - post review changes --- sql/ha_ndbcluster.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 6cff3637cf3..3c3f6f4e06b 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3144,7 +3144,7 @@ void ha_ndbcluster::position(const byte *record) size_t len = key_part->length; const byte * ptr = record + key_part->offset; Field *field = key_part->field; - if (unlikely(field->type() == MYSQL_TYPE_VARCHAR)) + if (field->type() == MYSQL_TYPE_VARCHAR) { if (((Field_varstring*)field)->length_bytes == 1) { From ab8355fab0dec8666481c86d33eb80c9b07e9f88 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 23 Jan 2007 11:44:42 +0700 Subject: [PATCH 15/18] ndb - bug#22013 Fix bug in event handling wrt early node shutdown ndb/src/mgmsrv/MgmtSrvr.cpp: Fix bug in event handling wrt early node shutdown ndb/src/ndbapi/ClusterMgr.cpp: Fix reportNodeFailed if only connected wo/ having received any API_REGCONF ndb/src/ndbapi/ClusterMgr.hpp: Fix reportNodeFailed if only connected wo/ having received any API_REGCONF ndb/src/ndbapi/SignalSender.cpp: Fix memleak --- ndb/src/mgmsrv/MgmtSrvr.cpp | 48 ++++++++++++++++++++++++--------- ndb/src/ndbapi/ClusterMgr.cpp | 9 ++++--- ndb/src/ndbapi/ClusterMgr.hpp | 4 +-- ndb/src/ndbapi/SignalSender.cpp | 9 +++++++ 4 files changed, 51 insertions(+), 19 deletions(-) diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index ebef5510b55..0ee59f70885 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -137,8 +137,11 @@ MgmtSrvr::logLevelThreadRun() m_started_nodes.erase(0, false); m_started_nodes.unlock(); - setEventReportingLevelImpl(node, req); - + if (setEventReportingLevelImpl(node, req)) + { + ndbout_c("setEventReportingLevelImpl(%d): failed", node); + } + SetLogLevelOrd ord; ord = m_nodeLogLevel[node]; setNodeLogLevelImpl(node, ord); @@ -155,10 +158,16 @@ MgmtSrvr::logLevelThreadRun() m_log_level_requests.erase(0, false); m_log_level_requests.unlock(); - if(req.blockRef == 0){ + if(req.blockRef == 0) + { req.blockRef = _ownReference; - setEventReportingLevelImpl(0, req); - } else { + if (setEventReportingLevelImpl(0, req)) + { + ndbout_c("setEventReportingLevelImpl: failed 2!"); + } + } + else + { SetLogLevelOrd ord; ord = req; setNodeLogLevelImpl(req.blockRef, ord); @@ -1376,9 +1385,6 @@ int MgmtSrvr::restartDB(bool nostart, bool initialStart, NodeId nodeId = 0; NDB_TICKS maxTime = NdbTick_CurrentMillisecond() + waitTime; - ndbout_c(" %d", nodes.get(1)); - ndbout_c(" %d", nodes.get(2)); - while(getNextNodeId(&nodeId, NDB_MGM_NODE_TYPE_NDB)) { if (!nodes.get(nodeId)) continue; @@ -1584,6 +1590,11 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId, } } + if (nodes.isclear()) + { + return SEND_OR_RECEIVE_FAILED; + } + int error = 0; while (!nodes.isclear()) { @@ -1600,16 +1611,24 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId, error = 1; break; } + // Since sending okToSend(true), + // there is no guarantee that NF_COMPLETEREP will come + // i.e listen also to NODE_FAILREP + case GSN_NODE_FAILREP: { + const NodeFailRep * const 
rep = + CAST_CONSTPTR(NodeFailRep, signal->getDataPtr()); + NdbNodeBitmask mask; + mask.assign(NdbNodeBitmask::Size, rep->theNodes); + nodes.bitANDC(mask); + break; + } + case GSN_NF_COMPLETEREP:{ const NFCompleteRep * const rep = CAST_CONSTPTR(NFCompleteRep, signal->getDataPtr()); nodes.clear(rep->failedNodeId); break; } - case GSN_NODE_FAILREP:{ - // ignore, NF_COMPLETEREP will arrive later - break; - } default: report_unknown_signal(signal); return SEND_OR_RECEIVE_FAILED; @@ -1909,7 +1928,10 @@ MgmtSrvr::handleStatus(NodeId nodeId, bool alive, bool nfComplete) theData[1] = nodeId; if (alive) { - m_started_nodes.push_back(nodeId); + if (nodeTypes[nodeId] == NODE_TYPE_DB) + { + m_started_nodes.push_back(nodeId); + } rep->setEventType(NDB_LE_Connected); } else { rep->setEventType(NDB_LE_Disconnected); diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp index 0aab294cd3a..2ff27ca893e 100644 --- a/ndb/src/ndbapi/ClusterMgr.cpp +++ b/ndb/src/ndbapi/ClusterMgr.cpp @@ -507,6 +507,7 @@ ClusterMgr::reportConnected(NodeId nodeId){ theNode.m_info.m_version = 0; theNode.compatible = true; theNode.nfCompleteRep = true; + theNode.m_state.startLevel = NodeState::SL_NOTHING; theFacade.ReportNodeAlive(nodeId); } @@ -518,14 +519,13 @@ ClusterMgr::reportDisconnected(NodeId nodeId){ noOfConnectedNodes--; theNodes[nodeId].connected = false; - theNodes[nodeId].m_state.m_connected_nodes.clear(); - reportNodeFailed(nodeId); + reportNodeFailed(nodeId, true); } void -ClusterMgr::reportNodeFailed(NodeId nodeId){ +ClusterMgr::reportNodeFailed(NodeId nodeId, bool disconnect){ Node & theNode = theNodes[nodeId]; @@ -536,10 +536,11 @@ ClusterMgr::reportNodeFailed(NodeId nodeId){ { theFacade.doDisconnect(nodeId); } + const bool report = (theNode.m_state.startLevel != NodeState::SL_NOTHING); theNode.m_state.startLevel = NodeState::SL_NOTHING; - if(report) + if(disconnect || report) { theFacade.ReportNodeDead(nodeId); } diff --git a/ndb/src/ndbapi/ClusterMgr.hpp b/ndb/src/ndbapi/ClusterMgr.hpp index 92fe1423f8f..32234a0b2f4 100644 --- a/ndb/src/ndbapi/ClusterMgr.hpp +++ b/ndb/src/ndbapi/ClusterMgr.hpp @@ -97,8 +97,8 @@ private: NdbMutex* clusterMgrThreadMutex; void showState(NodeId nodeId); - void reportNodeFailed(NodeId nodeId); - + void reportNodeFailed(NodeId nodeId, bool disconnect = false); + /** * Signals received */ diff --git a/ndb/src/ndbapi/SignalSender.cpp b/ndb/src/ndbapi/SignalSender.cpp index 1ed42c9c610..804ea92877d 100644 --- a/ndb/src/ndbapi/SignalSender.cpp +++ b/ndb/src/ndbapi/SignalSender.cpp @@ -19,6 +19,14 @@ #include #include +static +void +require(bool x) +{ + if (!x) + abort(); +} + SimpleSignal::SimpleSignal(bool dealloc){ memset(this, 0, sizeof(* this)); deallocSections = dealloc; @@ -145,6 +153,7 @@ SignalSender::waitFor(Uint32 timeOutMillis, T & t) { SimpleSignal * s = t.check(m_jobBuffer); if(s != 0){ + m_usedBuffer.push_back(s); return s; } From 8deeb2f95b7585d98c0252db767a87b9d3aa0326 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 23 Jan 2007 17:07:09 +1100 Subject: [PATCH 16/18] Bug #25487 deleting ndb_cluster_connection object takes long time aim is to: a) if set_connect_timeout called, timeout connect attempt (for retry on next call) after timeout period b) preserve existing blocking behaviour otherwise (for, e.g. mgmapi) Related to customer issue with long time deleting ndb_cluster_connection object. believe we're hanging on the connect(2) call until timeout (when we then realise we should exit the thread). 
ndb/include/mgmapi/mgmapi.h: add ndb_mgm_set_connect_timeout ndb/include/util/SocketClient.hpp: add timeout (seconds) for max time to wait for connection ndb/src/common/transporter/Transporter.cpp: set limit on amount of time we'll wait for tcp connect ndb/src/common/util/SocketClient.cpp: only try to connect for a maximum of timeout time ndb/src/mgmapi/mgmapi.cpp: add ndb_mgm_set_connect_timeout --- ndb/include/mgmapi/mgmapi.h | 10 ++++ ndb/include/util/SocketClient.hpp | 4 ++ ndb/src/common/transporter/Transporter.cpp | 8 ++- ndb/src/common/util/SocketClient.cpp | 60 ++++++++++++++++++++-- ndb/src/mgmapi/mgmapi.cpp | 13 +++++ 5 files changed, 89 insertions(+), 6 deletions(-) diff --git a/ndb/include/mgmapi/mgmapi.h b/ndb/include/mgmapi/mgmapi.h index 2010aa8cc33..2423048f98f 100644 --- a/ndb/include/mgmapi/mgmapi.h +++ b/ndb/include/mgmapi/mgmapi.h @@ -540,6 +540,16 @@ extern "C" { */ const char *ndb_mgm_get_connectstring(NdbMgmHandle handle, char *buf, int buf_sz); + /** + * Sets the number of seconds to wait for connect(2) during ndb_mgm_connect + * Default is no timeout + * + * @param handle NdbMgmHandle + * @param seconds number of seconds + * @return non-zero on success + */ + int ndb_mgm_set_connect_timeout(NdbMgmHandle handle, unsigned int seconds); + /** * Connects to a management server. Connectstring is set by * ndb_mgm_set_connectstring(). diff --git a/ndb/include/util/SocketClient.hpp b/ndb/include/util/SocketClient.hpp index e1f1752e9a8..bb8d9b9ac41 100644 --- a/ndb/include/util/SocketClient.hpp +++ b/ndb/include/util/SocketClient.hpp @@ -23,6 +23,7 @@ class SocketClient { NDB_SOCKET_TYPE m_sockfd; struct sockaddr_in m_servaddr; + unsigned int m_connect_timeout_sec; unsigned short m_port; char *m_server_name; SocketAuthenticator *m_auth; @@ -34,6 +35,9 @@ public: m_port = port; m_servaddr.sin_port = htons(m_port); }; + void set_connect_timeout(unsigned int s) { + m_connect_timeout_sec= s; + } unsigned short get_port() { return m_port; }; char *get_server_name() { return m_server_name; }; int bind(const char* toaddress, unsigned short toport); diff --git a/ndb/src/common/transporter/Transporter.cpp b/ndb/src/common/transporter/Transporter.cpp index 339533c8d27..d1e88e303e7 100644 --- a/ndb/src/common/transporter/Transporter.cpp +++ b/ndb/src/common/transporter/Transporter.cpp @@ -79,9 +79,13 @@ Transporter::Transporter(TransporterRegistry &t_reg, if (isServer) m_socket_client= 0; else + { m_socket_client= new SocketClient(remoteHostName, s_port, new SocketAuthSimple("ndbd", "ndbd passwd")); + + m_socket_client->set_connect_timeout(m_timeOutMillis/1000); + } DBUG_VOID_RETURN; } @@ -140,9 +144,9 @@ Transporter::connect_client() { } sockfd= m_socket_client->connect(); } - + return connect_client(sockfd); -} +} bool Transporter::connect_client(NDB_SOCKET_TYPE sockfd) { diff --git a/ndb/src/common/util/SocketClient.cpp b/ndb/src/common/util/SocketClient.cpp index bb059585863..ec35fd3eb90 100644 --- a/ndb/src/common/util/SocketClient.cpp +++ b/ndb/src/common/util/SocketClient.cpp @@ -26,6 +26,7 @@ SocketClient::SocketClient(const char *server_name, unsigned short port, SocketA m_port= port; m_server_name= server_name ? 
strdup(server_name) : 0; m_sockfd= NDB_INVALID_SOCKET; + m_connect_timeout_sec= 0; } SocketClient::~SocketClient() @@ -58,7 +59,7 @@ SocketClient::init() if (m_sockfd == NDB_INVALID_SOCKET) { return false; } - + DBUG_PRINT("info",("NDB_SOCKET: %d", m_sockfd)); return true; @@ -104,6 +105,13 @@ SocketClient::bind(const char* bindaddress, unsigned short localport) NDB_SOCKET_TYPE SocketClient::connect(const char *toaddress, unsigned short toport) { + fd_set rset, wset; + struct timeval tval; + int r; + bool use_timeout; + socklen_t len; + int flags; + if (m_sockfd == NDB_INVALID_SOCKET) { if (!init()) { @@ -127,14 +135,58 @@ SocketClient::connect(const char *toaddress, unsigned short toport) if (Ndb_getInAddr(&m_servaddr.sin_addr, m_server_name)) return NDB_INVALID_SOCKET; } - - const int r = ::connect(m_sockfd, (struct sockaddr*) &m_servaddr, sizeof(m_servaddr)); - if (r == -1) { + + flags= fcntl(m_sockfd, F_GETFL, 0); + fcntl(m_sockfd, F_SETFL, flags | O_NONBLOCK); + + r= ::connect(m_sockfd, (struct sockaddr*) &m_servaddr, sizeof(m_servaddr)); + + if (r == 0) + goto done; // connected immediately. + + if (r < 0 && (errno != EINPROGRESS)) { NDB_CLOSE_SOCKET(m_sockfd); m_sockfd= NDB_INVALID_SOCKET; return NDB_INVALID_SOCKET; } + FD_ZERO(&rset); + FD_SET(m_sockfd, &rset); + wset= rset; + tval.tv_sec= m_connect_timeout_sec; + tval.tv_usec= 0; + use_timeout= m_connect_timeout_sec; + + if ((r= select(m_sockfd+1, &rset, &wset, NULL, + use_timeout? &tval : NULL)) == 0) + { + NDB_CLOSE_SOCKET(m_sockfd); + m_sockfd= NDB_INVALID_SOCKET; + return NDB_INVALID_SOCKET; + } + + if (FD_ISSET(m_sockfd, &rset) || FD_ISSET(m_sockfd, &wset)) + { + len= sizeof(r); + if (getsockopt(m_sockfd, SOL_SOCKET, SO_ERROR, &r, &len) < 0) + { + // Solaris got an error... different than others + NDB_CLOSE_SOCKET(m_sockfd); + m_sockfd= NDB_INVALID_SOCKET; + return NDB_INVALID_SOCKET; + } + } + else + { + // select error, probably m_sockfd not set. 
+ NDB_CLOSE_SOCKET(m_sockfd); + m_sockfd= NDB_INVALID_SOCKET; + return NDB_INVALID_SOCKET; + } + +done: + fcntl(m_sockfd, F_SETFL, flags); + if (m_auth) { if (!m_auth->client_authenticate(m_sockfd)) { diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index 2f49efd9f58..fa7aed8b182 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -93,6 +93,7 @@ struct ndb_mgm_handle { char last_error_desc[NDB_MGM_MAX_ERR_DESC_SIZE]; int read_timeout; int write_timeout; + unsigned int connect_timeout; NDB_SOCKET_TYPE socket; @@ -159,6 +160,7 @@ ndb_mgm_create_handle() h->socket = NDB_INVALID_SOCKET; h->read_timeout = 50000; h->write_timeout = 100; + h->connect_timeout = 0; h->cfg_i = -1; h->errstream = stdout; h->m_name = 0; @@ -426,6 +428,16 @@ int ndb_mgm_is_connected(NdbMgmHandle handle) return handle->connected; } +extern "C" +int ndb_mgm_set_connect_timeout(NdbMgmHandle handle, unsigned int seconds) +{ + if(!handle) + return -1; + + handle->connect_timeout= seconds; + return 0; +} + /** * Connect to a management server */ @@ -456,6 +468,7 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries, Uint32 i; int binderror = 0; SocketClient s(0, 0); + s.set_connect_timeout(handle->connect_timeout); if (!s.init()) { fprintf(handle->errstream, From 68ab0996b4e8443ca248cad3091d1d51485acb52 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 23 Jan 2007 17:19:27 +1100 Subject: [PATCH 17/18] round up Transporter connect timeout ndb/src/common/transporter/Transporter.cpp: change so timeout is rounded up to nearest second --- ndb/src/common/transporter/Transporter.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ndb/src/common/transporter/Transporter.cpp b/ndb/src/common/transporter/Transporter.cpp index d1e88e303e7..20b6be8ce26 100644 --- a/ndb/src/common/transporter/Transporter.cpp +++ b/ndb/src/common/transporter/Transporter.cpp @@ -84,7 +84,7 @@ Transporter::Transporter(TransporterRegistry &t_reg, new SocketAuthSimple("ndbd", "ndbd passwd")); - m_socket_client->set_connect_timeout(m_timeOutMillis/1000); + m_socket_client->set_connect_timeout((m_timeOutMillis+999)/1000); } DBUG_VOID_RETURN; } From 25fb32ef8457ac2e3bafcea08b009b9d599a671b Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 23 Jan 2007 12:58:10 +0100 Subject: [PATCH 18/18] ndb - bug#25562 use byte-size max_data_length() when setting blob part size sql/ha_ndbcluster.cc: bug#25562 use byte-size max_data_length() when setting blob part size --- sql/ha_ndbcluster.cc | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 3c3f6f4e06b..af097366159 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4185,19 +4185,29 @@ static int create_ndb_column(NDBCOL &col, col.setType(NDBCOL::Text); col.setCharset(cs); } - // Use "<=" even if "<" is the exact condition - if (field->max_length() <= (1 << 8)) - goto mysql_type_tiny_blob; - else if (field->max_length() <= (1 << 16)) { - col.setInlineSize(256); - col.setPartSize(2000); - col.setStripeSize(16); + Field_blob *field_blob= (Field_blob *)field; + /* + * max_data_length is 2^8-1, 2^16-1, 2^24-1 for tiny, blob, medium. + * Tinyblob gets no blob parts. The other cases are just a crude + * way to control part size and striping. + * + * In mysql blob(256) is promoted to blob(65535) so it does not + * in fact fit "inline" in NDB. 
+       */
+      if (field_blob->max_data_length() < (1 << 8))
+        goto mysql_type_tiny_blob;
+      else if (field_blob->max_data_length() < (1 << 16))
+      {
+        col.setInlineSize(256);
+        col.setPartSize(2000);
+        col.setStripeSize(16);
+      }
+      else if (field_blob->max_data_length() < (1 << 24))
+        goto mysql_type_medium_blob;
+      else
+        goto mysql_type_long_blob;
     }
-    else if (field->max_length() <= (1 << 24))
-      goto mysql_type_medium_blob;
-    else
-      goto mysql_type_long_blob;
     break;
   mysql_type_medium_blob:
   case MYSQL_TYPE_MEDIUM_BLOB: