From ef4445187c7d3027215d39a3af92d40b58065d71 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 May 2007 15:25:24 +0200 Subject: [PATCH 01/21] Fix SCI Transporter config/ac-macros/ha_ndbcluster.m4: Fix SCI Transporter build part libmysqld/Makefile.am: Fix SCI Transporter build part libmysqld/examples/Makefile.am: Fix SCI Transporter build part --- config/ac-macros/ha_ndbcluster.m4 | 2 +- libmysqld/Makefile.am | 2 +- libmysqld/examples/Makefile.am | 3 +- .../common/transporter/SCI_Transporter.cpp | 216 ++++-------------- .../common/transporter/SCI_Transporter.hpp | 18 +- 5 files changed, 54 insertions(+), 187 deletions(-) diff --git a/config/ac-macros/ha_ndbcluster.m4 b/config/ac-macros/ha_ndbcluster.m4 index a4963a5e20e..55fe6ad8350 100644 --- a/config/ac-macros/ha_ndbcluster.m4 +++ b/config/ac-macros/ha_ndbcluster.m4 @@ -22,7 +22,7 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [ if test -f "$mysql_sci_dir/lib/libsisci.a" -a \ -f "$mysql_sci_dir/include/sisci_api.h"; then NDB_SCI_INCLUDES="-I$mysql_sci_dir/include" - NDB_SCI_LIBS="-L$mysql_sci_dir/lib -lsisci" + NDB_SCI_LIBS="$mysql_sci_dir/lib/libsisci.a" AC_MSG_RESULT([-- including sci transporter]) AC_DEFINE([NDB_SCI_TRANSPORTER], [1], [Including Ndb Cluster DB sci transporter]) diff --git a/libmysqld/Makefile.am b/libmysqld/Makefile.am index 95e3e539eee..81da1e43cc9 100644 --- a/libmysqld/Makefile.am +++ b/libmysqld/Makefile.am @@ -81,7 +81,7 @@ INC_LIB= $(top_builddir)/regex/libregex.a \ $(top_builddir)/mysys/libmysys.a \ $(top_builddir)/strings/libmystrings.a \ $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/vio/libvio.a + $(top_builddir)/vio/libvio.a @NDB_SCI_LIBS@ # diff --git a/libmysqld/examples/Makefile.am b/libmysqld/examples/Makefile.am index f30951a5d81..e0dd8491688 100644 --- a/libmysqld/examples/Makefile.am +++ b/libmysqld/examples/Makefile.am @@ -35,7 +35,8 @@ INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include -I$(srcdir) \ -I$(top_srcdir) -I$(top_srcdir)/client -I$(top_srcdir)/regex \ $(openssl_includes) LIBS = @LIBS@ @WRAPLIBS@ @CLIENT_LIBS@ $(yassl_libs) -LDADD = @CLIENT_EXTRA_LDFLAGS@ ../libmysqld.a @innodb_system_libs@ @LIBDL@ $(CXXLDFLAGS) +LDADD = @CLIENT_EXTRA_LDFLAGS@ ../libmysqld.a @innodb_system_libs@ @LIBDL@ $(CXXLDFLAGS) \ + @NDB_SCI_LIBS@ mysqltest_embedded_LINK = $(CXXLINK) mysqltest_embedded_SOURCES = mysqltest.c diff --git a/ndb/src/common/transporter/SCI_Transporter.cpp b/ndb/src/common/transporter/SCI_Transporter.cpp index 138b79acb51..0720fe84973 100644 --- a/ndb/src/common/transporter/SCI_Transporter.cpp +++ b/ndb/src/common/transporter/SCI_Transporter.cpp @@ -65,13 +65,10 @@ SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg, m_initLocal=false; - m_swapCounter=0; m_failCounter=0; m_remoteNodes[0]=remoteSciNodeId0; m_remoteNodes[1]=remoteSciNodeId1; m_adapters = nAdapters; - // The maximum number of times to try and create, - // start and destroy a sequence m_ActiveAdapterId=0; m_StandbyAdapterId=1; @@ -102,8 +99,6 @@ SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg, DBUG_VOID_RETURN; } - - void SCI_Transporter::disconnectImpl() { DBUG_ENTER("SCI_Transporter::disconnectImpl"); @@ -129,7 +124,8 @@ void SCI_Transporter::disconnectImpl() if(err != SCI_ERR_OK) { report_error(TE_SCI_UNABLE_TO_CLOSE_CHANNEL); - DBUG_PRINT("error", ("Cannot close channel to the driver. Error code 0x%x", + DBUG_PRINT("error", + ("Cannot close channel to the driver. 
Error code 0x%x", err)); } } @@ -164,19 +160,18 @@ bool SCI_Transporter::initTransporter() { m_sendBuffer.m_buffer = new Uint32[m_sendBuffer.m_sendBufferSize / 4]; m_sendBuffer.m_dataSize = 0; - DBUG_PRINT("info", ("Created SCI Send Buffer with buffer size %d and packet size %d", + DBUG_PRINT("info", + ("Created SCI Send Buffer with buffer size %d and packet size %d", m_sendBuffer.m_sendBufferSize, m_PacketSize * 4)); if(!getLinkStatus(m_ActiveAdapterId) || (m_adapters > 1 && !getLinkStatus(m_StandbyAdapterId))) { - DBUG_PRINT("error", ("The link is not fully operational. Check the cables and the switches")); - //reportDisconnect(remoteNodeId, 0); - //doDisconnect(); + DBUG_PRINT("error", + ("The link is not fully operational. Check the cables and the switches")); //NDB should terminate report_error(TE_SCI_LINK_ERROR); DBUG_RETURN(false); } - DBUG_RETURN(true); } // initTransporter() @@ -235,7 +230,8 @@ sci_error_t SCI_Transporter::initLocalSegment() { DBUG_PRINT("info", ("SCInode iD %d adapter %d\n", sciAdapters[i].localSciNodeId, i)); if(err != SCI_ERR_OK) { - DBUG_PRINT("error", ("Cannot open an SCI virtual device. Error code 0x%x", + DBUG_PRINT("error", + ("Cannot open an SCI virtual device. Error code 0x%x", err)); DBUG_RETURN(err); } @@ -269,7 +265,8 @@ sci_error_t SCI_Transporter::initLocalSegment() { &err); if(err != SCI_ERR_OK) { - DBUG_PRINT("error", ("Local Segment is not accessible by an SCI adapter. Error code 0x%x\n", + DBUG_PRINT("error", + ("Local Segment is not accessible by an SCI adapter. Error code 0x%x\n", err)); DBUG_RETURN(err); } @@ -303,15 +300,13 @@ sci_error_t SCI_Transporter::initLocalSegment() { &err); if(err != SCI_ERR_OK) { - DBUG_PRINT("error", ("Local Segment is not available for remote connections. Error code 0x%x\n", + DBUG_PRINT("error", + ("Local Segment is not available for remote connections. 
Error code 0x%x\n", err)); DBUG_RETURN(err); } } - - setupLocalSegment(); - DBUG_RETURN(err); } // initLocalSegment() @@ -343,12 +338,6 @@ bool SCI_Transporter::doSend() { if(sizeToSend==4097) i4097++; #endif - if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { - DBUG_PRINT("error", ("Start sequence failed")); - report_error(TE_SCI_UNABLE_TO_START_SEQUENCE); - return false; - } - tryagain: retry++; @@ -374,119 +363,36 @@ bool SCI_Transporter::doSend() { SCI_FLAG_ERROR_CHECK, &err); - if (err != SCI_ERR_OK) { - if(err == SCI_ERR_OUT_OF_RANGE) { - DBUG_PRINT("error", ("Data transfer : out of range error")); - goto tryagain; - } - if(err == SCI_ERR_SIZE_ALIGNMENT) { - DBUG_PRINT("error", ("Data transfer : alignment error")); - DBUG_PRINT("info", ("sendPtr 0x%x, sizeToSend = %d", sendPtr, sizeToSend)); - goto tryagain; - } - if(err == SCI_ERR_OFFSET_ALIGNMENT) { - DBUG_PRINT("error", ("Data transfer : offset alignment")); - goto tryagain; - } - if(err == SCI_ERR_TRANSFER_FAILED) { - //(m_TargetSegm[m_StandbyAdapterId].writer)->heavyLock(); - if(getLinkStatus(m_ActiveAdapterId)) { - goto tryagain; - } - if (m_adapters == 1) { - DBUG_PRINT("error", ("SCI Transfer failed")); + if (err == SCI_ERR_OUT_OF_RANGE || + err == SCI_ERR_SIZE_ALIGNMENT || + err == SCI_ERR_OFFSET_ALIGNMENT) { + DBUG_PRINT("error", ("Data transfer error = %d", err)); report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); return false; - } - m_failCounter++; - Uint32 temp=m_ActiveAdapterId; - switch(m_swapCounter) { - case 0: - /**swap from active (0) to standby (1)*/ - if(getLinkStatus(m_StandbyAdapterId)) { - DBUG_PRINT("error", ("Swapping from adapter 0 to 1")); + } + if(err == SCI_ERR_TRANSFER_FAILED) { + if(getLinkStatus(m_ActiveAdapterId)) + goto tryagain; + if (m_adapters == 1) { + DBUG_PRINT("error", ("SCI Transfer failed")); + report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); + return false; + } + m_failCounter++; + Uint32 temp=m_ActiveAdapterId; + if (getLinkStatus(m_StandbyAdapterId)) { failoverShmWriter(); SCIStoreBarrier(m_TargetSegm[m_StandbyAdapterId].sequence,0); m_ActiveAdapterId=m_StandbyAdapterId; m_StandbyAdapterId=temp; - SCIRemoveSequence((m_TargetSegm[m_StandbyAdapterId].sequence), - FLAGS, - &err); - if(err!=SCI_ERR_OK) { - report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE); - DBUG_PRINT("error", ("Unable to remove sequence")); - return false; - } - if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { - DBUG_PRINT("error", ("Start sequence failed")); - report_error(TE_SCI_UNABLE_TO_START_SEQUENCE); - return false; - } - m_swapCounter++; - DBUG_PRINT("info", ("failover complete")); - goto tryagain; - } else { + DBUG_PRINT("error", ("Swapping from adapter %u to %u", + m_StandbyAdapterId, m_ActiveAdapterId)); + } else { report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); DBUG_PRINT("error", ("SCI Transfer failed")); - return false; } - return false; - break; - case 1: - /** swap back from 1 to 0 - must check that the link is up */ - - if(getLinkStatus(m_StandbyAdapterId)) { - failoverShmWriter(); - m_ActiveAdapterId=m_StandbyAdapterId; - m_StandbyAdapterId=temp; - DBUG_PRINT("info", ("Swapping from 1 to 0")); - if(createSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { - DBUG_PRINT("error", ("Unable to create sequence")); - report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE); - return false; - } - if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { - DBUG_PRINT("error", ("startSequence failed... 
disconnecting")); - report_error(TE_SCI_UNABLE_TO_START_SEQUENCE); - return false; - } - - SCIRemoveSequence((m_TargetSegm[m_StandbyAdapterId].sequence) - , FLAGS, - &err); - if(err!=SCI_ERR_OK) { - DBUG_PRINT("error", ("Unable to remove sequence")); - report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE); - return false; - } - - if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) { - DBUG_PRINT("error", ("Unable to create sequence on standby")); - report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE); - return false; - } - - m_swapCounter=0; - - DBUG_PRINT("info", ("failover complete..")); - goto tryagain; - - } else { - DBUG_PRINT("error", ("Unrecoverable data transfer error")); - report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); - return false; - } - - break; - default: - DBUG_PRINT("error", ("Unrecoverable data transfer error")); - report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); - return false; - break; - } - } + } } else { SHM_Writer * writer = (m_TargetSegm[m_ActiveAdapterId].writer); writer->updateWritePtr(sizeToSend); @@ -497,7 +403,6 @@ bool SCI_Transporter::doSend() { m_sendBuffer.m_dataSize = 0; m_sendBuffer.m_forceSendLimit = sendLimit; } - } else { /** * If we end up here, the SCI segment is full. @@ -552,15 +457,12 @@ void SCI_Transporter::setupLocalSegment() DBUG_VOID_RETURN; } //setupLocalSegment - - void SCI_Transporter::setupRemoteSegment() { DBUG_ENTER("SCI_Transporter::setupRemoteSegment"); Uint32 sharedSize = 0; sharedSize =4096; //start of the buffer is page aligned - Uint32 sizeOfBuffer = m_BufferSize; const Uint32 slack = MAX_MESSAGE_SIZE; sizeOfBuffer -= sharedSize; @@ -666,7 +568,6 @@ SCI_Transporter::init_remote() DBUG_PRINT("error", ("Error connecting segment, err 0x%x", err)); DBUG_RETURN(false); } - } // Map the remote memory segment into program space for(Uint32 i=0; i < m_adapters ; i++) { @@ -679,13 +580,14 @@ SCI_Transporter::init_remote() FLAGS, &err); - - if(err!= SCI_ERR_OK) { - DBUG_PRINT("error", ("Cannot map a segment to the remote node %d. Error code 0x%x",m_RemoteSciNodeId, err)); - //NDB SHOULD TERMINATE AND COMPUTER REBOOTED! - report_error(TE_SCI_CANNOT_MAP_REMOTESEGMENT); - DBUG_RETURN(false); - } + if(err!= SCI_ERR_OK) { + DBUG_PRINT("error", + ("Cannot map a segment to the remote node %d. Error code 0x%x", + m_RemoteSciNodeId, err)); + //NDB SHOULD TERMINATE AND COMPUTER REBOOTED! + report_error(TE_SCI_CANNOT_MAP_REMOTESEGMENT); + DBUG_RETURN(false); + } } m_mapped=true; setupRemoteSegment(); @@ -713,7 +615,6 @@ SCI_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd) NDB_CLOSE_SOCKET(sockfd); DBUG_RETURN(false); } - if (!init_local()) { NDB_CLOSE_SOCKET(sockfd); DBUG_RETURN(false); @@ -788,29 +689,9 @@ sci_error_t SCI_Transporter::createSequence(Uint32 adapterid) { &(m_TargetSegm[adapterid].sequence), SCI_FLAG_FAST_BARRIER, &err); - - return err; } // createSequence() - -sci_error_t SCI_Transporter::startSequence(Uint32 adapterid) { - - sci_error_t err; - /** Perform preliminary error check on an SCI adapter before starting a - * sequence of read and write operations on the mapped segment. 
- */ - m_SequenceStatus = SCIStartSequence( - (m_TargetSegm[adapterid].sequence), - FLAGS, &err); - - - // If there still is an error then data cannot be safely send - return err; -} // startSequence() - - - bool SCI_Transporter::disconnectLocal() { DBUG_ENTER("SCI_Transporter::disconnectLocal"); @@ -878,9 +759,6 @@ SCI_Transporter::~SCI_Transporter() { DBUG_VOID_RETURN; } // ~SCI_Transporter() - - - void SCI_Transporter::closeSCI() { // Termination of SCI sci_error_t err; @@ -897,8 +775,9 @@ void SCI_Transporter::closeSCI() { SCIClose(activeSCIDescriptor, FLAGS, &err); if(err != SCI_ERR_OK) { - DBUG_PRINT("error", ("Cannot close SCI channel to the driver. Error code 0x%x", - err)); + DBUG_PRINT("error", + ("Cannot close SCI channel to the driver. Error code 0x%x", + err)); } SCITerminate(); DBUG_VOID_RETURN; @@ -973,7 +852,6 @@ SCI_Transporter::getConnectionStatus() { return false; } - void SCI_Transporter::setConnected() { *m_remoteStatusFlag = SCICONNECTED; @@ -983,7 +861,6 @@ SCI_Transporter::setConnected() { *m_localStatusFlag = SCICONNECTED; } - void SCI_Transporter::setDisconnect() { if(getLinkStatus(m_ActiveAdapterId)) @@ -994,7 +871,6 @@ SCI_Transporter::setDisconnect() { } } - bool SCI_Transporter::checkConnected() { if (*m_localStatusFlag == SCIDISCONNECT) { @@ -1015,8 +891,9 @@ SCI_Transporter::initSCI() { SCIInitialize(0, &error); if(error != SCI_ERR_OK) { DBUG_PRINT("error", ("Cannot initialize SISCI library.")); - DBUG_PRINT("error", ("Inconsistency between SISCI library and SISCI driver. Error code 0x%x", - error)); + DBUG_PRINT("error", + ("Inconsistency between SISCI library and SISCI driver. Error code 0x%x", + error)); DBUG_RETURN(false); } init = true; @@ -1029,3 +906,4 @@ SCI_Transporter::get_free_buffer() const { return (m_TargetSegm[m_ActiveAdapterId].writer)->get_free_buffer(); } + diff --git a/ndb/src/common/transporter/SCI_Transporter.hpp b/ndb/src/common/transporter/SCI_Transporter.hpp index fbba2ac4516..f774186f238 100644 --- a/ndb/src/common/transporter/SCI_Transporter.hpp +++ b/ndb/src/common/transporter/SCI_Transporter.hpp @@ -54,12 +54,12 @@ * local segment, the SCI transporter connects to a segment created by another * transporter at a remote node, and the maps the remote segment into its * virtual address space. However, since NDB Cluster relies on redundancy - * at the network level, by using dual SCI adapters communica - * + * at the network level, by using dual SCI adapters communication can be + * maintained even if one of the adapter cards fails (or anything on the + * network this adapter card exists in e.g. an SCI switch failure). * */ - /** * class SCITransporter * @brief - main class for the SCI transporter. @@ -84,16 +84,6 @@ public: sci_error_t createSequence(Uint32 adapterid); - /** - * starts a sequence for error checking. - * The actual checking that a sequence is correct is done implicitly - * in SCIMemCpy (in doSend). - * @param adapterid the adapter on which to start the sequence. - * @return SCI_ERR_OK if ok, otherwize something else. - */ - sci_error_t startSequence(Uint32 adapterid); - - /** Initiate Local Segment: create a memory segment, * prepare a memory segment, map the local segment * into memory space and make segment available. 
@@ -159,7 +149,6 @@ private: bool m_mapped; bool m_initLocal; bool m_sciinit; - Uint32 m_swapCounter; Uint32 m_failCounter; /** * For statistics on transfered packets @@ -195,7 +184,6 @@ private: */ Uint32 m_reportFreq; - Uint32 m_adapters; Uint32 m_numberOfRemoteNodes; From b95356601151b10a695b5b9a44bd49ddeac7e433 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 May 2007 15:33:27 +0200 Subject: [PATCH 02/21] New SCI Transporter Build scripts --- BUILD/Makefile.am | 2 ++ BUILD/compile-amd64-max-sci | 8 ++++++++ BUILD/compile-pentium64-max-sci | 9 +++++++++ 3 files changed, 19 insertions(+) create mode 100644 BUILD/compile-amd64-max-sci create mode 100644 BUILD/compile-pentium64-max-sci diff --git a/BUILD/Makefile.am b/BUILD/Makefile.am index 3fd61790903..d06106d4431 100644 --- a/BUILD/Makefile.am +++ b/BUILD/Makefile.am @@ -28,6 +28,7 @@ EXTRA_DIST = FINISH.sh \ compile-alpha-debug \ compile-amd64-debug-max \ compile-amd64-max \ + compile-amd64-max-sci \ compile-darwin-mwcc \ compile-dist \ compile-hpux11-parisc2-aCC \ @@ -53,6 +54,7 @@ EXTRA_DIST = FINISH.sh \ compile-pentium-valgrind-max \ compile-pentium64-debug \ compile-pentium64-debug-max \ + compile-pentium64-max-sci \ compile-pentium64-valgrind-max \ compile-ppc \ compile-ppc-debug \ diff --git a/BUILD/compile-amd64-max-sci b/BUILD/compile-amd64-max-sci new file mode 100644 index 00000000000..4afa9004742 --- /dev/null +++ b/BUILD/compile-amd64-max-sci @@ -0,0 +1,8 @@ +#! /bin/sh + +path=`dirname $0` +. "$path/SETUP.sh" +extra_flags="$amd64_cflags $fast_cflags -g" +extra_configs="$amd64_configs $max_configs --with-ndb-sci=/opt/DIS" + +. "$path/FINISH.sh" diff --git a/BUILD/compile-pentium64-max-sci b/BUILD/compile-pentium64-max-sci new file mode 100644 index 00000000000..9ebb1988475 --- /dev/null +++ b/BUILD/compile-pentium64-max-sci @@ -0,0 +1,9 @@ +#! /bin/sh + +path=`dirname $0` +. "$path/SETUP.sh" + +extra_flags="$pentium64_cflags $fast_cflags -g" +extra_configs="$pentium_configs $max_configs --with-ndb-sci=/opt/DIS" + +. 
"$path/FINISH.sh" From e3a68d8dcc2af76706948df698d6a15a91cae379 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 May 2007 15:46:29 +0200 Subject: [PATCH 03/21] Manual merge --- sql/Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/Makefile.am b/sql/Makefile.am index 465e5c843f4..d280b22f493 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -33,7 +33,7 @@ SUPPORTING_LIBS = $(top_builddir)/vio/libvio.a \ $(top_builddir)/regex/libregex.a \ $(top_builddir)/strings/libmystrings.a mysqld_DEPENDENCIES= @mysql_plugin_libs@ $(SUPPORTING_LIBS) -LDADD = $(SUPPORTING_LIBS) @ZLIB_LIBS@ +LDADD = $(SUPPORTING_LIBS) @ZLIB_LIBS@ @NDB_SCI_LIBS@ mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \ @pstack_libs@ \ @mysql_plugin_libs@ \ From 6d5f665cbb16ba6f3343661d37d92eebe43cab60 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 May 2007 16:07:04 +0200 Subject: [PATCH 04/21] Jamming storage/ndb/src/common/debugger/signaldata/SignalNames.cpp: Added signal names to signal logging --- .../debugger/signaldata/SignalNames.cpp | 8 + .../ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 682 +++++++++++------- storage/ndb/src/kernel/blocks/lgman.cpp | 7 +- 3 files changed, 442 insertions(+), 255 deletions(-) diff --git a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp index 884a49b3a94..0d31cd5de7f 100644 --- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp +++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp @@ -621,6 +621,14 @@ const GsnName SignalNames [] = { ,{ GSN_LCP_PREPARE_REF, "LCP_PREPARE_REF" } ,{ GSN_LCP_PREPARE_CONF, "LCP_PREPARE_CONF" } + ,{ GSN_DICT_ABORT_REQ, "DICT_ABORT_REQ" } + ,{ GSN_DICT_ABORT_REF, "DICT_ABORT_REF" } + ,{ GSN_DICT_ABORT_CONF, "DICT_ABORT_CONF" } + + ,{ GSN_DICT_COMMIT_REQ, "DICT_COMMIT_REQ" } + ,{ GSN_DICT_COMMIT_REF, "DICT_COMMIT_REF" } + ,{ GSN_DICT_COMMIT_CONF, "DICT_COMMIT_CONF" } + /* DICT LOCK */ ,{ GSN_DICT_LOCK_REQ, "DICT_LOCK_REQ" } ,{ GSN_DICT_LOCK_CONF, "DICT_LOCK_CONF" } diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 66cd523f333..de365e886a0 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -13968,7 +13968,8 @@ Dbdict::getTableEntry(XSchemaFile * xsf, Uint32 tableId) //****************************************** void -Dbdict::execCREATE_FILE_REQ(Signal* signal){ +Dbdict::execCREATE_FILE_REQ(Signal* signal) +{ jamEntry(); if(!assembleFragments(signal)){ @@ -14013,13 +14014,14 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){ Ptr trans_ptr; if (! 
c_Trans.seize(trans_ptr)){ + jam(); ref->errorCode = CreateFileRef::Busy; ref->status = 0; ref->errorKey = 0; ref->errorLine = __LINE__; break; } - + jam(); const Uint32 trans_key = ++c_opRecordSequence; trans_ptr.p->key = trans_key; trans_ptr.p->m_senderRef = senderRef; @@ -14048,6 +14050,7 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){ { Uint32 objId = getFreeObjId(0); if (objId == RNIL) { + jam(); ref->errorCode = CreateFileRef::NoMoreObjectRecords; ref->status = 0; ref->errorKey = 0; @@ -14072,7 +14075,6 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){ CreateObjReq::SignalLength, JBB); c_blockState = BS_CREATE_TAB; - return; } while(0); @@ -14083,7 +14085,8 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){ } void -Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){ +Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal) +{ jamEntry(); if(!assembleFragments(signal)){ @@ -14127,13 +14130,14 @@ Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){ Ptr trans_ptr; if (! c_Trans.seize(trans_ptr)){ + jam(); ref->errorCode = CreateFilegroupRef::Busy; ref->status = 0; ref->errorKey = 0; ref->errorLine = __LINE__; break; } - + jam(); const Uint32 trans_key = ++c_opRecordSequence; trans_ptr.p->key = trans_key; trans_ptr.p->m_senderRef = senderRef; @@ -14159,6 +14163,7 @@ Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){ { Uint32 objId = getFreeObjId(0); if (objId == RNIL) { + jam(); ref->errorCode = CreateFilegroupRef::NoMoreObjectRecords; ref->status = 0; ref->errorKey = 0; @@ -14183,7 +14188,6 @@ Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){ CreateObjReq::SignalLength, JBB); c_blockState = BS_CREATE_TAB; - return; } while(0); @@ -14219,7 +14223,8 @@ Dbdict::execDROP_FILE_REQ(Signal* signal) break; } - if (c_blockState != BS_IDLE){ + if (c_blockState != BS_IDLE) + { jam(); ref->errorCode = DropFileRef::Busy; ref->errorKey = 0; @@ -14229,6 +14234,7 @@ Dbdict::execDROP_FILE_REQ(Signal* signal) if (checkSingleUserMode(senderRef)) { + jam(); ref->errorCode = DropFileRef::SingleUser; ref->errorKey = 0; ref->errorLine = __LINE__; @@ -14238,6 +14244,7 @@ Dbdict::execDROP_FILE_REQ(Signal* signal) Ptr file_ptr; if (!c_file_hash.find(file_ptr, objId)) { + jam(); ref->errorCode = DropFileRef::NoSuchFile; ref->errorLine = __LINE__; break; @@ -14245,6 +14252,7 @@ Dbdict::execDROP_FILE_REQ(Signal* signal) if (file_ptr.p->m_version != version) { + jam(); ref->errorCode = DropFileRef::InvalidSchemaObjectVersion; ref->errorLine = __LINE__; break; @@ -14253,10 +14261,12 @@ Dbdict::execDROP_FILE_REQ(Signal* signal) Ptr trans_ptr; if (! 
c_Trans.seize(trans_ptr)) { + jam(); ref->errorCode = DropFileRef::Busy; ref->errorLine = __LINE__; break; } + jam(); const Uint32 trans_key = ++c_opRecordSequence; trans_ptr.p->key = trans_key; @@ -14292,7 +14302,6 @@ Dbdict::execDROP_FILE_REQ(Signal* signal) DropObjReq::SignalLength, JBB); c_blockState = BS_CREATE_TAB; - return; } while(0); @@ -14320,7 +14329,8 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) Uint32 version = req->filegroup_version; do { - if(getOwnNodeId() != c_masterNodeId){ + if(getOwnNodeId() != c_masterNodeId) + { jam(); ref->errorCode = DropFilegroupRef::NotMaster; ref->errorKey = 0; @@ -14328,7 +14338,8 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) break; } - if (c_blockState != BS_IDLE){ + if (c_blockState != BS_IDLE) + { jam(); ref->errorCode = DropFilegroupRef::Busy; ref->errorKey = 0; @@ -14338,6 +14349,7 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) if (checkSingleUserMode(senderRef)) { + jam(); ref->errorCode = DropFilegroupRef::SingleUser; ref->errorKey = 0; ref->errorLine = __LINE__; @@ -14347,6 +14359,7 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) Ptr filegroup_ptr; if (!c_filegroup_hash.find(filegroup_ptr, objId)) { + jam(); ref->errorCode = DropFilegroupRef::NoSuchFilegroup; ref->errorLine = __LINE__; break; @@ -14354,6 +14367,7 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) if (filegroup_ptr.p->m_version != version) { + jam(); ref->errorCode = DropFilegroupRef::InvalidSchemaObjectVersion; ref->errorLine = __LINE__; break; @@ -14362,10 +14376,12 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) Ptr trans_ptr; if (! c_Trans.seize(trans_ptr)) { + jam(); ref->errorCode = DropFilegroupRef::Busy; ref->errorLine = __LINE__; break; } + jam(); const Uint32 trans_key = ++c_opRecordSequence; trans_ptr.p->key = trans_key; @@ -14401,7 +14417,6 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) DropObjReq::SignalLength, JBB); c_blockState = BS_CREATE_TAB; - return; } while(0); @@ -14412,15 +14427,15 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) } void -Dbdict::execCREATE_OBJ_REF(Signal* signal){ - jamEntry(); - +Dbdict::execCREATE_OBJ_REF(Signal* signal) +{ CreateObjRef * const ref = (CreateObjRef*)signal->getDataPtr(); - Ptr trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, ref->senderData)); - if(ref->errorCode != CreateObjRef::NF_FakeErrorREF){ + jam(); trans_ptr.p->setErrorCode(ref->errorCode); } Uint32 node = refToNode(ref->senderRef); @@ -14428,12 +14443,12 @@ Dbdict::execCREATE_OBJ_REF(Signal* signal){ } void -Dbdict::execCREATE_OBJ_CONF(Signal* signal){ - jamEntry(); - - CreateObjConf * const conf = (CreateObjConf*)signal->getDataPtr(); - +Dbdict::execCREATE_OBJ_CONF(Signal* signal) +{ Ptr trans_ptr; + CreateObjConf * const conf = (CreateObjConf*)signal->getDataPtr(); + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, conf->senderData)); schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef)); } @@ -14443,6 +14458,7 @@ Dbdict::schemaOp_reply(Signal* signal, SchemaTransaction * trans_ptr_p, Uint32 nodeId) { + jam(); { SafeCounter tmp(c_counterMgr, trans_ptr_p->m_counter); if(!tmp.clearWaitingFor(nodeId)){ @@ -14453,10 +14469,8 @@ Dbdict::schemaOp_reply(Signal* signal, switch(trans_ptr_p->m_op.m_state){ case DictObjOp::Preparing:{ - if(trans_ptr_p->m_errorCode != 0) { - jam(); /** * Failed to prepare on atleast one node -> abort on all */ @@ -14466,10 +14480,16 @@ Dbdict::schemaOp_reply(Signal* signal, safe_cast(&Dbdict::trans_abort_start_done); if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_start) + { + jam(); 
(this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_start) (signal, trans_ptr_p); + } else + { + jam(); execute(signal, trans_ptr_p->m_callback, 0); + } return; } @@ -14479,14 +14499,19 @@ Dbdict::schemaOp_reply(Signal* signal, safe_cast(&Dbdict::trans_commit_start_done); if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_start) + { + jam(); (this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_start) (signal, trans_ptr_p); + } else + { + jam(); execute(signal, trans_ptr_p->m_callback, 0); + } return; } case DictObjOp::Committing: { - jam(); ndbrequire(trans_ptr_p->m_errorCode == 0); trans_ptr_p->m_op.m_state = DictObjOp::Committed; @@ -14495,31 +14520,42 @@ Dbdict::schemaOp_reply(Signal* signal, safe_cast(&Dbdict::trans_commit_complete_done); if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_complete) + { + jam(); (this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_complete) (signal, trans_ptr_p); + } else - execute(signal, trans_ptr_p->m_callback, 0); + { + jam(); + execute(signal, trans_ptr_p->m_callback, 0); + } return; } case DictObjOp::Aborting:{ - jam(); - trans_ptr_p->m_op.m_state = DictObjOp::Committed; trans_ptr_p->m_callback.m_callbackData = trans_ptr_p->key; trans_ptr_p->m_callback.m_callbackFunction= safe_cast(&Dbdict::trans_abort_complete_done); if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_complete) + { + jam(); (this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_complete) (signal, trans_ptr_p); + } else - execute(signal, trans_ptr_p->m_callback, 0); + { + jam(); + execute(signal, trans_ptr_p->m_callback, 0); + } return; } case DictObjOp::Defined: case DictObjOp::Prepared: case DictObjOp::Committed: case DictObjOp::Aborted: + jam(); break; } ndbrequire(false); @@ -14528,14 +14564,13 @@ Dbdict::schemaOp_reply(Signal* signal, void Dbdict::trans_commit_start_done(Signal* signal, Uint32 callbackData, - Uint32 retValue){ - jamEntry(); - - ndbrequire(retValue == 0); - + Uint32 retValue) +{ Ptr trans_ptr; + + jam(); + ndbrequire(retValue == 0); ndbrequire(c_Trans.find(trans_ptr, callbackData)); - NodeReceiverGroup rg(DBDICT, trans_ptr.p->m_nodes); SafeCounter tmp(c_counterMgr, trans_ptr.p->m_counter); tmp.init(rg, GSN_DICT_COMMIT_REF, trans_ptr.p->key); @@ -14546,27 +14581,26 @@ Dbdict::trans_commit_start_done(Signal* signal, req->op_key = trans_ptr.p->m_op.m_key; sendSignal(rg, GSN_DICT_COMMIT_REQ, signal, DictCommitReq::SignalLength, JBB); - trans_ptr.p->m_op.m_state = DictObjOp::Committing; } void Dbdict::trans_commit_complete_done(Signal* signal, Uint32 callbackData, - Uint32 retValue){ - jamEntry(); - - ndbrequire(retValue == 0); - + Uint32 retValue) +{ Ptr trans_ptr; + + jam(); + ndbrequire(retValue == 0); ndbrequire(c_Trans.find(trans_ptr, callbackData)); switch(f_dict_op[trans_ptr.p->m_op.m_vt_index].m_gsn_user_req){ case GSN_CREATE_FILEGROUP_REQ:{ FilegroupPtr fg_ptr; + jam(); ndbrequire(c_filegroup_hash.find(fg_ptr, trans_ptr.p->m_op.m_obj_id)); - // CreateFilegroupConf * conf = (CreateFilegroupConf*)signal->getDataPtr(); conf->senderRef = reference(); conf->senderData = trans_ptr.p->m_senderData; @@ -14576,11 +14610,11 @@ Dbdict::trans_commit_complete_done(Signal* signal, //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILEGROUP_CONF, signal, CreateFilegroupConf::SignalLength, JBB); - break; } case GSN_CREATE_FILE_REQ:{ FilePtr f_ptr; + jam(); ndbrequire(c_file_hash.find(f_ptr, trans_ptr.p->m_op.m_obj_id)); CreateFileConf * conf = (CreateFileConf*)signal->getDataPtr(); 
conf->senderRef = reference(); @@ -14591,11 +14625,11 @@ Dbdict::trans_commit_complete_done(Signal* signal, //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILE_CONF, signal, CreateFileConf::SignalLength, JBB); - break; } case GSN_DROP_FILE_REQ:{ DropFileConf * conf = (DropFileConf*)signal->getDataPtr(); + jam(); conf->senderRef = reference(); conf->senderData = trans_ptr.p->m_senderData; conf->fileId = trans_ptr.p->m_op.m_obj_id; @@ -14607,6 +14641,7 @@ Dbdict::trans_commit_complete_done(Signal* signal, } case GSN_DROP_FILEGROUP_REQ:{ DropFilegroupConf * conf = (DropFilegroupConf*)signal->getDataPtr(); + jam(); conf->senderRef = reference(); conf->senderData = trans_ptr.p->m_senderData; conf->filegroupId = trans_ptr.p->m_op.m_obj_id; @@ -14629,12 +14664,12 @@ Dbdict::trans_commit_complete_done(Signal* signal, void Dbdict::trans_abort_start_done(Signal* signal, Uint32 callbackData, - Uint32 retValue){ - jamEntry(); - - ndbrequire(retValue == 0); - + Uint32 retValue) +{ Ptr trans_ptr; + + jam(); + ndbrequire(retValue == 0); ndbrequire(c_Trans.find(trans_ptr, callbackData)); NodeReceiverGroup rg(DBDICT, trans_ptr.p->m_nodes); @@ -14652,12 +14687,12 @@ Dbdict::trans_abort_start_done(Signal* signal, void Dbdict::trans_abort_complete_done(Signal* signal, Uint32 callbackData, - Uint32 retValue){ - jamEntry(); - - ndbrequire(retValue == 0); - + Uint32 retValue) +{ Ptr trans_ptr; + + jam(); + ndbrequire(retValue == 0); ndbrequire(c_Trans.find(trans_ptr, callbackData)); switch(f_dict_op[trans_ptr.p->m_op.m_vt_index].m_gsn_user_req){ @@ -14665,6 +14700,7 @@ Dbdict::trans_abort_complete_done(Signal* signal, { // CreateFilegroupRef * ref = (CreateFilegroupRef*)signal->getDataPtr(); + jam(); ref->senderRef = reference(); ref->senderData = trans_ptr.p->m_senderData; ref->masterNodeId = c_masterNodeId; @@ -14676,12 +14712,12 @@ Dbdict::trans_abort_complete_done(Signal* signal, //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILEGROUP_REF, signal, CreateFilegroupRef::SignalLength, JBB); - break; } case GSN_CREATE_FILE_REQ: { CreateFileRef * ref = (CreateFileRef*)signal->getDataPtr(); + jam(); ref->senderRef = reference(); ref->senderData = trans_ptr.p->m_senderData; ref->masterNodeId = c_masterNodeId; @@ -14693,12 +14729,12 @@ Dbdict::trans_abort_complete_done(Signal* signal, //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILE_REF, signal, CreateFileRef::SignalLength, JBB); - break; } case GSN_DROP_FILE_REQ: { DropFileRef * ref = (DropFileRef*)signal->getDataPtr(); + jam(); ref->senderRef = reference(); ref->senderData = trans_ptr.p->m_senderData; ref->masterNodeId = c_masterNodeId; @@ -14709,13 +14745,13 @@ Dbdict::trans_abort_complete_done(Signal* signal, //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_DROP_FILE_REF, signal, DropFileRef::SignalLength, JBB); - break; } case GSN_DROP_FILEGROUP_REQ: { // DropFilegroupRef * ref = (DropFilegroupRef*)signal->getDataPtr(); + jam(); ref->senderRef = reference(); ref->senderData = trans_ptr.p->m_senderData; ref->masterNodeId = c_masterNodeId; @@ -14726,7 +14762,6 @@ Dbdict::trans_abort_complete_done(Signal* signal, //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_DROP_FILEGROUP_REF, signal, DropFilegroupRef::SignalLength, JBB); - break; } default: @@ -14740,7 +14775,8 @@ Dbdict::trans_abort_complete_done(Signal* signal, } void -Dbdict::execCREATE_OBJ_REQ(Signal* signal){ +Dbdict::execCREATE_OBJ_REQ(Signal* signal) +{ jamEntry(); 
if(!assembleFragments(signal)){ @@ -14785,6 +14821,7 @@ Dbdict::execCREATE_OBJ_REQ(Signal* signal){ switch(objType){ case DictTabInfo::Tablespace: case DictTabInfo::LogfileGroup: + jam(); createObjPtr.p->m_vt_index = 0; break; case DictTabInfo::Datafile: @@ -14793,7 +14830,11 @@ Dbdict::execCREATE_OBJ_REQ(Signal* signal){ * Use restart code to impl. ForceCreateFile */ if (requestInfo & CreateFileReq::ForceCreateFile) - createObjPtr.p->m_restart= 2; + { + jam(); + createObjPtr.p->m_restart= 2; + } + jam(); createObjPtr.p->m_vt_index = 1; break; default: @@ -14809,10 +14850,10 @@ void Dbdict::execDICT_COMMIT_REQ(Signal* signal) { DictCommitReq* req = (DictCommitReq*)signal->getDataPtr(); - Ptr op; - ndbrequire(c_schemaOp.find(op, req->op_key)); + jamEntry(); + ndbrequire(c_schemaOp.find(op, req->op_key)); (this->*f_dict_op[op.p->m_vt_index].m_commit)(signal, op.p); } @@ -14820,23 +14861,23 @@ void Dbdict::execDICT_ABORT_REQ(Signal* signal) { DictAbortReq* req = (DictAbortReq*)signal->getDataPtr(); - Ptr op; - ndbrequire(c_schemaOp.find(op, req->op_key)); + jamEntry(); + ndbrequire(c_schemaOp.find(op, req->op_key)); (this->*f_dict_op[op.p->m_vt_index].m_abort)(signal, op.p); } void -Dbdict::execDICT_COMMIT_REF(Signal* signal){ - jamEntry(); - +Dbdict::execDICT_COMMIT_REF(Signal* signal) +{ DictCommitRef * const ref = (DictCommitRef*)signal->getDataPtr(); - Ptr trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, ref->senderData)); - if(ref->errorCode != DictCommitRef::NF_FakeErrorREF){ + jam(); trans_ptr.p->setErrorCode(ref->errorCode); } Uint32 node = refToNode(ref->senderRef); @@ -14844,26 +14885,26 @@ Dbdict::execDICT_COMMIT_REF(Signal* signal){ } void -Dbdict::execDICT_COMMIT_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execDICT_COMMIT_CONF(Signal* signal) +{ + Ptr trans_ptr; DictCommitConf * const conf = (DictCommitConf*)signal->getDataPtr(); - - Ptr trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, conf->senderData)); schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef)); } void -Dbdict::execDICT_ABORT_REF(Signal* signal){ - jamEntry(); - +Dbdict::execDICT_ABORT_REF(Signal* signal) +{ DictAbortRef * const ref = (DictAbortRef*)signal->getDataPtr(); - Ptr trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, ref->senderData)); - if(ref->errorCode != DictAbortRef::NF_FakeErrorREF){ + jam(); trans_ptr.p->setErrorCode(ref->errorCode); } Uint32 node = refToNode(ref->senderRef); @@ -14871,31 +14912,28 @@ Dbdict::execDICT_ABORT_REF(Signal* signal){ } void -Dbdict::execDICT_ABORT_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execDICT_ABORT_CONF(Signal* signal) +{ DictAbortConf * const conf = (DictAbortConf*)signal->getDataPtr(); - Ptr trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, conf->senderData)); schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef)); } - - void Dbdict::createObj_prepare_start_done(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + SegmentedSectionPtr objInfoPtr; ndbrequire(returnCode == 0); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - - SegmentedSectionPtr objInfoPtr; + jam(); getSection(objInfoPtr, createObjPtr.p->m_obj_info_ptr_i); - if(createObjPtr.p->m_errorCode != 0){ jam(); createObjPtr.p->m_obj_info_ptr_i= RNIL; @@ -14923,19 +14961,19 @@ Dbdict::createObj_prepare_start_done(Signal* signal, void Dbdict::createObj_writeSchemaConf1(Signal* signal, Uint32 callbackData, - Uint32 
returnCode){ - jam(); - - ndbrequire(returnCode == 0); - + Uint32 returnCode) +{ CreateObjRecordPtr createObjPtr; + Callback callback; + SegmentedSectionPtr objInfoPtr; + + jam(); + ndbrequire(returnCode == 0); ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - Callback callback; callback.m_callbackData = createObjPtr.p->key; callback.m_callbackFunction = safe_cast(&Dbdict::createObj_writeObjConf); - SegmentedSectionPtr objInfoPtr; getSection(objInfoPtr, createObjPtr.p->m_obj_info_ptr_i); writeTableFile(signal, createObjPtr.p->m_obj_id, objInfoPtr, &callback); @@ -14947,14 +14985,13 @@ Dbdict::createObj_writeSchemaConf1(Signal* signal, void Dbdict::createObj_writeObjConf(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + jam(); - ndbrequire(returnCode == 0); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - createObjPtr.p->m_callback.m_callbackFunction = safe_cast(&Dbdict::createObj_prepare_complete_done); (this->*f_dict_op[createObjPtr.p->m_vt_index].m_prepare_complete) @@ -14964,12 +15001,12 @@ Dbdict::createObj_writeObjConf(Signal* signal, void Dbdict::createObj_prepare_complete_done(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + jam(); - ndbrequire(returnCode == 0); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); //@todo check for master failed @@ -14998,28 +15035,33 @@ Dbdict::createObj_prepare_complete_done(Signal* signal, } void -Dbdict::createObj_commit(Signal * signal, SchemaOp * op){ - jam(); - +Dbdict::createObj_commit(Signal * signal, SchemaOp * op) +{ OpCreateObj * createObj = (OpCreateObj*)op; + createObj->m_callback.m_callbackFunction = safe_cast(&Dbdict::createObj_commit_start_done); if (f_dict_op[createObj->m_vt_index].m_commit_start) + { + jam(); (this->*f_dict_op[createObj->m_vt_index].m_commit_start)(signal, createObj); + } else + { + jam(); execute(signal, createObj->m_callback, 0); + } } void Dbdict::createObj_commit_start_done(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; jam(); - ndbrequire(returnCode == 0); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); Uint32 objId = createObjPtr.p->m_obj_id; @@ -15039,29 +15081,35 @@ Dbdict::createObj_commit_start_done(Signal* signal, void Dbdict::createObj_writeSchemaConf2(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ - jam(); - - CreateObjRecordPtr createObjPtr; - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); createObjPtr.p->m_callback.m_callbackFunction = safe_cast(&Dbdict::createObj_commit_complete_done); if (f_dict_op[createObjPtr.p->m_vt_index].m_commit_complete) + { + jam(); (this->*f_dict_op[createObjPtr.p->m_vt_index].m_commit_complete) (signal, createObjPtr.p); + } else + { + jam(); execute(signal, createObjPtr.p->m_callback, 0); + } } void Dbdict::createObj_commit_complete_done(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + jam(); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); //@todo check error @@ -15079,27 +15127,31 @@ Dbdict::createObj_commit_complete_done(Signal* signal, void 
Dbdict::createObj_abort(Signal* signal, SchemaOp* op) { - jam(); - OpCreateObj * createObj = (OpCreateObj*)op; createObj->m_callback.m_callbackFunction = safe_cast(&Dbdict::createObj_abort_start_done); if (f_dict_op[createObj->m_vt_index].m_abort_start) + { + jam(); (this->*f_dict_op[createObj->m_vt_index].m_abort_start)(signal, createObj); + } else + { + jam(); execute(signal, createObj->m_callback, 0); + } } void Dbdict::createObj_abort_start_done(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + jam(); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; SchemaFile::TableEntry objEntry = * getTableEntry(xsf, createObjPtr.p->m_obj_id); @@ -15118,19 +15170,23 @@ Dbdict::createObj_abort_writeSchemaConf(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - jam(); + CreateObjRecordPtr createObjPtr; - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - createObjPtr.p->m_callback.m_callbackFunction = safe_cast(&Dbdict::createObj_abort_complete_done); if (f_dict_op[createObjPtr.p->m_vt_index].m_abort_complete) + { + jam(); (this->*f_dict_op[createObjPtr.p->m_vt_index].m_abort_complete) (signal, createObjPtr.p); + } else + { + jam(); execute(signal, createObjPtr.p->m_callback, 0); + } } void @@ -15138,9 +15194,9 @@ Dbdict::createObj_abort_complete_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - jam(); + CreateObjRecordPtr createObjPtr; - CreateObjRecordPtr createObjPtr; + jam(); ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); DictAbortConf * const conf = (DictAbortConf*)signal->getDataPtr(); @@ -15153,7 +15209,8 @@ Dbdict::createObj_abort_complete_done(Signal* signal, } void -Dbdict::execDROP_OBJ_REQ(Signal* signal){ +Dbdict::execDROP_OBJ_REQ(Signal* signal) +{ jamEntry(); if(!assembleFragments(signal)){ @@ -15191,8 +15248,9 @@ Dbdict::execDROP_OBJ_REQ(Signal* signal){ case DictTabInfo::Tablespace: case DictTabInfo::LogfileGroup: { - dropObjPtr.p->m_vt_index = 3; Ptr fg_ptr; + jam(); + dropObjPtr.p->m_vt_index = 3; ndbrequire(c_filegroup_hash.find(fg_ptr, objId)); dropObjPtr.p->m_obj_ptr_i = fg_ptr.i; break; @@ -15200,15 +15258,19 @@ Dbdict::execDROP_OBJ_REQ(Signal* signal){ } case DictTabInfo::Datafile: { - dropObjPtr.p->m_vt_index = 2; Ptr file_ptr; + jam(); + dropObjPtr.p->m_vt_index = 2; ndbrequire(c_file_hash.find(file_ptr, objId)); dropObjPtr.p->m_obj_ptr_i = file_ptr.i; break; } case DictTabInfo::Undofile: + { + jam(); dropObjPtr.p->m_vt_index = 4; return; + } default: ndbrequire(false); } @@ -15223,12 +15285,12 @@ Dbdict::dropObj_prepare_start_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - ndbrequire(returnCode == 0); + DropObjRecordPtr dropObjPtr; + Callback cb; - DropObjRecordPtr dropObjPtr; + ndbrequire(returnCode == 0); ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - Callback cb; cb.m_callbackData = callbackData; cb.m_callbackFunction = safe_cast(&Dbdict::dropObj_prepare_writeSchemaConf); @@ -15239,7 +15301,7 @@ Dbdict::dropObj_prepare_start_done(Signal* signal, dropObj_prepare_complete_done(signal, callbackData, 0); return; } - + jam(); Uint32 objId = dropObjPtr.p->m_obj_id; XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; SchemaFile::TableEntry objEntry = *getTableEntry(xsf, objId); @@ -15252,19 +15314,23 @@ Dbdict::dropObj_prepare_writeSchemaConf(Signal* signal, Uint32 
callbackData, Uint32 returnCode) { - ndbrequire(returnCode == 0); + DropObjRecordPtr dropObjPtr; - DropObjRecordPtr dropObjPtr; + ndbrequire(returnCode == 0); ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - dropObjPtr.p->m_callback.m_callbackFunction = safe_cast(&Dbdict::dropObj_prepare_complete_done); - if(f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete) + { + jam(); (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete) (signal, dropObjPtr.p); + } else + { + jam(); execute(signal, dropObjPtr.p->m_callback, 0); + } } void @@ -15272,10 +15338,11 @@ Dbdict::dropObj_prepare_complete_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { + DropObjRecordPtr dropObjPtr; + ndbrequire(returnCode == 0); - - DropObjRecordPtr dropObjPtr; ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); + jam(); //@todo check for master failed @@ -15301,16 +15368,22 @@ Dbdict::dropObj_prepare_complete_done(Signal* signal, } void -Dbdict::dropObj_commit(Signal * signal, SchemaOp * op){ - jam(); - +Dbdict::dropObj_commit(Signal * signal, SchemaOp * op) +{ OpDropObj * dropObj = (OpDropObj*)op; + dropObj->m_callback.m_callbackFunction = safe_cast(&Dbdict::dropObj_commit_start_done); if (f_dict_op[dropObj->m_vt_index].m_commit_start) + { + jam(); (this->*f_dict_op[dropObj->m_vt_index].m_commit_start)(signal, dropObj); + } else + { + jam(); execute(signal, dropObj->m_callback, 0); + } } void @@ -15318,10 +15391,10 @@ Dbdict::dropObj_commit_start_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { + DropObjRecordPtr dropObjPtr; + jam(); ndbrequire(returnCode == 0); - - DropObjRecordPtr dropObjPtr; ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); Uint32 objId = dropObjPtr.p->m_obj_id; @@ -15342,20 +15415,25 @@ Dbdict::dropObj_commit_writeSchemaConf(Signal* signal, Uint32 callbackData, Uint32 returnCode) { + DropObjRecordPtr dropObjPtr; + jam(); ndbrequire(returnCode == 0); - - DropObjRecordPtr dropObjPtr; ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - dropObjPtr.p->m_callback.m_callbackFunction = safe_cast(&Dbdict::dropObj_commit_complete_done); if(f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) + { + jam(); (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) (signal, dropObjPtr.p); + } else + { + jam(); execute(signal, dropObjPtr.p->m_callback, 0); + } } void @@ -15363,7 +15441,9 @@ Dbdict::dropObj_commit_complete_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - DropObjRecordPtr dropObjPtr; + DropObjRecordPtr dropObjPtr; + + jam(); ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); //@todo check error @@ -15374,22 +15454,26 @@ Dbdict::dropObj_commit_complete_done(Signal* signal, conf->senderData = dropObjPtr.p->m_senderData; sendSignal(dropObjPtr.p->m_senderRef, GSN_DICT_COMMIT_CONF, signal, DictCommitConf::SignalLength, JBB); - c_opDropObj.release(dropObjPtr); } void -Dbdict::dropObj_abort(Signal * signal, SchemaOp * op){ - jam(); - +Dbdict::dropObj_abort(Signal * signal, SchemaOp * op) +{ OpDropObj * dropObj = (OpDropObj*)op; + dropObj->m_callback.m_callbackFunction = safe_cast(&Dbdict::dropObj_abort_start_done); - if (f_dict_op[dropObj->m_vt_index].m_abort_start) + { + jam(); (this->*f_dict_op[dropObj->m_vt_index].m_abort_start)(signal, dropObj); + } else + { + jam(); execute(signal, dropObj->m_callback, 0); + } } void @@ -15397,10 +15481,10 @@ Dbdict::dropObj_abort_start_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { + DropObjRecordPtr dropObjPtr; + jam(); ndbrequire(returnCode == 0); - - 
DropObjRecordPtr dropObjPtr; ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; @@ -15421,6 +15505,7 @@ Dbdict::dropObj_abort_start_done(Signal* signal, } else { + jam(); execute(signal, callback, 0); } } @@ -15430,20 +15515,24 @@ Dbdict::dropObj_abort_writeSchemaConf(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - jam(); + DropObjRecordPtr dropObjPtr; + ndbrequire(returnCode == 0); - - DropObjRecordPtr dropObjPtr; ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - dropObjPtr.p->m_callback.m_callbackFunction = safe_cast(&Dbdict::dropObj_abort_complete_done); if(f_dict_op[dropObjPtr.p->m_vt_index].m_abort_complete) + { + jam(); (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_abort_complete) (signal, dropObjPtr.p); + } else + { + jam(); execute(signal, dropObjPtr.p->m_callback, 0); + } } void @@ -15451,24 +15540,26 @@ Dbdict::dropObj_abort_complete_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - DropObjRecordPtr dropObjPtr; - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - + DropObjRecordPtr dropObjPtr; DictAbortConf * const conf = (DictAbortConf*)signal->getDataPtr(); + + ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); + jam(); conf->senderRef = reference(); conf->senderData = dropObjPtr.p->m_senderData; sendSignal(dropObjPtr.p->m_senderRef, GSN_DICT_ABORT_CONF, signal, DictAbortConf::SignalLength, JBB); - c_opDropObj.release(dropObjPtr); } void -Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ +Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op) +{ /** * Put data into table record */ SegmentedSectionPtr objInfoPtr; + jam(); getSection(objInfoPtr, ((OpCreateObj*)op)->m_obj_info_ptr_i); SimplePropertiesSectionReader it(objInfoPtr, getSectionSegmentPool()); @@ -15485,6 +15576,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ if(status != SimpleProperties::Eof) { + jam(); op->m_errorCode = CreateTableRef::InvalidFormat; break; } @@ -15493,6 +15585,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ { if(!fg.TS_ExtentSize) { + jam(); op->m_errorCode = CreateFilegroupRef::InvalidExtentSize; break; } @@ -15504,6 +15597,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ */ if(fg.LF_UndoBufferSize < 3 * File_formats::NDB_PAGE_SIZE) { + jam(); op->m_errorCode = CreateFilegroupRef::InvalidUndoBufferSize; break; } @@ -15512,16 +15606,19 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ Uint32 len = strlen(fg.FilegroupName) + 1; Uint32 hash = Rope::hash(fg.FilegroupName, len); if(get_object(fg.FilegroupName, len, hash) != 0){ + jam(); op->m_errorCode = CreateTableRef::TableAlreadyExist; break; } if(!c_obj_pool.seize(obj_ptr)){ + jam(); op->m_errorCode = CreateTableRef::NoMoreTableRecords; break; } if(!c_filegroup_pool.seize(fg_ptr)){ + jam(); op->m_errorCode = CreateTableRef::NoMoreTableRecords; break; } @@ -15531,6 +15628,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ { Rope name(c_rope_pool, obj_ptr.p->m_name); if(!name.assign(fg.FilegroupName, len, hash)){ + jam(); op->m_errorCode = CreateTableRef::OutOfStringBuffer; break; } @@ -15544,6 +15642,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ switch(fg.FilegroupType){ case DictTabInfo::Tablespace: + { //fg.TS_DataGrow = group.m_grow_spec; fg_ptr.p->m_tablespace.m_extent_size = fg.TS_ExtentSize; fg_ptr.p->m_tablespace.m_default_logfile_group_id = fg.TS_LogfileGroupId; @@ -15551,22 +15650,28 @@ 
Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ Ptr lg_ptr; if (!c_filegroup_hash.find(lg_ptr, fg.TS_LogfileGroupId)) { + jam(); op->m_errorCode = CreateFilegroupRef::NoSuchLogfileGroup; goto error; } if (lg_ptr.p->m_version != fg.TS_LogfileGroupVersion) { + jam(); op->m_errorCode = CreateFilegroupRef::InvalidFilegroupVersion; goto error; } increase_ref_count(lg_ptr.p->m_obj_ptr_i); break; + } case DictTabInfo::LogfileGroup: + { + jam(); fg_ptr.p->m_logfilegroup.m_undo_buffer_size = fg.LF_UndoBufferSize; fg_ptr.p->m_logfilegroup.m_files.init(); //fg.LF_UndoGrow = ; break; + } default: ndbrequire(false); } @@ -15601,13 +15706,14 @@ error: } void -Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op){ +Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op) +{ /** * CONTACT TSMAN LGMAN PGMAN */ CreateFilegroupImplReq* req = (CreateFilegroupImplReq*)signal->getDataPtrSend(); - + jam(); req->senderData = op->key; req->senderRef = reference(); req->filegroup_id = op->m_obj_id; @@ -15620,18 +15726,24 @@ Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op){ Uint32 len= 0; switch(op->m_obj_type){ case DictTabInfo::Tablespace: + { + jam(); ref = TSMAN_REF; len = CreateFilegroupImplReq::TablespaceLength; req->tablespace.extent_size = fg_ptr.p->m_tablespace.m_extent_size; req->tablespace.logfile_group_id = fg_ptr.p->m_tablespace.m_default_logfile_group_id; break; + } case DictTabInfo::LogfileGroup: + { + jam(); ref = LGMAN_REF; len = CreateFilegroupImplReq::LogfileGroupLength; req->logfile_group.buffer_size = fg_ptr.p->m_logfilegroup.m_undo_buffer_size; break; + } default: ndbrequire(false); } @@ -15640,12 +15752,11 @@ Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op){ } void -Dbdict::execCREATE_FILEGROUP_REF(Signal* signal){ - jamEntry(); - +Dbdict::execCREATE_FILEGROUP_REF(Signal* signal) +{ CreateFilegroupImplRef * ref = (CreateFilegroupImplRef*)signal->getDataPtr(); - CreateObjRecordPtr op_ptr; + jamEntry(); ndbrequire(c_opCreateObj.find(op_ptr, ref->senderData)); op_ptr.p->m_errorCode = ref->errorCode; @@ -15653,13 +15764,12 @@ Dbdict::execCREATE_FILEGROUP_REF(Signal* signal){ } void -Dbdict::execCREATE_FILEGROUP_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execCREATE_FILEGROUP_CONF(Signal* signal) +{ CreateFilegroupImplConf * rep = (CreateFilegroupImplConf*)signal->getDataPtr(); - CreateObjRecordPtr op_ptr; + jamEntry(); ndbrequire(c_opCreateObj.find(op_ptr, rep->senderData)); execute(signal, op_ptr.p->m_callback, 0); @@ -15675,13 +15785,13 @@ Dbdict::create_fg_abort_start(Signal* signal, SchemaOp* op){ send_drop_fg(signal, op, DropFilegroupImplReq::Commit); return; } - + jam(); execute(signal, op->m_callback, 0); } void -Dbdict::create_fg_abort_complete(Signal* signal, SchemaOp* op){ - +Dbdict::create_fg_abort_complete(Signal* signal, SchemaOp* op) +{ if (op->m_obj_ptr_i != RNIL) { jam(); @@ -15691,12 +15801,13 @@ Dbdict::create_fg_abort_complete(Signal* signal, SchemaOp* op){ release_object(fg_ptr.p->m_obj_ptr_i); c_filegroup_hash.release(fg_ptr); } - + jam(); execute(signal, op->m_callback, 0); } void -Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ +Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op) +{ /** * Put data into table record */ @@ -15716,6 +15827,7 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ do { if(status != SimpleProperties::Eof){ + jam(); op->m_errorCode = CreateFileRef::InvalidFormat; break; } @@ -15723,34 +15835,53 @@ Dbdict::create_file_prepare_start(Signal* 
signal, SchemaOp* op){ // Get Filegroup FilegroupPtr fg_ptr; if(!c_filegroup_hash.find(fg_ptr, f.FilegroupId)){ + jam(); op->m_errorCode = CreateFileRef::NoSuchFilegroup; break; } if(fg_ptr.p->m_version != f.FilegroupVersion){ + jam(); op->m_errorCode = CreateFileRef::InvalidFilegroupVersion; break; } switch(f.FileType){ case DictTabInfo::Datafile: + { if(fg_ptr.p->m_type != DictTabInfo::Tablespace) + { + jam(); op->m_errorCode = CreateFileRef::InvalidFileType; + } + jam(); break; + } case DictTabInfo::Undofile: + { if(fg_ptr.p->m_type != DictTabInfo::LogfileGroup) + { + jam(); op->m_errorCode = CreateFileRef::InvalidFileType; + } + jam(); break; + } default: + jam(); op->m_errorCode = CreateFileRef::InvalidFileType; } if(op->m_errorCode) + { + jam(); break; + } Uint32 len = strlen(f.FileName) + 1; Uint32 hash = Rope::hash(f.FileName, len); if(get_object(f.FileName, len, hash) != 0){ + jam(); op->m_errorCode = CreateFileRef::FilenameAlreadyExists; break; } @@ -15761,6 +15892,7 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ m_ctx.m_config.getOwnConfigIterator(); if(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &dl) && dl) { + jam(); op->m_errorCode = CreateFileRef::NotSupportedWhenDiskless; break; } @@ -15768,11 +15900,13 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ // Loop through all filenames... if(!c_obj_pool.seize(obj_ptr)){ + jam(); op->m_errorCode = CreateTableRef::NoMoreTableRecords; break; } if (! c_file_pool.seize(filePtr)){ + jam(); op->m_errorCode = CreateFileRef::OutOfFileRecords; break; } @@ -15782,6 +15916,7 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ { Rope name(c_rope_pool, obj_ptr.p->m_name); if(!name.assign(f.FileName, len, hash)){ + jam(); op->m_errorCode = CreateTableRef::OutOfStringBuffer; break; } @@ -15789,10 +15924,14 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ switch(fg_ptr.p->m_type){ case DictTabInfo::Tablespace: + { + jam(); increase_ref_count(fg_ptr.p->m_obj_ptr_i); break; + } case DictTabInfo::LogfileGroup: { + jam(); Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files); list.add(filePtr); break; @@ -15836,37 +15975,46 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ c_obj_pool.release(obj_ptr); } } - execute(signal, op->m_callback, 0); } void -Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op){ +Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op) +{ /** * CONTACT TSMAN LGMAN PGMAN */ CreateFileImplReq* req = (CreateFileImplReq*)signal->getDataPtrSend(); - FilePtr f_ptr; - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - FilegroupPtr fg_ptr; + + jam(); + c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); req->senderData = op->key; req->senderRef = reference(); switch(((OpCreateObj*)op)->m_restart){ case 0: + { + jam(); req->requestInfo = CreateFileImplReq::Create; break; + } case 1: + { + jam(); req->requestInfo = CreateFileImplReq::Open; break; + } case 2: + { + jam(); req->requestInfo = CreateFileImplReq::CreateForce; break; } + } req->file_id = f_ptr.p->key; req->filegroup_id = f_ptr.p->m_filegroup_id; @@ -15878,14 +16026,20 @@ Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op){ Uint32 len= 0; switch(op->m_obj_type){ case DictTabInfo::Datafile: + { + jam(); ref = TSMAN_REF; len = CreateFileImplReq::DatafileLength; req->tablespace.extent_size = fg_ptr.p->m_tablespace.m_extent_size; break; + } case DictTabInfo::Undofile: + { + 
jam(); ref = LGMAN_REF; len = CreateFileImplReq::UndofileLength; break; + } default: ndbrequire(false); } @@ -15900,42 +16054,41 @@ Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op){ } void -Dbdict::execCREATE_FILE_REF(Signal* signal){ - jamEntry(); - +Dbdict::execCREATE_FILE_REF(Signal* signal) +{ CreateFileImplRef * ref = (CreateFileImplRef*)signal->getDataPtr(); - CreateObjRecordPtr op_ptr; + + jamEntry(); ndbrequire(c_opCreateObj.find(op_ptr, ref->senderData)); op_ptr.p->m_errorCode = ref->errorCode; - execute(signal, op_ptr.p->m_callback, 0); } void -Dbdict::execCREATE_FILE_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execCREATE_FILE_CONF(Signal* signal) +{ CreateFileImplConf * rep = (CreateFileImplConf*)signal->getDataPtr(); - CreateObjRecordPtr op_ptr; + + jamEntry(); ndbrequire(c_opCreateObj.find(op_ptr, rep->senderData)); - execute(signal, op_ptr.p->m_callback, 0); } void -Dbdict::create_file_commit_start(Signal* signal, SchemaOp* op){ +Dbdict::create_file_commit_start(Signal* signal, SchemaOp* op) +{ /** * CONTACT TSMAN LGMAN PGMAN */ CreateFileImplReq* req = (CreateFileImplReq*)signal->getDataPtrSend(); - FilePtr f_ptr; - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - FilegroupPtr fg_ptr; + + jam(); + c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); req->senderData = op->key; @@ -15949,15 +16102,20 @@ Dbdict::create_file_commit_start(Signal* signal, SchemaOp* op){ Uint32 ref= 0; switch(op->m_obj_type){ case DictTabInfo::Datafile: + { + jam(); ref = TSMAN_REF; break; + } case DictTabInfo::Undofile: + { + jam(); ref = LGMAN_REF; break; + } default: ndbrequire(false); } - sendSignal(ref, GSN_CREATE_FILE_REQ, signal, CreateFileImplReq::CommitLength, JBB); } @@ -15970,9 +16128,11 @@ Dbdict::create_file_abort_start(Signal* signal, SchemaOp* op) if (op->m_obj_ptr_i != RNIL) { FilePtr f_ptr; + FilegroupPtr fg_ptr; + + jam(); c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - FilegroupPtr fg_ptr; ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); req->senderData = op->key; @@ -15986,20 +16146,24 @@ Dbdict::create_file_abort_start(Signal* signal, SchemaOp* op) Uint32 ref= 0; switch(op->m_obj_type){ case DictTabInfo::Datafile: + { + jam(); ref = TSMAN_REF; break; + } case DictTabInfo::Undofile: + { + jam(); ref = LGMAN_REF; break; + } default: ndbrequire(false); } - sendSignal(ref, GSN_CREATE_FILE_REQ, signal, CreateFileImplReq::AbortLength, JBB); return; } - execute(signal, op->m_callback, 0); } @@ -16009,17 +16173,21 @@ Dbdict::create_file_abort_complete(Signal* signal, SchemaOp* op) if (op->m_obj_ptr_i != RNIL) { FilePtr f_ptr; - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - FilegroupPtr fg_ptr; + + jam(); + c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); - switch(fg_ptr.p->m_type){ case DictTabInfo::Tablespace: + { + jam(); decrease_ref_count(fg_ptr.p->m_obj_ptr_i); break; + } case DictTabInfo::LogfileGroup: { + jam(); Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files); list.remove(f_ptr); break; @@ -16031,19 +16199,20 @@ Dbdict::create_file_abort_complete(Signal* signal, SchemaOp* op) release_object(f_ptr.p->m_obj_ptr_i); c_file_hash.release(f_ptr); } - execute(signal, op->m_callback, 0); } void Dbdict::drop_file_prepare_start(Signal* signal, SchemaOp* op) { + jam(); send_drop_file(signal, op, DropFileImplReq::Prepare); } void Dbdict::drop_undofile_prepare_start(Signal* signal, SchemaOp* op) { + jam(); 
op->m_errorCode = DropFileRef::DropUndoFileNotSupported; execute(signal, op->m_callback, 0); } @@ -16051,6 +16220,7 @@ Dbdict::drop_undofile_prepare_start(Signal* signal, SchemaOp* op) void Dbdict::drop_file_commit_start(Signal* signal, SchemaOp* op) { + jam(); send_drop_file(signal, op, DropFileImplReq::Commit); } @@ -16058,21 +16228,21 @@ void Dbdict::drop_file_commit_complete(Signal* signal, SchemaOp* op) { FilePtr f_ptr; - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - FilegroupPtr fg_ptr; - ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); + jam(); + c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); + ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); decrease_ref_count(fg_ptr.p->m_obj_ptr_i); release_object(f_ptr.p->m_obj_ptr_i); c_file_hash.release(f_ptr); - execute(signal, op->m_callback, 0); } void Dbdict::drop_file_abort_start(Signal* signal, SchemaOp* op) { + jam(); send_drop_file(signal, op, DropFileImplReq::Abort); } @@ -16081,11 +16251,11 @@ Dbdict::send_drop_file(Signal* signal, SchemaOp* op, DropFileImplReq::RequestInfo type) { DropFileImplReq* req = (DropFileImplReq*)signal->getDataPtrSend(); - FilePtr f_ptr; - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - FilegroupPtr fg_ptr; + + jam(); + c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); req->senderData = op->key; @@ -16099,29 +16269,34 @@ Dbdict::send_drop_file(Signal* signal, SchemaOp* op, Uint32 ref= 0; switch(op->m_obj_type){ case DictTabInfo::Datafile: + { + jam(); ref = TSMAN_REF; break; + } case DictTabInfo::Undofile: + { + jam(); ref = LGMAN_REF; break; + } default: ndbrequire(false); } - sendSignal(ref, GSN_DROP_FILE_REQ, signal, DropFileImplReq::SignalLength, JBB); } void -Dbdict::execDROP_OBJ_REF(Signal* signal){ - jamEntry(); - +Dbdict::execDROP_OBJ_REF(Signal* signal) +{ DropObjRef * const ref = (DropObjRef*)signal->getDataPtr(); - Ptr trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, ref->senderData)); - if(ref->errorCode != DropObjRef::NF_FakeErrorREF){ + jam(); trans_ptr.p->setErrorCode(ref->errorCode); } Uint32 node = refToNode(ref->senderRef); @@ -16129,65 +16304,61 @@ Dbdict::execDROP_OBJ_REF(Signal* signal){ } void -Dbdict::execDROP_OBJ_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execDROP_OBJ_CONF(Signal* signal) +{ DropObjConf * const conf = (DropObjConf*)signal->getDataPtr(); - Ptr trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, conf->senderData)); schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef)); } void -Dbdict::execDROP_FILE_REF(Signal* signal){ - jamEntry(); - +Dbdict::execDROP_FILE_REF(Signal* signal) +{ DropFileImplRef * ref = (DropFileImplRef*)signal->getDataPtr(); - DropObjRecordPtr op_ptr; + + jamEntry(); ndbrequire(c_opDropObj.find(op_ptr, ref->senderData)); op_ptr.p->m_errorCode = ref->errorCode; - execute(signal, op_ptr.p->m_callback, 0); } void -Dbdict::execDROP_FILE_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execDROP_FILE_CONF(Signal* signal) +{ DropFileImplConf * rep = (DropFileImplConf*)signal->getDataPtr(); - DropObjRecordPtr op_ptr; + + jamEntry(); ndbrequire(c_opDropObj.find(op_ptr, rep->senderData)); - execute(signal, op_ptr.p->m_callback, 0); } void -Dbdict::execDROP_FILEGROUP_REF(Signal* signal){ - jamEntry(); - +Dbdict::execDROP_FILEGROUP_REF(Signal* signal) +{ DropFilegroupImplRef * ref = (DropFilegroupImplRef*)signal->getDataPtr(); - DropObjRecordPtr op_ptr; + + jamEntry(); ndbrequire(c_opDropObj.find(op_ptr, ref->senderData)); 
op_ptr.p->m_errorCode = ref->errorCode; - execute(signal, op_ptr.p->m_callback, 0); } void -Dbdict::execDROP_FILEGROUP_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execDROP_FILEGROUP_CONF(Signal* signal) +{ DropFilegroupImplConf * rep = (DropFilegroupImplConf*)signal->getDataPtr(); - DropObjRecordPtr op_ptr; + + jamEntry(); ndbrequire(c_opDropObj.find(op_ptr, rep->senderData)); - execute(signal, op_ptr.p->m_callback, 0); } @@ -16200,11 +16371,13 @@ Dbdict::drop_fg_prepare_start(Signal* signal, SchemaOp* op) DictObject * obj = c_obj_pool.getPtr(fg_ptr.p->m_obj_ptr_i); if (obj->m_ref_count) { + jam(); op->m_errorCode = DropFilegroupRef::FilegroupInUse; execute(signal, op->m_callback, 0); } else { + jam(); send_drop_fg(signal, op, DropFilegroupImplReq::Prepare); } } @@ -16216,7 +16389,7 @@ Dbdict::drop_fg_commit_start(Signal* signal, SchemaOp* op) c_filegroup_pool.getPtr(fg_ptr, op->m_obj_ptr_i); if (op->m_obj_type == DictTabInfo::LogfileGroup) { - + jam(); /** * Mark all undofiles as dropped */ @@ -16225,6 +16398,7 @@ Dbdict::drop_fg_commit_start(Signal* signal, SchemaOp* op) XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; for(list.first(filePtr); !filePtr.isNull(); list.next(filePtr)) { + jam(); Uint32 objId = filePtr.p->key; SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, objId); tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED; @@ -16237,13 +16411,14 @@ Dbdict::drop_fg_commit_start(Signal* signal, SchemaOp* op) else if(op->m_obj_type == DictTabInfo::Tablespace) { FilegroupPtr lg_ptr; + jam(); ndbrequire(c_filegroup_hash. find(lg_ptr, fg_ptr.p->m_tablespace.m_default_logfile_group_id)); decrease_ref_count(lg_ptr.p->m_obj_ptr_i); } - + jam(); send_drop_fg(signal, op, DropFilegroupImplReq::Commit); } @@ -16252,16 +16427,17 @@ Dbdict::drop_fg_commit_complete(Signal* signal, SchemaOp* op) { FilegroupPtr fg_ptr; c_filegroup_pool.getPtr(fg_ptr, op->m_obj_ptr_i); - + + jam(); release_object(fg_ptr.p->m_obj_ptr_i); c_filegroup_hash.release(fg_ptr); - execute(signal, op->m_callback, 0); } void Dbdict::drop_fg_abort_start(Signal* signal, SchemaOp* op) { + jam(); send_drop_fg(signal, op, DropFilegroupImplReq::Abort); } diff --git a/storage/ndb/src/kernel/blocks/lgman.cpp b/storage/ndb/src/kernel/blocks/lgman.cpp index 82fed94f62e..4af27e25124 100644 --- a/storage/ndb/src/kernel/blocks/lgman.cpp +++ b/storage/ndb/src/kernel/blocks/lgman.cpp @@ -462,7 +462,8 @@ Lgman::drop_filegroup_drop_files(Signal* signal, } void -Lgman::execCREATE_FILE_REQ(Signal* signal){ +Lgman::execCREATE_FILE_REQ(Signal* signal) +{ jamEntry(); CreateFileImplReq* req= (CreateFileImplReq*)signal->getDataPtr(); @@ -491,6 +492,7 @@ Lgman::execCREATE_FILE_REQ(Signal* signal){ switch(requestInfo){ case CreateFileImplReq::Commit: { + jam(); ndbrequire(find_file_by_id(file_ptr, ptr.p->m_meta_files, req->file_id)); file_ptr.p->m_create.m_senderRef = req->senderRef; file_ptr.p->m_create.m_senderData = req->senderData; @@ -503,6 +505,7 @@ Lgman::execCREATE_FILE_REQ(Signal* signal){ Uint32 senderData = req->senderData; if (find_file_by_id(file_ptr, ptr.p->m_meta_files, req->file_id)) { + jam(); file_ptr.p->m_create.m_senderRef = senderRef; file_ptr.p->m_create.m_senderData = senderData; create_file_abort(signal, ptr, file_ptr); @@ -510,11 +513,11 @@ Lgman::execCREATE_FILE_REQ(Signal* signal){ else { CreateFileImplConf* conf= (CreateFileImplConf*)signal->getDataPtr(); + jam(); conf->senderData = senderData; conf->senderRef = reference(); sendSignal(senderRef, GSN_CREATE_FILE_CONF, signal, 
CreateFileImplConf::SignalLength, JBB); - return; } return; } From 18e6090c376e4347a3b2041244e2b1a2fb269c2f Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 11 May 2007 08:07:42 +0200 Subject: [PATCH 05/21] Bug#25818 No return of NDB share object in failures in open method - make sure resources are release properly on error --- sql/ha_ndbcluster.cc | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 9b48e0d4f38..0e9dfcef5fb 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -5011,27 +5011,36 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked) set_dbname(name); set_tabname(name); - if (check_ndb_connection()) { - free_share(m_share); m_share= 0; - DBUG_RETURN(HA_ERR_NO_CONNECTION); + if ((res= check_ndb_connection()) || + (res= get_metadata(name))) + { + free_share(m_share); + m_share= 0; + DBUG_RETURN(res); } - - res= get_metadata(name); - if (!res) + while (1) { Ndb *ndb= get_ndb(); if (ndb->setDatabaseName(m_dbname)) { - ERR_RETURN(ndb->getNdbError()); + res= ndb_to_mysql_error(&ndb->getNdbError()); + break; } struct Ndb_statistics stat; res= ndb_get_table_statistics(NULL, false, ndb, m_tabname, &stat); records= stat.row_count; if(!res) res= info(HA_STATUS_CONST); + break; } - - DBUG_RETURN(res); + if (res) + { + free_share(m_share); + m_share= 0; + release_metadata(); + DBUG_RETURN(res); + } + DBUG_RETURN(0); } From 48a3175c4750d46ecba7cff69b8c48c0ba72cefd Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 11 May 2007 09:19:03 +0200 Subject: [PATCH 06/21] Bug#25818 No return of NDB share object in failures in open method - correct manual/auto merge to 5.1 --- sql/ha_ndbcluster.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index e9c634011f0..315b3c56eb3 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -6292,9 +6292,9 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked) } if (res) { - free_share(m_share); + free_share(&m_share); m_share= 0; - release_metadata(); + release_metadata(current_thd, get_ndb()); DBUG_RETURN(res); } #ifdef HAVE_NDB_BINLOG From 6962973e018984a67e101223bda86f1042e1d28d Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 14 May 2007 10:34:21 +0200 Subject: [PATCH 07/21] ndb - bug#28348 remove LCP files when dropping table storage/ndb/include/kernel/signaldata/FsOpenReq.hpp: add tup as user (for fsremove) storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp: add remove LCP stuff storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp: add remove LCP stuff storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp: remove each LCP file regardless if present or not --- .../include/kernel/signaldata/FsOpenReq.hpp | 2 + storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 11 ++- .../ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 4 + .../ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 87 +++++++++++++++++++ 4 files changed, 103 insertions(+), 1 deletion(-) diff --git a/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp b/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp index 8d438f79259..8126267f946 100644 --- a/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp +++ b/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp @@ -44,6 +44,8 @@ class FsOpenReq { friend class Restore; friend class Dblqh; + friend class Dbtup; + /** * For printing */ diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index 
6d14b714be0..d59d5cd79f2 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -972,6 +972,8 @@ ArrayPool c_triggerPool; struct { Uint32 tabUserPtr; Uint32 tabUserRef; + Uint32 m_lcpno; + Uint32 m_fragPtrI; } m_dropTable; State tableStatus; }; @@ -1533,6 +1535,11 @@ private: void execACCKEYREF(Signal* signal); void execACC_ABORTCONF(Signal* signal); + + // Drop table + void execFSREMOVEREF(Signal*); + void execFSREMOVECONF(Signal*); + //------------------------------------------------------------------ //------------------------------------------------------------------ // Methods to handle execution of TUPKEYREQ + ATTRINFO. @@ -2423,7 +2430,9 @@ private: void drop_fragment_free_extent_log_buffer_callback(Signal*, Uint32, Uint32); void drop_fragment_unmap_pages(Signal*, TablerecPtr, FragrecordPtr, Uint32); void drop_fragment_unmap_page_callback(Signal* signal, Uint32, Uint32); - + void drop_fragment_fsremove(Signal*, TablerecPtr, FragrecordPtr); + void drop_fragment_fsremove_done(Signal*, TablerecPtr, FragrecordPtr); + // Initialisation void initData(); void initRecords(); diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp index 7563712d481..f4fd80a482a 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp @@ -102,6 +102,10 @@ Dbtup::Dbtup(Block_context& ctx, Pgman* pgman) addRecSignal(GSN_ACCKEYREF, &Dbtup::execACCKEYREF); addRecSignal(GSN_ACC_ABORTCONF, &Dbtup::execACC_ABORTCONF); + // Drop table + addRecSignal(GSN_FSREMOVEREF, &Dbtup::execFSREMOVEREF, true); + addRecSignal(GSN_FSREMOVECONF, &Dbtup::execFSREMOVECONF, true); + attrbufrec = 0; fragoperrec = 0; fragrecord = 0; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp index 040a43d3dcd..3c2d521c1f9 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -1282,6 +1283,24 @@ Dbtup::drop_fragment_free_var_pages(Signal* signal) sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); return; } + + /** + * Remove LCP's for fragment + */ + tabPtr.p->m_dropTable.m_lcpno = 0; + tabPtr.p->m_dropTable.m_fragPtrI = fragPtr.i; + drop_fragment_fsremove(signal, tabPtr, fragPtr); +} + +void +Dbtup::drop_fragment_fsremove_done(Signal* signal, + TablerecPtr tabPtr, + FragrecordPtr fragPtr) +{ + /** + * LCP's removed... 
+ * now continue with "next" + */ Uint32 logfile_group_id = fragPtr.p->m_logfile_group_id ; releaseFragPages(fragPtr.p); Uint32 i; @@ -1301,6 +1320,74 @@ Dbtup::drop_fragment_free_var_pages(Signal* signal) return; } +// Remove LCP + +void +Dbtup::drop_fragment_fsremove(Signal* signal, + TablerecPtr tabPtr, + FragrecordPtr fragPtr) +{ + FsRemoveReq* req = (FsRemoveReq*)signal->getDataPtrSend(); + req->userReference = reference(); + req->userPointer = tabPtr.i; + req->directory = 0; + req->ownDirectory = 0; + + Uint32 lcpno = tabPtr.p->m_dropTable.m_lcpno; + Uint32 fragId = fragPtr.p->fragmentId; + Uint32 tableId = fragPtr.p->fragTableId; + + FsOpenReq::setVersion(req->fileNumber, 5); + FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA); + FsOpenReq::v5_setLcpNo(req->fileNumber, lcpno); + FsOpenReq::v5_setTableId(req->fileNumber, tableId); + FsOpenReq::v5_setFragmentId(req->fileNumber, fragId); + sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, + FsRemoveReq::SignalLength, JBB); +} + +void +Dbtup::execFSREMOVEREF(Signal* signal) +{ + jamEntry(); + FsRef* ref = (FsRef*)signal->getDataPtr(); + Uint32 userPointer = ref->userPointer; + FsConf* conf = (FsConf*)signal->getDataPtrSend(); + conf->userPointer = userPointer; + execFSREMOVECONF(signal); +} + +void +Dbtup::execFSREMOVECONF(Signal* signal) +{ + jamEntry(); + FsConf* conf = (FsConf*)signal->getDataPtrSend(); + + TablerecPtr tabPtr; + FragrecordPtr fragPtr; + + tabPtr.i = conf->userPointer; + ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); + + ndbrequire(tabPtr.p->tableStatus == DROPPING); + + fragPtr.i = tabPtr.p->m_dropTable.m_fragPtrI; + ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); + + tabPtr.p->m_dropTable.m_lcpno++; + if (tabPtr.p->m_dropTable.m_lcpno < 3) + { + jam(); + drop_fragment_fsremove(signal, tabPtr, fragPtr); + } + else + { + jam(); + drop_fragment_fsremove_done(signal, tabPtr, fragPtr); + } +} +// End remove LCP + void Dbtup::start_restore_lcp(Uint32 tableId, Uint32 fragId) { From 5a5aafd3078578f474248d3025cbcdda6a1a914a Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 14 May 2007 12:15:27 +0200 Subject: [PATCH 08/21] Bug #28410 ndb: no retry sleep when getting autoincrement - add retry sleep to allow temprary error to go away --- sql/ha_ndbcluster.cc | 57 +++++++++++++++++++++++++++----------------- 1 file changed, 35 insertions(+), 22 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 0e9dfcef5fb..0f3a42bbce7 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -2309,16 +2309,24 @@ int ha_ndbcluster::write_row(byte *record) { // Table has hidden primary key Ndb *ndb= get_ndb(); - int ret; Uint64 auto_value; uint retries= NDB_AUTO_INCREMENT_RETRIES; - do { - ret= ndb->getAutoIncrementValue((const NDBTAB *) m_table, auto_value, 1); - } while (ret == -1 && - --retries && - ndb->getNdbError().status == NdbError::TemporaryError); - if (ret == -1) - ERR_RETURN(ndb->getNdbError()); + int retry_sleep= 30; /* 30 milliseconds, transaction */ + for (;;) + { + if (ndb->getAutoIncrementValue((const NDBTAB *) m_table, + auto_value, 1) == -1) + { + if (--retries && + ndb->getNdbError().status == NdbError::TemporaryError); + { + my_sleep(retry_sleep); + continue; + } + ERR_RETURN(ndb->getNdbError()); + } + break; + } if (set_hidden_key(op, table->s->fields, (const byte*)&auto_value)) ERR_RETURN(op->getNdbError()); } @@ -4855,22 +4863,27 @@ ulonglong ha_ndbcluster::get_auto_increment() m_rows_to_insert - m_rows_inserted : ((m_rows_to_insert > m_autoincrement_prefetch) ? 
m_rows_to_insert : m_autoincrement_prefetch)); - int ret; uint retries= NDB_AUTO_INCREMENT_RETRIES; - do { - ret= - m_skip_auto_increment ? - ndb->readAutoIncrementValue((const NDBTAB *) m_table, auto_value) : - ndb->getAutoIncrementValue((const NDBTAB *) m_table, auto_value, cache_size); - } while (ret == -1 && - --retries && - ndb->getNdbError().status == NdbError::TemporaryError); - if (ret == -1) + int retry_sleep= 30; /* 30 milliseconds, transaction */ + for (;;) { - const NdbError err= ndb->getNdbError(); - sql_print_error("Error %lu in ::get_auto_increment(): %s", - (ulong) err.code, err.message); - DBUG_RETURN(~(ulonglong) 0); + if (m_skip_auto_increment && + ndb->readAutoIncrementValue((const NDBTAB *) m_table, auto_value) || + ndb->getAutoIncrementValue((const NDBTAB *) m_table, + auto_value, cache_size)) + { + if (--retries && + ndb->getNdbError().status == NdbError::TemporaryError); + { + my_sleep(retry_sleep); + continue; + } + const NdbError err= ndb->getNdbError(); + sql_print_error("Error %lu in ::get_auto_increment(): %s", + (ulong) err.code, err.message); + DBUG_RETURN(~(ulonglong) 0); + } + break; } DBUG_RETURN((longlong)auto_value); } From c78f434071f10b19d16bdc45120a2a6ba7d6c8b9 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 14 May 2007 14:38:50 +0200 Subject: [PATCH 09/21] files to check if log bin is turned on mysql-test/include/have_log_bin.inc: New BitKeeper file ``mysql-test/include/have_log_bin.inc'' mysql-test/r/have_log_bin.require: New BitKeeper file ``mysql-test/r/have_log_bin.require'' --- mysql-test/include/have_log_bin.inc | 4 ++++ mysql-test/r/have_log_bin.require | 2 ++ 2 files changed, 6 insertions(+) create mode 100644 mysql-test/include/have_log_bin.inc create mode 100644 mysql-test/r/have_log_bin.require diff --git a/mysql-test/include/have_log_bin.inc b/mysql-test/include/have_log_bin.inc new file mode 100644 index 00000000000..11530dc953e --- /dev/null +++ b/mysql-test/include/have_log_bin.inc @@ -0,0 +1,4 @@ +-- require r/have_log_bin.require +disable_query_log; +show variables like "log_bin"; +enable_query_log; diff --git a/mysql-test/r/have_log_bin.require b/mysql-test/r/have_log_bin.require new file mode 100644 index 00000000000..cacdf8df0ce --- /dev/null +++ b/mysql-test/r/have_log_bin.require @@ -0,0 +1,2 @@ +Variable_name Value +have_log_bin ON From 9f18586ccf6982bef930251a5ace9e00073e22b2 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 14 May 2007 14:39:37 +0200 Subject: [PATCH 10/21] disable test if log bin is not turned on --- mysql-test/t/ndb_binlog_basic2.test | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/t/ndb_binlog_basic2.test b/mysql-test/t/ndb_binlog_basic2.test index 9fa9f2f965a..bcc6b503320 100644 --- a/mysql-test/t/ndb_binlog_basic2.test +++ b/mysql-test/t/ndb_binlog_basic2.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/have_log_bin.inc --error ER_NDB_CANT_SWITCH_BINLOG_FORMAT set session binlog_format=row; From cb238b0e423c6a3d84d85790c9991b3e533d7794 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 14 May 2007 14:43:07 +0200 Subject: [PATCH 11/21] corrected manual merge --- sql/ha_ndbcluster.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 7ab7de6ab81..304b8fc510d 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -6089,7 +6089,8 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment, const NdbError err= ndb->getNdbError(); sql_print_error("Error %lu in 
::get_auto_increment(): %s", (ulong) err.code, err.message); - DBUG_RETURN(~(ulonglong) 0); + *first_value= ~(ulonglong) 0; + DBUG_VOID_RETURN; } break; } From 2695f46b2a9ff7efe9a526b13e9640ba26b498d3 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 15 May 2007 08:34:39 +0200 Subject: [PATCH 12/21] #26906 No message slogan found - added errormessage and code for "declaring node dead" --- ndb/include/mgmapi/ndbd_exit_codes.h | 1 + ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 2 +- ndb/src/kernel/error/ndbd_exit_codes.c | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/ndb/include/mgmapi/ndbd_exit_codes.h b/ndb/include/mgmapi/ndbd_exit_codes.h index 874bf0aa253..b8a65b54672 100644 --- a/ndb/include/mgmapi/ndbd_exit_codes.h +++ b/ndb/include/mgmapi/ndbd_exit_codes.h @@ -79,6 +79,7 @@ typedef ndbd_exit_classification_enum ndbd_exit_classification; #define NDBD_EXIT_NO_MORE_UNDOLOG 2312 #define NDBD_EXIT_SR_UNDOLOG 2313 #define NDBD_EXIT_SINGLE_USER_MODE 2314 +#define NDBD_EXIT_NODE_DECLARED_DEAD 2315 #define NDBD_EXIT_MEMALLOC 2327 #define NDBD_EXIT_BLOCK_JBUFCONGESTION 2334 #define NDBD_EXIT_TIME_QUEUE_SHORT 2335 diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index a0a19620a05..a76838f7007 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -2816,7 +2816,7 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode, if (failedNodePtr.i == getOwnNodeId()) { jam(); - Uint32 code = 0; + Uint32 code = NDBD_EXIT_NODE_DECLARED_DEAD; const char * msg = 0; char extra[100]; switch(aFailCause){ diff --git a/ndb/src/kernel/error/ndbd_exit_codes.c b/ndb/src/kernel/error/ndbd_exit_codes.c index 37a54e33350..68d8f22f158 100644 --- a/ndb/src/kernel/error/ndbd_exit_codes.c +++ b/ndb/src/kernel/error/ndbd_exit_codes.c @@ -57,6 +57,8 @@ static const ErrStruct errArray[] = "error(s) on other node(s)"}, {NDBD_EXIT_PARTITIONED_SHUTDOWN, XAE, "Partitioned cluster detected. " "Please check if cluster is already running"}, + {NDBD_EXIT_NODE_DECLARED_DEAD, XAE, + "Node declared dead. 
See error log for details"}, {NDBD_EXIT_POINTER_NOTINRANGE, XIE, "Pointer too large"}, {NDBD_EXIT_SR_OTHERNODEFAILED, XRE, "Another node failed during system " "restart, please investigate error(s) on other node(s)"}, From bc5ef9ffa190133061c35def819ba329f727749e Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 15 May 2007 09:03:00 +0200 Subject: [PATCH 13/21] Bug #26386 ndbd wont start after changing schema backported error code from 5.1 split error check when reading schema file ndb/include/mgmapi/ndbd_exit_codes.h: backported error code from 5.1 ndb/src/kernel/blocks/dbdict/Dbdict.cpp: backported error code from 5.1 split error check when reading schema file ndb/src/kernel/error/ndbd_exit_codes.c: backported error code from 5.1 --- ndb/include/mgmapi/ndbd_exit_codes.h | 1 + ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 26 +++++++++++++++++-------- ndb/src/kernel/error/ndbd_exit_codes.c | 1 + 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/ndb/include/mgmapi/ndbd_exit_codes.h b/ndb/include/mgmapi/ndbd_exit_codes.h index b8a65b54672..1051fd9e394 100644 --- a/ndb/include/mgmapi/ndbd_exit_codes.h +++ b/ndb/include/mgmapi/ndbd_exit_codes.h @@ -80,6 +80,7 @@ typedef ndbd_exit_classification_enum ndbd_exit_classification; #define NDBD_EXIT_SR_UNDOLOG 2313 #define NDBD_EXIT_SINGLE_USER_MODE 2314 #define NDBD_EXIT_NODE_DECLARED_DEAD 2315 +#define NDBD_EXIT_SR_SCHEMAFILE 2316 #define NDBD_EXIT_MEMALLOC 2327 #define NDBD_EXIT_BLOCK_JBUFCONGESTION 2334 #define NDBD_EXIT_TIME_QUEUE_SHORT 2335 diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index a039c1bdbe7..fd7aabc8b67 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -1069,14 +1069,24 @@ void Dbdict::readSchemaConf(Signal* signal, for (Uint32 n = 0; n < xsf->noOfPages; n++) { SchemaFile * sf = &xsf->schemaPage[n]; - bool ok = - memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) == 0 && - sf->FileSize != 0 && - sf->FileSize % NDB_SF_PAGE_SIZE == 0 && - sf->FileSize == sf0->FileSize && - sf->PageNumber == n && - computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) == 0; - ndbrequire(ok || !crashInd); + bool ok = false; + if (memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) != 0) + { jam(); } + else if (sf->FileSize == 0) + { jam(); } + else if (sf->FileSize % NDB_SF_PAGE_SIZE != 0) + { jam(); } + else if (sf->FileSize != sf0->FileSize) + { jam(); } + else if (sf->PageNumber != n) + { jam(); } + else if (computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) != 0) + { jam(); } + else if (crashInd) + { jam(); } + else + ok = true; + ndbrequireErr(ok, NDBD_EXIT_SR_SCHEMAFILE); if (! ok) { jam(); ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1); diff --git a/ndb/src/kernel/error/ndbd_exit_codes.c b/ndb/src/kernel/error/ndbd_exit_codes.c index 68d8f22f158..92bee522d24 100644 --- a/ndb/src/kernel/error/ndbd_exit_codes.c +++ b/ndb/src/kernel/error/ndbd_exit_codes.c @@ -65,6 +65,7 @@ static const ErrStruct errArray[] = {NDBD_EXIT_NODE_NOT_DEAD, XRE, "Internal node state conflict, " "most probably resolved by restarting node again"}, {NDBD_EXIT_SR_REDOLOG, XFI, "Error while reading the REDO log"}, + {NDBD_EXIT_SR_SCHEMAFILE, XFI, "Error while reading the schema file"}, /* Currently unused? 
*/ {2311, XIE, "Conflict when selecting restart type"}, {NDBD_EXIT_NO_MORE_UNDOLOG, XCR, From aad9135c007a00dfb0f8c97a057168f401512bd5 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 15 May 2007 09:08:16 +0200 Subject: [PATCH 14/21] ndb - bug#24631 add Dbdict::restartDropObj* storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp: add restartDropObj - handle undo files "specially" storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp: add restartDropObj* storage/ndb/src/kernel/blocks/lgman.cpp: allow drop of meta files storage/ndb/src/kernel/blocks/tsman.cpp: allow drop of meta files storage/ndb/test/ndbapi/testDict.cpp: add testcase storage/ndb/test/run-test/daily-basic-tests.txt: add testcase --- .../ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 189 +++++++++++++++++- .../ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 9 +- storage/ndb/src/kernel/blocks/lgman.cpp | 13 +- storage/ndb/src/kernel/blocks/tsman.cpp | 6 + storage/ndb/test/ndbapi/testDict.cpp | 157 +++++++++++++++ .../ndb/test/run-test/daily-basic-tests.txt | 4 + 6 files changed, 372 insertions(+), 6 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index de365e886a0..e5ed9e49642 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -188,7 +188,7 @@ struct { 0, 0, 0, 0, &Dbdict::drop_undofile_prepare_start, 0, 0, - 0, 0, + 0, &Dbdict::drop_undofile_commit_complete, 0, 0, 0 } }; @@ -3209,9 +3209,7 @@ Dbdict::restartDropTab(Signal* signal, Uint32 tableId, case DictTabInfo::LogfileGroup: case DictTabInfo::Datafile: case DictTabInfo::Undofile: - warningEvent("Dont drop object: %d", tableId); - c_restartRecord.activeTable++; - checkSchemaStatus(signal); + restartDropObj(signal, tableId, old_entry); return; } @@ -3254,6 +3252,9 @@ Dbdict::restartDropTab_complete(Signal* signal, checkSchemaStatus(signal); } +/** + * Create Obj during NR/SR + */ void Dbdict::restartCreateObj(Signal* signal, Uint32 tableId, @@ -3482,6 +3483,170 @@ Dbdict::restartCreateObj_commit_complete_done(Signal* signal, checkSchemaStatus(signal); } +/** + * Drop object during NR/SR + */ +void +Dbdict::restartDropObj(Signal* signal, + Uint32 tableId, + const SchemaFile::TableEntry * entry) +{ + jam(); + + DropObjRecordPtr dropObjPtr; + ndbrequire(c_opDropObj.seize(dropObjPtr)); + + const Uint32 key = ++c_opRecordSequence; + dropObjPtr.p->key = key; + c_opDropObj.add(dropObjPtr); + dropObjPtr.p->m_errorCode = 0; + dropObjPtr.p->m_senderRef = reference(); + dropObjPtr.p->m_senderData = tableId; + dropObjPtr.p->m_clientRef = reference(); + dropObjPtr.p->m_clientData = tableId; + + dropObjPtr.p->m_obj_id = tableId; + dropObjPtr.p->m_obj_type = entry->m_tableType; + dropObjPtr.p->m_obj_version = entry->m_tableVersion; + + dropObjPtr.p->m_callback.m_callbackData = key; + dropObjPtr.p->m_callback.m_callbackFunction= + safe_cast(&Dbdict::restartDropObj_prepare_start_done); + + ndbout_c("Dropping %d %d", tableId, entry->m_tableType); + switch(entry->m_tableType){ + case DictTabInfo::Tablespace: + case DictTabInfo::LogfileGroup:{ + jam(); + Ptr fg_ptr; + ndbrequire(c_filegroup_hash.find(fg_ptr, tableId)); + dropObjPtr.p->m_obj_ptr_i = fg_ptr.i; + dropObjPtr.p->m_vt_index = 3; + break; + } + case DictTabInfo::Datafile:{ + jam(); + Ptr file_ptr; + dropObjPtr.p->m_vt_index = 2; + ndbrequire(c_file_hash.find(file_ptr, tableId)); + dropObjPtr.p->m_obj_ptr_i = file_ptr.i; + break; + } + case DictTabInfo::Undofile:{ + jam(); + Ptr file_ptr; + dropObjPtr.p->m_vt_index 
= 4; + ndbrequire(c_file_hash.find(file_ptr, tableId)); + dropObjPtr.p->m_obj_ptr_i = file_ptr.i; + + /** + * Undofiles are only removed from logfile groups file list + * as drop undofile is currently not supported... + * file will be dropped by lgman when dropping filegroup + */ + dropObjPtr.p->m_callback.m_callbackFunction= + safe_cast(&Dbdict::restartDropObj_commit_complete_done); + + if (f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) + (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) + (signal, dropObjPtr.p); + else + execute(signal, dropObjPtr.p->m_callback, 0); + return; + } + default: + jamLine(entry->m_tableType); + ndbrequire(false); + } + + if (f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_start) + (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_start) + (signal, dropObjPtr.p); + else + execute(signal, dropObjPtr.p->m_callback, 0); +} + +void +Dbdict::restartDropObj_prepare_start_done(Signal* signal, + Uint32 callbackData, + Uint32 returnCode) +{ + jam(); + ndbrequire(returnCode == 0); + DropObjRecordPtr dropObjPtr; + ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); + ndbrequire(dropObjPtr.p->m_errorCode == 0); + + dropObjPtr.p->m_callback.m_callbackFunction = + safe_cast(&Dbdict::restartDropObj_prepare_complete_done); + + if (f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete) + (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete) + (signal, dropObjPtr.p); + else + execute(signal, dropObjPtr.p->m_callback, 0); +} + +void +Dbdict::restartDropObj_prepare_complete_done(Signal* signal, + Uint32 callbackData, + Uint32 returnCode) +{ + jam(); + ndbrequire(returnCode == 0); + DropObjRecordPtr dropObjPtr; + ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); + ndbrequire(dropObjPtr.p->m_errorCode == 0); + + dropObjPtr.p->m_callback.m_callbackFunction = + safe_cast(&Dbdict::restartDropObj_commit_start_done); + + if (f_dict_op[dropObjPtr.p->m_vt_index].m_commit_start) + (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_start) + (signal, dropObjPtr.p); + else + execute(signal, dropObjPtr.p->m_callback, 0); +} + +void +Dbdict::restartDropObj_commit_start_done(Signal* signal, + Uint32 callbackData, + Uint32 returnCode) +{ + jam(); + ndbrequire(returnCode == 0); + DropObjRecordPtr dropObjPtr; + ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); + ndbrequire(dropObjPtr.p->m_errorCode == 0); + + dropObjPtr.p->m_callback.m_callbackFunction = + safe_cast(&Dbdict::restartDropObj_commit_complete_done); + + if (f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) + (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) + (signal, dropObjPtr.p); + else + execute(signal, dropObjPtr.p->m_callback, 0); +} + + +void +Dbdict::restartDropObj_commit_complete_done(Signal* signal, + Uint32 callbackData, + Uint32 returnCode) +{ + jam(); + ndbrequire(returnCode == 0); + DropObjRecordPtr dropObjPtr; + ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); + ndbrequire(dropObjPtr.p->m_errorCode == 0); + + c_opDropObj.release(dropObjPtr); + + c_restartRecord.activeTable++; + checkSchemaStatus(signal); +} + /* **************************************************************** */ /* ---------------------------------------------------------------- */ /* MODULE: NODE FAILURE HANDLING ------------------------- */ @@ -16239,6 +16404,22 @@ Dbdict::drop_file_commit_complete(Signal* signal, SchemaOp* op) execute(signal, op->m_callback, 0); } +void +Dbdict::drop_undofile_commit_complete(Signal* signal, SchemaOp* op) +{ + FilePtr f_ptr; + FilegroupPtr 
fg_ptr; + + jam(); + c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); + ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); + Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files); + list.remove(f_ptr); + release_object(f_ptr.p->m_obj_ptr_i); + c_file_hash.release(f_ptr); + execute(signal, op->m_callback, 0); +} + void Dbdict::drop_file_abort_start(Signal* signal, SchemaOp* op) { diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index e5b918ca270..3fff330d699 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -2565,6 +2565,12 @@ private: const SchemaFile::TableEntry *, const SchemaFile::TableEntry *); void restartDropTab_complete(Signal*, Uint32 callback, Uint32); + + void restartDropObj(Signal*, Uint32, const SchemaFile::TableEntry *); + void restartDropObj_prepare_start_done(Signal*, Uint32, Uint32); + void restartDropObj_prepare_complete_done(Signal*, Uint32, Uint32); + void restartDropObj_commit_start_done(Signal*, Uint32, Uint32); + void restartDropObj_commit_complete_done(Signal*, Uint32, Uint32); void restart_checkSchemaStatusComplete(Signal*, Uint32 callback, Uint32); void restart_writeSchemaConf(Signal*, Uint32 callbackData, Uint32); @@ -2657,7 +2663,8 @@ public: void send_drop_fg(Signal*, SchemaOp*, DropFilegroupImplReq::RequestInfo); void drop_undofile_prepare_start(Signal* signal, SchemaOp*); - + void drop_undofile_commit_complete(Signal* signal, SchemaOp*); + int checkSingleUserMode(Uint32 senderRef); }; diff --git a/storage/ndb/src/kernel/blocks/lgman.cpp b/storage/ndb/src/kernel/blocks/lgman.cpp index 4af27e25124..25cdac89737 100644 --- a/storage/ndb/src/kernel/blocks/lgman.cpp +++ b/storage/ndb/src/kernel/blocks/lgman.cpp @@ -436,7 +436,6 @@ Lgman::drop_filegroup_drop_files(Signal* signal, { jam(); ndbrequire(! 
(ptr.p->m_state & Logfile_group::LG_THREAD_MASK)); - ndbrequire(ptr.p->m_meta_files.isEmpty()); ndbrequire(ptr.p->m_outstanding_fs == 0); Local_undofile_list list(m_file_pool, ptr.p->m_files); @@ -452,6 +451,18 @@ Lgman::drop_filegroup_drop_files(Signal* signal, return; } + Local_undofile_list metalist(m_file_pool, ptr.p->m_meta_files); + if (metalist.first(file_ptr)) + { + jam(); + metalist.remove(file_ptr); + list.add(file_ptr); + file_ptr.p->m_create.m_senderRef = ref; + file_ptr.p->m_create.m_senderData = data; + create_file_abort(signal, ptr, file_ptr); + return; + } + free_logbuffer_memory(ptr); m_logfile_group_hash.release(ptr); DropFilegroupImplConf *conf = (DropFilegroupImplConf*)signal->getDataPtr(); diff --git a/storage/ndb/src/kernel/blocks/tsman.cpp b/storage/ndb/src/kernel/blocks/tsman.cpp index 62aa80a67fe..8f61ec0cf7b 100644 --- a/storage/ndb/src/kernel/blocks/tsman.cpp +++ b/storage/ndb/src/kernel/blocks/tsman.cpp @@ -1309,6 +1309,12 @@ Tsman::execDROP_FILE_REQ(Signal* signal) Local_datafile_list free(m_file_pool, fg_ptr.p->m_free_files); free.remove(file_ptr); } + else if(find_file_by_id(file_ptr, fg_ptr.p->m_meta_files, req.file_id)) + { + jam(); + Local_datafile_list meta(m_file_pool, fg_ptr.p->m_meta_files); + meta.remove(file_ptr); + } else { errorCode = DropFileImplRef::NoSuchFile; diff --git a/storage/ndb/test/ndbapi/testDict.cpp b/storage/ndb/test/ndbapi/testDict.cpp index 9828cb768df..13c071f968e 100644 --- a/storage/ndb/test/ndbapi/testDict.cpp +++ b/storage/ndb/test/ndbapi/testDict.cpp @@ -2204,6 +2204,159 @@ runBug21755(NDBT_Context* ctx, NDBT_Step* step) return NDBT_OK; } +static +int +create_tablespace(NdbDictionary::Dictionary* pDict, + const char * lgname, + const char * tsname, + const char * dfname) +{ + NdbDictionary::Tablespace ts; + ts.setName(tsname); + ts.setExtentSize(1024*1024); + ts.setDefaultLogfileGroup(lgname); + + if(pDict->createTablespace(ts) != 0) + { + g_err << "Failed to create tablespace:" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + NdbDictionary::Datafile df; + df.setPath(dfname); + df.setSize(1*1024*1024); + df.setTablespace(tsname); + + if(pDict->createDatafile(df) != 0) + { + g_err << "Failed to create datafile:" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + return 0; +} + +int +runBug24631(NDBT_Context* ctx, NDBT_Step* step) +{ + char tsname[256]; + char dfname[256]; + char lgname[256]; + char ufname[256]; + NdbRestarter res; + + if (res.getNumDbNodes() < 2) + return NDBT_OK; + + Ndb* pNdb = GETNDB(step); + NdbDictionary::Dictionary* pDict = pNdb->getDictionary(); + + NdbDictionary::Dictionary::List list; + if (pDict->listObjects(list) == -1) + return NDBT_FAILED; + + const char * lgfound = 0; + + for (Uint32 i = 0; icreateLogfileGroup(lg) != 0) + { + g_err << "Failed to create logfilegroup:" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + NdbDictionary::Undofile uf; + BaseString::snprintf(ufname, sizeof(ufname), "%s-%u", lgname, rand()); + uf.setPath(ufname); + uf.setSize(2*1024*1024); + uf.setLogfileGroup(lgname); + + if(pDict->createUndofile(uf) != 0) + { + g_err << "Failed to create undofile:" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + } + else + { + BaseString::snprintf(lgname, sizeof(lgname), "%s", lgfound); + } + + BaseString::snprintf(tsname, sizeof(tsname), "TS-%u", rand()); + BaseString::snprintf(dfname, sizeof(dfname), "%s-%u.dat", tsname, rand()); + + if (create_tablespace(pDict, lgname, tsname, dfname)) + return 
NDBT_FAILED; + + + int node = res.getRandomNotMasterNodeId(rand()); + res.restartOneDbNode(node, false, true, true); + NdbSleep_SecSleep(3); + + if (pDict->dropDatafile(pDict->getDatafile(0, dfname)) != 0) + { + g_err << "Failed to drop datafile: " << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + if (pDict->dropTablespace(pDict->getTablespace(tsname)) != 0) + { + g_err << "Failed to drop tablespace: " << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + if (res.waitNodesNoStart(&node, 1)) + return NDBT_FAILED; + + res.startNodes(&node, 1); + if (res.waitClusterStarted()) + return NDBT_FAILED; + + if (create_tablespace(pDict, lgname, tsname, dfname)) + return NDBT_FAILED; + + if (pDict->dropDatafile(pDict->getDatafile(0, dfname)) != 0) + { + g_err << "Failed to drop datafile: " << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + if (pDict->dropTablespace(pDict->getTablespace(tsname)) != 0) + { + g_err << "Failed to drop tablespace: " << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + if (lgfound == 0) + { + if (pDict->dropLogfileGroup(pDict->getLogfileGroup(lgname)) != 0) + return NDBT_FAILED; + } + + return NDBT_OK; +} + struct RandSchemaOp { struct Obj @@ -2707,6 +2860,10 @@ TESTCASE("DictRestart", ""){ INITIALIZER(runDictRestart); } +TESTCASE("Bug24631", + ""){ + INITIALIZER(runBug24631); +} NDBT_TESTSUITE_END(testDict); int main(int argc, const char** argv){ diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt index e080536dad9..8f24e8826f9 100644 --- a/storage/ndb/test/run-test/daily-basic-tests.txt +++ b/storage/ndb/test/run-test/daily-basic-tests.txt @@ -619,6 +619,10 @@ max-time: 1500 cmd: testDict args: -l 25 -n DictRestart T1 +max-time: 500 +cmd: testDict +args: -n Bug24631 T1 + # # TEST NDBAPI # From 10e3a96e470b69aaa39ba889152bafca52bdd53a Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 15 May 2007 12:02:58 +0200 Subject: [PATCH 15/21] improve error message on corrup schema file --- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 31 +++++++++++++++++-------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index fd7aabc8b67..a8db352e705 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -79,6 +79,9 @@ #include #include +#include +extern EventLogger g_eventLogger; + #define ZNOT_FOUND 626 #define ZALREADYEXIST 630 @@ -1070,26 +1073,34 @@ void Dbdict::readSchemaConf(Signal* signal, for (Uint32 n = 0; n < xsf->noOfPages; n++) { SchemaFile * sf = &xsf->schemaPage[n]; bool ok = false; + const char *reason; if (memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) != 0) - { jam(); } + { jam(); reason = "magic code"; } else if (sf->FileSize == 0) - { jam(); } + { jam(); reason = "file size == 0"; } else if (sf->FileSize % NDB_SF_PAGE_SIZE != 0) - { jam(); } + { jam(); reason = "invalid size multiple"; } else if (sf->FileSize != sf0->FileSize) - { jam(); } + { jam(); reason = "invalid size"; } else if (sf->PageNumber != n) - { jam(); } + { jam(); reason = "invalid page number"; } else if (computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) != 0) - { jam(); } - else if (crashInd) - { jam(); } + { jam(); reason = "invalid checksum"; } else ok = true; - ndbrequireErr(ok, NDBD_EXIT_SR_SCHEMAFILE); - if (! 
ok) { + + if (!ok) + { + char reason_msg[128]; + snprintf(reason_msg, sizeof(reason_msg), + "schema file corrupt, page %u (%s, " + "sz=%u sz0=%u pn=%u)", + n, reason, sf->FileSize, sf0->FileSize, sf->PageNumber); + if (crashInd) + progError(__LINE__, NDBD_EXIT_SR_SCHEMAFILE, reason_msg); jam(); ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1); + infoEvent("primary %s, trying backup", reason_msg); readSchemaRef(signal, fsPtr); return; } From ea3569af6681de57754f623b0a916e03e5a12f4f Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 15 May 2007 12:07:09 +0200 Subject: [PATCH 16/21] add error code to ndb require --- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index a8db352e705..d86f32dc8d1 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -1098,8 +1098,9 @@ void Dbdict::readSchemaConf(Signal* signal, n, reason, sf->FileSize, sf0->FileSize, sf->PageNumber); if (crashInd) progError(__LINE__, NDBD_EXIT_SR_SCHEMAFILE, reason_msg); + ndbrequireErr(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1, + NDBD_EXIT_SR_SCHEMAFILE); jam(); - ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1); infoEvent("primary %s, trying backup", reason_msg); readSchemaRef(signal, fsPtr); return; From 3acd8d31ff095c1c5109698a88dc38601ea9196e Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 17 May 2007 08:54:30 +0200 Subject: [PATCH 17/21] ndb - bug#28491 disable "disable expand check" as it does not solve problems anyway storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp: disable "disable expand check" as it does not solve problems anyway --- storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index 3b27446d3a9..d34cfb159a4 100644 --- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -5202,9 +5202,9 @@ void Dbacc::execEXPANDCHECK2(Signal* signal) { jamEntry(); - if(refToBlock(signal->getSendersBlockRef()) == DBLQH){ + if(refToBlock(signal->getSendersBlockRef()) == DBLQH) + { jam(); - reenable_expand_after_redo_log_exection_complete(signal); return; } From 9bfed5190a93327194ed323c6d80a23956ef3c07 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 18 May 2007 09:48:52 +0200 Subject: [PATCH 18/21] ndb - bug#28443 Make sure that data can not e left lingering in receive buffer ndb/src/common/transporter/Packer.cpp: test, make MAX_RECEIVED_SIGNALS an variable ndb/src/common/transporter/TCP_Transporter.hpp: add new method for checking of data in receive buffer ndb/src/common/transporter/TransporterRegistry.cpp: Make sure that 1) poll_TCP return true if any transporter has data in receiveBuffer (previously only if socket had data) 2) performReceive executes signals if TCP transporter has data in receiveBuffer (previously only if data was recevied) ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp: test, make MAX_RECEIVED_SIGNALS an variable ndb/test/ndbapi/testNdbApi.cpp: testcase ndb/test/run-test/daily-basic-tests.txt: testcase --- ndb/src/common/transporter/Packer.cpp | 5 +++ .../common/transporter/TCP_Transporter.hpp | 4 ++ .../transporter/TransporterRegistry.cpp | 39 ++++++++++--------- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 17 ++++++++ ndb/test/ndbapi/testNdbApi.cpp | 34 ++++++++++++++++ 
ndb/test/run-test/daily-basic-tests.txt | 4 ++ 6 files changed, 85 insertions(+), 18 deletions(-) diff --git a/ndb/src/common/transporter/Packer.cpp b/ndb/src/common/transporter/Packer.cpp index 9eba335330d..503ff453e7e 100644 --- a/ndb/src/common/transporter/Packer.cpp +++ b/ndb/src/common/transporter/Packer.cpp @@ -21,7 +21,12 @@ #include #include +#ifdef ERROR_INSERT +Uint32 MAX_RECEIVED_SIGNALS = 1024; +#else #define MAX_RECEIVED_SIGNALS 1024 +#endif + Uint32 TransporterRegistry::unpack(Uint32 * readPtr, Uint32 sizeOfData, diff --git a/ndb/src/common/transporter/TCP_Transporter.hpp b/ndb/src/common/transporter/TCP_Transporter.hpp index 151ec261506..d081c6175a0 100644 --- a/ndb/src/common/transporter/TCP_Transporter.hpp +++ b/ndb/src/common/transporter/TCP_Transporter.hpp @@ -100,6 +100,10 @@ private: virtual void updateReceiveDataPtr(Uint32 bytesRead); virtual Uint32 get_free_buffer() const; + + inline bool hasReceiveData () const { + return receiveBuffer.sizeOfData > 0; + } protected: /** * Setup client/server and perform connect/accept diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp index 3f190d16264..c0ee93a8489 100644 --- a/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/ndb/src/common/transporter/TransporterRegistry.cpp @@ -807,6 +807,7 @@ TransporterRegistry::poll_OSE(Uint32 timeOutMillis) Uint32 TransporterRegistry::poll_TCP(Uint32 timeOutMillis) { + bool hasdata = false; if (false && nTCPTransporters == 0) { tcpReadSelectReply = 0; @@ -851,6 +852,7 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis) // Put the connected transporters in the socket read-set FD_SET(socket, &tcpReadset); } + hasdata |= t->hasReceiveData(); } // The highest socket value plus one @@ -867,7 +869,7 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis) } #endif - return tcpReadSelectReply; + return tcpReadSelectReply || hasdata; } #endif @@ -902,25 +904,26 @@ TransporterRegistry::performReceive() #endif #ifdef NDB_TCP_TRANSPORTER - if(tcpReadSelectReply > 0) + for (int i=0; igetRemoteNodeId(); - const NDB_SOCKET_TYPE socket = t->getSocket(); - if(is_connected(nodeId)){ - if(t->isConnected() && FD_ISSET(socket, &tcpReadset)) + checkJobBuffer(); + TCP_Transporter *t = theTCPTransporters[i]; + const NodeId nodeId = t->getRemoteNodeId(); + const NDB_SOCKET_TYPE socket = t->getSocket(); + if(is_connected(nodeId)){ + if(t->isConnected()) + { + if (FD_ISSET(socket, &tcpReadset)) { - const int receiveSize = t->doReceive(); - if(receiveSize > 0) - { - Uint32 * ptr; - Uint32 sz = t->getReceiveData(&ptr); - Uint32 szUsed = unpack(ptr, sz, nodeId, ioStates[nodeId]); - t->updateReceiveDataPtr(szUsed); - } + t->doReceive(); + } + + if (t->hasReceiveData()) + { + Uint32 * ptr; + Uint32 sz = t->getReceiveData(&ptr); + Uint32 szUsed = unpack(ptr, sz, nodeId, ioStates[nodeId]); + t->updateReceiveDataPtr(szUsed); } } } diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 6519444c364..c5f1ba2575a 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -135,6 +135,7 @@ Cmvmi::~Cmvmi() #ifdef ERROR_INSERT NodeBitmask c_error_9000_nodes_mask; +extern Uint32 MAX_RECEIVED_SIGNALS; #endif void Cmvmi::execNDB_TAMPER(Signal* signal) @@ -164,6 +165,22 @@ void Cmvmi::execNDB_TAMPER(Signal* signal) kill(getpid(), SIGABRT); } #endif + +#ifdef ERROR_INSERT + if (signal->theData[0] == 9003) + { + if (MAX_RECEIVED_SIGNALS < 1024) + { + MAX_RECEIVED_SIGNALS = 1024; + } + 
else + { + MAX_RECEIVED_SIGNALS = rand() % 128; + } + ndbout_c("MAX_RECEIVED_SIGNALS: %d", MAX_RECEIVED_SIGNALS); + CLEAR_ERROR_INSERT_VALUE; + } +#endif }//execNDB_TAMPER() void Cmvmi::execSET_LOGLEVELORD(Signal* signal) diff --git a/ndb/test/ndbapi/testNdbApi.cpp b/ndb/test/ndbapi/testNdbApi.cpp index 3a06269f8dc..1ef8c628dd4 100644 --- a/ndb/test/ndbapi/testNdbApi.cpp +++ b/ndb/test/ndbapi/testNdbApi.cpp @@ -1131,7 +1131,36 @@ int runBug_11133(NDBT_Context* ctx, NDBT_Step* step){ return result; } +int +runBug28443(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + int records = ctx->getNumRecords(); + + NdbRestarter restarter; + restarter.insertErrorInAllNodes(9003); + + for (Uint32 i = 0; igetNumLoops(); i++) + { + HugoTransactions hugoTrans(*ctx->getTab()); + if (hugoTrans.loadTable(GETNDB(step), records, 2048) != 0) + { + result = NDBT_FAILED; + goto done; + } + if (runClearTable(ctx, step) != 0) + { + result = NDBT_FAILED; + goto done; + } + } + +done: + restarter.insertErrorInAllNodes(9003); + + return result; +} NDBT_TESTSUITE(testNdbApi); TESTCASE("MaxNdb", @@ -1212,6 +1241,11 @@ TESTCASE("Bug_11133", INITIALIZER(runBug_11133); FINALIZER(runClearTable); } +TESTCASE("Bug28443", + ""){ + INITIALIZER(runBug28443); + FINALIZER(runClearTable); +} NDBT_TESTSUITE_END(testNdbApi); int main(int argc, const char** argv){ diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 51ee6d14f00..fffe1ac9046 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -606,6 +606,10 @@ max-time: 500 cmd: testNdbApi args: -n Bug_11133 T1 +max-time: 1000 +cmd: testNdbApi +args: -n BugBug28443 + #max-time: 500 #cmd: testInterpreter #args: T1 From a5731933a1877dafad0783a226460067bd91bb70 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 18 May 2007 11:06:03 +0200 Subject: [PATCH 19/21] ndb - bug#28443 review comment if some tcp-transporter has data, then do select with timeout 0 ndb/src/common/transporter/TransporterRegistry.cpp: review comment if some tcp-transporter has data, then do select with timeout 0 --- .../transporter/TransporterRegistry.cpp | 34 ++++++++++--------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp index c0ee93a8489..c459b5640dc 100644 --- a/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/ndb/src/common/transporter/TransporterRegistry.cpp @@ -814,22 +814,6 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis) return 0; } - struct timeval timeout; -#ifdef NDB_OSE - // Return directly if there are no TCP transporters configured - - if(timeOutMillis <= 1){ - timeout.tv_sec = 0; - timeout.tv_usec = 1025; - } else { - timeout.tv_sec = timeOutMillis / 1000; - timeout.tv_usec = (timeOutMillis % 1000) * 1000; - } -#else - timeout.tv_sec = timeOutMillis / 1000; - timeout.tv_usec = (timeOutMillis % 1000) * 1000; -#endif - NDB_SOCKET_TYPE maxSocketValue = -1; // Needed for TCP/IP connections @@ -855,6 +839,24 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis) hasdata |= t->hasReceiveData(); } + timeOutMillis = hasdata ? 
0 : timeOutMillis; + + struct timeval timeout; +#ifdef NDB_OSE + // Return directly if there are no TCP transporters configured + + if(timeOutMillis <= 1){ + timeout.tv_sec = 0; + timeout.tv_usec = 1025; + } else { + timeout.tv_sec = timeOutMillis / 1000; + timeout.tv_usec = (timeOutMillis % 1000) * 1000; + } +#else + timeout.tv_sec = timeOutMillis / 1000; + timeout.tv_usec = (timeOutMillis % 1000) * 1000; +#endif + // The highest socket value plus one maxSocketValue++; From deba58ee928000a03f51afe29b67652104fdc444 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 18 May 2007 11:34:57 +0200 Subject: [PATCH 20/21] ndb - bug#28443 review comment 2, atleast 1 signal need for test prg ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp: review comment, atleast 1 signal need --- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index c5f1ba2575a..75a6117ce08 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -175,7 +175,7 @@ void Cmvmi::execNDB_TAMPER(Signal* signal) } else { - MAX_RECEIVED_SIGNALS = rand() % 128; + MAX_RECEIVED_SIGNALS = 1 + (rand() % 128); } ndbout_c("MAX_RECEIVED_SIGNALS: %d", MAX_RECEIVED_SIGNALS); CLEAR_ERROR_INSERT_VALUE; From f77ba282516f37eb13d7cfede98700377652d70c Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 22 May 2007 17:53:07 +0200 Subject: [PATCH 21/21] Bug #28593 cluster backup scans in acc index order, bad for disk data - change to scan in tup and disk order (if applicable) storage/ndb/src/kernel/blocks/backup/Backup.cpp: tup scan for backup also, not only lcp storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: choose disk order scan for disktables during backup + move bug workaround for 27776 from tup to lqh storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp: add missing jamEntry storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp: move bug workaround for 27776 from tup to lqh storage/ndb/src/kernel/blocks/pgman.cpp: missing jamEntry --- .../ndb/src/kernel/blocks/backup/Backup.cpp | 2 +- .../ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 29 +++++++++++++++++-- .../kernel/blocks/dbtup/DbtupDiskAlloc.cpp | 21 ++++++++++++-- .../ndb/src/kernel/blocks/dbtup/DbtupScan.cpp | 2 -- storage/ndb/src/kernel/blocks/pgman.cpp | 2 ++ 5 files changed, 47 insertions(+), 9 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp index a07617f0bfb..57082eaccc8 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp @@ -3543,10 +3543,10 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal) ScanFragReq::setHoldLockFlag(req->requestInfo, 0); ScanFragReq::setKeyinfoFlag(req->requestInfo, 0); ScanFragReq::setAttrLen(req->requestInfo,attrLen); + ScanFragReq::setTupScanFlag(req->requestInfo, 1); if (ptr.p->is_lcp()) { ScanFragReq::setScanPrio(req->requestInfo, 1); - ScanFragReq::setTupScanFlag(req->requestInfo, 1); ScanFragReq::setNoDiskFlag(req->requestInfo, 1); ScanFragReq::setLcpScanFlag(req->requestInfo, 1); } diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index a359267f9d9..6c99e8d0e13 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -8476,9 +8476,32 @@ void Dblqh::continueAfterReceivingAllAiLab(Signal* signal) AccScanReq::setLockMode(req->requestInfo, 
diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index c5f1ba2575a..75a6117ce08 100644
--- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -175,7 +175,7 @@ void Cmvmi::execNDB_TAMPER(Signal* signal)
   }
   else
   {
-    MAX_RECEIVED_SIGNALS = rand() % 128;
+    MAX_RECEIVED_SIGNALS = 1 + (rand() % 128);
   }
   ndbout_c("MAX_RECEIVED_SIGNALS: %d", MAX_RECEIVED_SIGNALS);
   CLEAR_ERROR_INSERT_VALUE;

From f77ba282516f37eb13d7cfede98700377652d70c Mon Sep 17 00:00:00 2001
From: unknown
Date: Tue, 22 May 2007 17:53:07 +0200
Subject: [PATCH 21/21] Bug #28593 cluster backup scans in acc index order,
 bad for disk data
 - change to scan in tup and disk order (if applicable)

storage/ndb/src/kernel/blocks/backup/Backup.cpp:
  tup scan for backup also, not only lcp
storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp:
  choose disk order scan for disk tables during backup + move bug workaround
  for 27776 from tup to lqh
storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp:
  add missing jamEntry
storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp:
  move bug workaround for 27776 from tup to lqh
storage/ndb/src/kernel/blocks/pgman.cpp:
  missing jamEntry
---
 .../ndb/src/kernel/blocks/backup/Backup.cpp   |  2 +-
 .../ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 29 +++++++++++++++++--
 .../kernel/blocks/dbtup/DbtupDiskAlloc.cpp    | 21 ++++++++++++--
 .../ndb/src/kernel/blocks/dbtup/DbtupScan.cpp |  2 --
 storage/ndb/src/kernel/blocks/pgman.cpp       |  2 ++
 5 files changed, 47 insertions(+), 9 deletions(-)

diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
index a07617f0bfb..57082eaccc8 100644
--- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -3543,10 +3543,10 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal)
   ScanFragReq::setHoldLockFlag(req->requestInfo, 0);
   ScanFragReq::setKeyinfoFlag(req->requestInfo, 0);
   ScanFragReq::setAttrLen(req->requestInfo,attrLen);
+  ScanFragReq::setTupScanFlag(req->requestInfo, 1);
   if (ptr.p->is_lcp())
   {
     ScanFragReq::setScanPrio(req->requestInfo, 1);
-    ScanFragReq::setTupScanFlag(req->requestInfo, 1);
     ScanFragReq::setNoDiskFlag(req->requestInfo, 1);
     ScanFragReq::setLcpScanFlag(req->requestInfo, 1);
   }
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index a359267f9d9..6c99e8d0e13 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -8476,9 +8476,32 @@ void Dblqh::continueAfterReceivingAllAiLab(Signal* signal) 
   AccScanReq::setLockMode(req->requestInfo, scanptr.p->scanLockMode);
   AccScanReq::setReadCommittedFlag(req->requestInfo, scanptr.p->readCommitted);
   AccScanReq::setDescendingFlag(req->requestInfo, scanptr.p->descending);
-  AccScanReq::setNoDiskScanFlag(req->requestInfo,
-                                !tcConnectptr.p->m_disk_table);
-  AccScanReq::setLcpScanFlag(req->requestInfo, scanptr.p->lcpScan);
+
+  if (refToBlock(tcConnectptr.p->clientBlockref) == BACKUP)
+  {
+    if (scanptr.p->lcpScan)
+    {
+      AccScanReq::setNoDiskScanFlag(req->requestInfo, 1);
+      AccScanReq::setLcpScanFlag(req->requestInfo, 1);
+    }
+    else
+    {
+      /* If backup, scan disk tables in disk order */
+      AccScanReq::setNoDiskScanFlag(req->requestInfo,
+                                    !tcConnectptr.p->m_disk_table);
+      AccScanReq::setLcpScanFlag(req->requestInfo, 0);
+    }
+  }
+  else
+  {
+#if BUG_27776_FIXED
+    AccScanReq::setNoDiskScanFlag(req->requestInfo,
+                                  !tcConnectptr.p->m_disk_table);
+#else
+    AccScanReq::setNoDiskScanFlag(req->requestInfo, 1);
+#endif
+    AccScanReq::setLcpScanFlag(req->requestInfo, 0);
+  }
 
   req->transId1 = tcConnectptr.p->transid[0];
   req->transId2 = tcConnectptr.p->transid[1];
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
index 54abbf18664..db336df6652 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
@@ -318,6 +318,7 @@ Dbtup::restart_setup_page(Disk_alloc_info& alloc, PagePtr pagePtr)
   unsigned uncommitted, committed;
   uncommitted = committed = ~(unsigned)0;
   int ret = tsman.get_page_free_bits(&page, &uncommitted, &committed);
+  jamEntry();
   idx = alloc.calc_page_free_bits(real_free);
   ddassert(idx == committed);
 
@@ -428,12 +429,12 @@ Dbtup::disk_page_prealloc(Signal* signal,
     c_extent_pool.getPtr(ext);
     if ((pageBits= tsman.alloc_page_from_extent(&ext.p->m_key, bits)) >= 0) 
     {
-      jam();
+      jamEntry();
       found= true;
     }
     else
     {
-      jam();
+      jamEntry();
      /**
       * The current extent is not in a free list
       * and since it couldn't accomadate the request
@@ -490,7 +491,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
 
       if ((err= tsman.alloc_extent(&ext.p->m_key)) < 0)
       {
-        jam();
+        jamEntry();
 #if NOT_YET_UNDO_ALLOC_EXTENT
         c_lgman->free_log_space(logfile_group_id,
                                 sizeof(Disk_undo::AllocExtent)>>2);
@@ -542,6 +543,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
       alloc.m_curr_extent_info_ptr_i= ext.i;
       ext.p->m_free_matrix_pos= RNIL;
       pageBits= tsman.alloc_page_from_extent(&ext.p->m_key, bits);
+      jamEntry();
       ddassert(pageBits >= 0);
     }
 
@@ -601,6 +603,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
   }
 
   int res= m_pgman.get_page(signal, preq, flags);
+  jamEntry();
   switch(res)
   {
   case 0:
@@ -900,6 +903,7 @@ Dbtup::disk_page_set_dirty(PagePtr pagePtr)
 
   // Make sure no one will allocate it...
  tsman.unmap_page(&key, MAX_FREE_LIST - 1);
+  jamEntry();
 }
 
 void
@@ -951,6 +955,7 @@ Dbtup::disk_page_unmap_callback(Uint32 page_id, Uint32 dirty_count)
                          fragPtr.p->m_tablespace_id);
     tsman.unmap_page(&key, idx);
+    jamEntry();
     pagePtr.p->list_index = idx | 0x8000;
   }
 
@@ -999,6 +1004,7 @@ Dbtup::disk_page_alloc(Signal* signal,
                        fragPtrP->m_tablespace_id);
     tsman.update_page_free_bits(key, new_bits, lsn);
+    jamEntry();
   }
 }
 
@@ -1051,6 +1057,7 @@ Dbtup::disk_page_free(Signal *signal,
                        fragPtrP->m_tablespace_id);
     tsman.update_page_free_bits(key, new_bits, lsn);
+    jamEntry();
   }
 
   Uint32 ext = pagePtr.p->m_extent_info_ptr;
@@ -1104,6 +1111,7 @@ Dbtup::disk_page_abort_prealloc(Signal *signal, Fragrecord* fragPtrP,
   memcpy(&req.m_page, key, sizeof(Local_key));
 
   int res= m_pgman.get_page(signal, req, flags);
+  jamEntry();
   switch(res)
   {
   case 0:
@@ -1232,6 +1240,7 @@ Dbtup::disk_page_alloc_extent_log_buffer_callback(Signal* signal,
 
   Uint64 lsn= lgman.add_entry(c, 1);
   tsman.update_lsn(&key, lsn);
+  jamEntry();
 }
 #endif
 
@@ -1250,6 +1259,7 @@ Dbtup::disk_page_undo_alloc(Page* page, const Local_key* key,
 
   Uint64 lsn= lgman.add_entry(c, 1);
   m_pgman.update_lsn(* key, lsn);
+  jamEntry();
 
   return lsn;
 }
@@ -1279,6 +1289,7 @@ Dbtup::disk_page_undo_update(Page* page, const Local_key* key,
 
   Uint64 lsn= lgman.add_entry(c, 3);
   m_pgman.update_lsn(* key, lsn);
+  jamEntry();
 
   return lsn;
 }
@@ -1308,6 +1319,7 @@ Dbtup::disk_page_undo_free(Page* page, const Local_key* key,
 
   Uint64 lsn= lgman.add_entry(c, 3);
   m_pgman.update_lsn(* key, lsn);
+  jamEntry();
 
   return lsn;
 }
@@ -1402,6 +1414,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
 
   int flags = 0;
   int res= m_pgman.get_page(signal, preq, flags);
+  jamEntry();
   switch(res)
   {
   case 0:
@@ -1545,6 +1558,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
 
     lsn = undo->m_lsn - 1; // make sure undo isn't run again...
     m_pgman.update_lsn(undo->m_key, lsn);
+    jamEntry();
   }
   else if (DBG_UNDO)
   {
@@ -1637,6 +1651,7 @@ Dbtup::disk_restart_undo_page_bits(Signal* signal, Apply_undo* undo)
                 fragPtrP->m_tablespace_id);
 
   tsman.restart_undo_page_free_bits(&undo->m_key, new_bits, undo->m_lsn, lsn);
+  jamEntry();
 }
 
 int
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
index 04e60edfb2e..eb9b9c7acc2 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
@@ -62,13 +62,11 @@ Dbtup::execACC_SCANREQ(Signal* signal)
     break;
   }
 
-#if BUG_27776_FIXED
   if (!AccScanReq::getNoDiskScanFlag(req->requestInfo) &&
       tablePtr.p->m_no_of_disk_attributes)
   {
     bits |= ScanOp::SCAN_DD;
   }
-#endif
 
   bool mm = (bits & ScanOp::SCAN_DD);
   if (tablePtr.p->m_attributes[mm].m_no_of_varsize > 0) {
diff --git a/storage/ndb/src/kernel/blocks/pgman.cpp b/storage/ndb/src/kernel/blocks/pgman.cpp
index af648c71253..719b60fa466 100644
--- a/storage/ndb/src/kernel/blocks/pgman.cpp
+++ b/storage/ndb/src/kernel/blocks/pgman.cpp
@@ -1561,6 +1561,7 @@ Pgman::execFSWRITEREF(Signal* signal)
 int
 Pgman::get_page(Signal* signal, Ptr<Page_entry> ptr, Page_request page_req)
 {
+  jamEntry();
 #ifdef VM_TRACE
   Ptr<Page_request> tmp = { &page_req, RNIL};
   debugOut << "PGMAN: >get_page" << endl;
@@ -1708,6 +1709,7 @@ Pgman::get_page(Signal* signal, Ptr<Page_entry> ptr, Page_request page_req)
 void
 Pgman::update_lsn(Ptr<Page_entry> ptr, Uint32 block, Uint64 lsn)
 {
+  jamEntry();
 #ifdef VM_TRACE
   const char* bname = getBlockName(block, "?");
   debugOut << "PGMAN: >update_lsn: block=" << bname << " lsn=" << lsn << endl;
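
(Editor's illustration, not part of the patch.) Every "+  jamEntry();" added in the
DbtupDiskAlloc.cpp and pgman.cpp hunks above sits right after a call that runs code in
another kernel block (tsman, lgman, m_pgman), or at the entry of a function that other
blocks call into: the jam trace records where execution re-entered the current block so
a post-mortem trace shows the path taken. The sketch below is a simplified stand-in for
that mechanism, not the real jam()/jamEntry() macros:

    #include <cstdio>

    // A tiny ring buffer standing in for a per-block jam trace.
    struct JamBuffer {
      unsigned short lines[16];
      unsigned count;
    };

    static JamBuffer theJamBuffer = { {0}, 0 };

    // Record the current source line, the way jam()/jamEntry() mark progress.
    #define JAM_TRACE() \
      (theJamBuffer.lines[theJamBuffer.count++ % 16] = (unsigned short)__LINE__)

    static void call_into_other_block() { /* e.g. an update_page_free_bits() call */ }

    static void caller()
    {
      JAM_TRACE();              // progress inside this block
      call_into_other_block();  // the other block traces into its own buffer
      JAM_TRACE();              // record the point where execution re-entered here
    }

    int main()
    {
      caller();
      for (unsigned i = 0; i < theJamBuffer.count && i < 16; i++)
        std::printf("trace entry %u: line %u\n", i, (unsigned)theJamBuffer.lines[i]);
      return 0;
    }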