From 5edab966ee59eb5610d6b712d0df9b98ac46fd1a Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 12 Oct 2006 10:31:27 +0800 Subject: [PATCH 01/15] fixed for BUG#21519, 'MySQLD Err msg "Management server closed connection" needs date & time stamp'. added date & time stamp for the messages that will be printed to the mysqld message file. storage/ndb/src/common/transporter/OSE_Receiver.cpp: added date & time stamp for the messages. storage/ndb/src/common/transporter/TCP_Transporter.cpp: added date & time stamp for the messages. storage/ndb/src/common/transporter/TransporterRegistry.cpp: added date & time stamp for the messages. --- .../src/common/transporter/OSE_Receiver.cpp | 12 ++++----- .../common/transporter/TCP_Transporter.cpp | 25 +++++++++++-------- .../transporter/TransporterRegistry.cpp | 24 +++++++++--------- 3 files changed, 32 insertions(+), 29 deletions(-) diff --git a/storage/ndb/src/common/transporter/OSE_Receiver.cpp b/storage/ndb/src/common/transporter/OSE_Receiver.cpp index 63a33fc8f24..cc82585cb25 100644 --- a/storage/ndb/src/common/transporter/OSE_Receiver.cpp +++ b/storage/ndb/src/common/transporter/OSE_Receiver.cpp @@ -114,11 +114,11 @@ OSE_Receiver::checkWaitStack(NodeId _nodeId){ if (waitStack[i]->dataSignal.senderNodeId == _nodeId && waitStack[i]->dataSignal.sigId == nextSigId[_nodeId]){ - ndbout_c("INFO: signal popped from waitStack, sigId = %d", + g_eventLogger.info("signal popped from waitStack, sigId = %d", waitStack[i]->dataSignal.sigId); if(isFull()){ - ndbout_c("ERROR: receiveBuffer is full"); + g_eventLogger.error("receiveBuffer is full"); reportError(callbackObj, _nodeId, TE_RECEIVE_BUFFER_FULL); return false; } @@ -172,7 +172,7 @@ OSE_Receiver::insertWaitStack(union SIGNAL* _sig){ waitStack[waitStackCount] = _sig; waitStackCount++; } else { - ndbout_c("ERROR: waitStack is full"); + g_eventLogger.error("waitStack is full"); reportError(callbackObj, localNodeId, TE_WAIT_STACK_FULL); } } @@ -231,14 +231,14 @@ OSE_Receiver::doReceive(Uint32 timeOutMillis) { } else { // Signal was not received in correct order // Check values and put it in the waitStack - ndbout_c("WARNING: sigId out of order," + g_eventLogger.warning("sigId out of order," " currSigId = %d, nextSigId = %d", currSigId, nextSigId[nodeId]); if (currSigId < nextSigId[nodeId]){ // Current recieved sigId was smaller than nextSigId // There is no use to put it in the waitStack - ndbout_c("ERROR: recieved sigId was smaller than nextSigId"); + g_eventLogger.error("recieved sigId was smaller than nextSigId"); reportError(callbackObj, nodeId, TE_TOO_SMALL_SIGID); return false; } @@ -246,7 +246,7 @@ OSE_Receiver::doReceive(Uint32 timeOutMillis) { if (currSigId > (nextSigId[nodeId] + waitStackSize)){ // Current sigId was larger than nextSigId + size of waitStack // we can never "save" so many signal's on the stack - ndbout_c("ERROR: currSigId > (nextSigId + size of waitStack)"); + g_eventLogger.error("currSigId > (nextSigId + size of waitStack)"); reportError(callbackObj, nodeId, TE_TOO_LARGE_SIGID); return false; } diff --git a/storage/ndb/src/common/transporter/TCP_Transporter.cpp b/storage/ndb/src/common/transporter/TCP_Transporter.cpp index 5db12d3985c..c347b813840 100644 --- a/storage/ndb/src/common/transporter/TCP_Transporter.cpp +++ b/storage/ndb/src/common/transporter/TCP_Transporter.cpp @@ -20,6 +20,9 @@ #include "TCP_Transporter.hpp" #include #include + +#include +extern EventLogger g_eventLogger; // End of stuff to be moved #if defined NDB_OSE || defined NDB_SOFTOSE @@ -159,14 +162,14 @@ 
TCP_Transporter::setSocketOptions(){ if (setsockopt(theSocket, SOL_SOCKET, SO_RCVBUF, (char*)&sockOptRcvBufSize, sizeof(sockOptRcvBufSize)) < 0) { #ifdef DEBUG_TRANSPORTER - ndbout_c("The setsockopt SO_RCVBUF error code = %d", InetErrno); + g_eventLogger.error("The setsockopt SO_RCVBUF error code = %d", InetErrno); #endif }//if if (setsockopt(theSocket, SOL_SOCKET, SO_SNDBUF, (char*)&sockOptSndBufSize, sizeof(sockOptSndBufSize)) < 0) { #ifdef DEBUG_TRANSPORTER - ndbout_c("The setsockopt SO_SNDBUF error code = %d", InetErrno); + g_eventLogger.error("The setsockopt SO_SNDBUF error code = %d", InetErrno); #endif }//if @@ -177,7 +180,7 @@ TCP_Transporter::setSocketOptions(){ if (setsockopt(theSocket, IPPROTO_TCP, TCP_NODELAY, (char*)&sockOptNodelay, sizeof(sockOptNodelay)) < 0) { #ifdef DEBUG_TRANSPORTER - ndbout_c("The setsockopt TCP_NODELAY error code = %d", InetErrno); + g_eventLogger.error("The setsockopt TCP_NODELAY error code = %d", InetErrno); #endif }//if } @@ -191,7 +194,7 @@ TCP_Transporter::setSocketNonBlocking(NDB_SOCKET_TYPE socket){ if(ioctlsocket(socket, FIONBIO, &ul)) { #ifdef DEBUG_TRANSPORTER - ndbout_c("Set non-blocking server error3: %d", InetErrno); + g_eventLogger.error("Set non-blocking server error3: %d", InetErrno); #endif }//if return true; @@ -205,13 +208,13 @@ TCP_Transporter::setSocketNonBlocking(NDB_SOCKET_TYPE socket){ flags = fcntl(socket, F_GETFL, 0); if (flags < 0) { #ifdef DEBUG_TRANSPORTER - ndbout_c("Set non-blocking server error1: %s", strerror(InetErrno)); + g_eventLogger.error("Set non-blocking server error1: %s", strerror(InetErrno)); #endif }//if flags |= NDB_NONBLOCK; if (fcntl(socket, F_SETFL, flags) == -1) { #ifdef DEBUG_TRANSPORTER - ndbout_c("Set non-blocking server error2: %s", strerror(InetErrno)); + g_eventLogger.error("Set non-blocking server error2: %s", strerror(InetErrno)); #endif }//if return true; @@ -349,7 +352,7 @@ TCP_Transporter::doSend() { } else { // Send failed #if defined DEBUG_TRANSPORTER - ndbout_c("Send Failure(disconnect==%d) to node = %d nBytesSent = %d " + g_eventLogger.error("Send Failure(disconnect==%d) to node = %d nBytesSent = %d " "errno = %d strerror = %s", DISCONNECT_ERRNO(InetErrno, nBytesSent), remoteNodeId, nBytesSent, InetErrno, @@ -384,11 +387,11 @@ TCP_Transporter::doReceive() { if(receiveBuffer.sizeOfData > receiveBuffer.sizeOfBuffer){ #ifdef DEBUG_TRANSPORTER - ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)", + g_eventLogger.error("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)", receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer); - ndbout_c("nBytesRead = %d", nBytesRead); + g_eventLogger.error("nBytesRead = %d", nBytesRead); #endif - ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)", + g_eventLogger.error("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)", receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer); report_error(TE_INVALID_MESSAGE_LENGTH); return 0; @@ -405,7 +408,7 @@ TCP_Transporter::doReceive() { return nBytesRead; } else { #if defined DEBUG_TRANSPORTER - ndbout_c("Receive Failure(disconnect==%d) to node = %d nBytesSent = %d " + g_eventLogger.error("Receive Failure(disconnect==%d) to node = %d nBytesSent = %d " "errno = %d strerror = %s", DISCONNECT_ERRNO(InetErrno, nBytesRead), remoteNodeId, nBytesRead, InetErrno, diff --git a/storage/ndb/src/common/transporter/TransporterRegistry.cpp b/storage/ndb/src/common/transporter/TransporterRegistry.cpp index 4a0be702a86..395f69ede73 100644 --- 
a/storage/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp @@ -891,7 +891,7 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis) tcpReadSelectReply = select(maxSocketValue, &tcpReadset, 0, 0, &timeout); if(false && tcpReadSelectReply == -1 && errno == EINTR) - ndbout_c("woke-up by signal"); + g_eventLogger.info("woke-up by signal"); #ifdef NDB_WIN32 if(tcpReadSelectReply == SOCKET_ERROR) @@ -1313,12 +1313,12 @@ TransporterRegistry::start_clients_thread() } else if(ndb_mgm_is_connected(m_mgm_handle)) { - ndbout_c("Failed to get dynamic port to connect to: %d", res); + g_eventLogger.info("Failed to get dynamic port to connect to: %d", res); ndb_mgm_disconnect(m_mgm_handle); } else { - ndbout_c("Management server closed connection early. " + g_eventLogger.info("Management server closed connection early. " "It is probably being shut down (or has problems). " "We will retry the connection."); } @@ -1416,7 +1416,7 @@ TransporterRegistry::start_service(SocketServer& socket_server) DBUG_ENTER("TransporterRegistry::start_service"); if (m_transporter_interface.size() > 0 && !nodeIdSpecified) { - ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified"); + g_eventLogger.error("TransporterRegistry::startReceiving: localNodeId not specified"); DBUG_RETURN(false); } @@ -1442,7 +1442,7 @@ TransporterRegistry::start_service(SocketServer& socket_server) * If it wasn't a dynamically allocated port, or * our attempts at getting a new dynamic port failed */ - ndbout_c("Unable to setup transporter service port: %s:%d!\n" + g_eventLogger.error("Unable to setup transporter service port: %s:%d!\n" "Please check if the port is already used,\n" "(perhaps the node is already running)", t.m_interface ? 
t.m_interface : "*", t.m_s_service_port); @@ -1575,13 +1575,13 @@ bool TransporterRegistry::connect_client(NdbMgmHandle *h) if(!mgm_nodeid) { - ndbout_c("%s: %d", __FILE__, __LINE__); + g_eventLogger.error("%s: %d", __FILE__, __LINE__); return false; } Transporter * t = theTransporters[mgm_nodeid]; if (!t) { - ndbout_c("%s: %d", __FILE__, __LINE__); + g_eventLogger.error("%s: %d", __FILE__, __LINE__); return false; } DBUG_RETURN(t->connect_client(connect_ndb_mgmd(h))); @@ -1597,7 +1597,7 @@ NDB_SOCKET_TYPE TransporterRegistry::connect_ndb_mgmd(NdbMgmHandle *h) if ( h==NULL || *h == NULL ) { - ndbout_c("%s: %d", __FILE__, __LINE__); + g_eventLogger.error("%s: %d", __FILE__, __LINE__); return NDB_INVALID_SOCKET; } @@ -1610,10 +1610,10 @@ NDB_SOCKET_TYPE TransporterRegistry::connect_ndb_mgmd(NdbMgmHandle *h) m_transporter_interface[i].m_s_service_port, &mgm_reply) < 0) { - ndbout_c("Error: %s: %d", + g_eventLogger.error("Error: %s: %d", ndb_mgm_get_latest_error_desc(*h), ndb_mgm_get_latest_error(*h)); - ndbout_c("%s: %d", __FILE__, __LINE__); + g_eventLogger.error("%s: %d", __FILE__, __LINE__); ndb_mgm_destroy_handle(h); return NDB_INVALID_SOCKET; } @@ -1625,10 +1625,10 @@ NDB_SOCKET_TYPE TransporterRegistry::connect_ndb_mgmd(NdbMgmHandle *h) NDB_SOCKET_TYPE sockfd= ndb_mgm_convert_to_transporter(h); if ( sockfd == NDB_INVALID_SOCKET) { - ndbout_c("Error: %s: %d", + g_eventLogger.error("Error: %s: %d", ndb_mgm_get_latest_error_desc(*h), ndb_mgm_get_latest_error(*h)); - ndbout_c("%s: %d", __FILE__, __LINE__); + g_eventLogger.error("%s: %d", __FILE__, __LINE__); ndb_mgm_destroy_handle(h); } return sockfd; From 3def506b9db15adfecb2d7745f505e4131089692 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 13 Oct 2006 12:48:05 +0800 Subject: [PATCH 02/15] ndb - fixed for BUG#15021, binlog_index table become inconsistent if errors during purge of binlogs. if EMFILE error occured while purging binary logs, stop purging logs and report error message to user. mysys/my_open.c: report EMFILE error when opening file failed. sql/log.cc: report EMFILE error when purging logs, and stop purging logs when EMFILE error occured. sql/log.h: added LOG_INFO_EMFILE error number. sql/share/errmsg.txt: added EMFILE error message for purging binary logs. sql/sql_repl.cc: added EMFILE error message. sql/table.cc: report EMFILE error. 
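The pattern described in this commit message is: detect EMFILE at the point where a file handle cannot be obtained, translate it into a dedicated purge status instead of a generic failure, and stop the purge loop so the binlog index is only updated for files that were actually removed. The standalone C++ sketch below illustrates that control flow only; the names purge_logs_up_to and PurgeStatus are invented for illustration and are not MySQL APIs, and in the real patch the EMFILE comes from my_open()/ha_binlog_index_purge_file rather than a plain fopen().

    #include <cerrno>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Illustrative status codes, loosely mirroring the LOG_INFO_* constants in sql/log.h.
    enum class PurgeStatus { Ok, TooManyOpenFiles, OtherError };

    // Hypothetical sketch: walk the index entries older than `to_log`, do work that
    // needs a file descriptor, and stop immediately on EMFILE so the index and the
    // file system cannot drift apart.
    PurgeStatus purge_logs_up_to(const std::vector<std::string>& index_entries,
                                 const std::string& to_log,
                                 std::vector<std::string>* kept)
    {
      for (const std::string& name : index_entries) {
        if (name == to_log)
          break;                                   // purge strictly older logs only
        std::FILE* f = std::fopen(name.c_str(), "rb");  // stand-in for work needing a descriptor
        if (f == nullptr) {
          kept->push_back(name);                   // leave the entry so a later PURGE can retry
          if (errno == EMFILE)
            return PurgeStatus::TooManyOpenFiles;  // report and stop, as the patch does
          return PurgeStatus::OtherError;
        }
        std::fclose(f);
        std::remove(name.c_str());                 // file processed, safe to drop it
      }
      return PurgeStatus::Ok;
    }

Returning a distinct status lets the caller map it to a user-visible message, which is what the LOG_INFO_EMFILE to ER_BINLOG_PURGE_EMFILE mapping added in sql_repl.cc does in the diff that follows.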
--- mysys/my_open.c | 12 ++++++++++-- sql/log.cc | 12 ++++++++++++ sql/log.h | 2 ++ sql/share/errmsg.txt | 3 ++- sql/sql_repl.cc | 1 + sql/table.cc | 11 +++++++++++ 6 files changed, 38 insertions(+), 3 deletions(-) diff --git a/mysys/my_open.c b/mysys/my_open.c index ab2f7c9ff27..a023a5ebe63 100644 --- a/mysys/my_open.c +++ b/mysys/my_open.c @@ -167,9 +167,17 @@ File my_register_filename(File fd, const char *FileName, enum file_type else my_errno=errno; DBUG_PRINT("error",("Got error %d on open",my_errno)); - if (MyFlags & (MY_FFNF | MY_FAE | MY_WME)) - my_error(error_message_number, MYF(ME_BELL+ME_WAITTANG), + if (MyFlags & (MY_FFNF | MY_FAE | MY_WME)) { + if (my_errno == EMFILE) { + DBUG_PRINT("error",("print err: %d",EE_OUT_OF_FILERESOURCES)); + my_error(EE_OUT_OF_FILERESOURCES, MYF(ME_BELL+ME_WAITTANG), FileName, my_errno); + } else { + DBUG_PRINT("error",("print err: %d",error_message_number)); + my_error(error_message_number, MYF(ME_BELL+ME_WAITTANG), + FileName, my_errno); + } + } return(fd); } diff --git a/sql/log.cc b/sql/log.cc index b63ec563baf..7b6724e7a90 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2687,6 +2687,7 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log, ulonglong *decrease_log_space) { int error; + int ret = 0; bool exit_loop= 0; LOG_INFO log_info; DBUG_ENTER("purge_logs"); @@ -2731,6 +2732,14 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log, *decrease_log_space-= file_size; ha_binlog_index_purge_file(current_thd, log_info.log_file_name); + if (current_thd->query_error) { + DBUG_PRINT("info",("query error: %d", current_thd->query_error)); + if (my_errno == EMFILE) { + DBUG_PRINT("info",("my_errno: %d, set ret = LOG_INFO_EMFILE", my_errno)); + ret = LOG_INFO_EMFILE; + break; + } + } if (find_next_log(&log_info, 0) || exit_loop) break; @@ -2741,6 +2750,9 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log, the log index file after restart - otherwise, this should be safe */ error= update_log_index(&log_info, need_update_threads); + if (error == 0) { + error = ret; + } err: if (need_mutex) diff --git a/sql/log.h b/sql/log.h index 8f75601f02b..7e739f4a9fb 100644 --- a/sql/log.h +++ b/sql/log.h @@ -114,6 +114,8 @@ extern TC_LOG_DUMMY tc_log_dummy; #define LOG_INFO_MEM -6 #define LOG_INFO_FATAL -7 #define LOG_INFO_IN_USE -8 +#define LOG_INFO_EMFILE -9 + /* bitmap to SQL_LOG::close() */ #define LOG_CLOSE_INDEX 1 diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index a34e8c152cf..f9fb23c2191 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -6001,4 +6001,5 @@ ER_BAD_LOG_STATEMENT ger "Sie können eine Logtabelle nicht '%s', wenn Loggen angeschaltet ist" ER_NON_INSERTABLE_TABLE eng "The target table %-.100s of the %s is not insertable-into" - +ER_BINLOG_PURGE_EMFILE + eng "Too many files opened, please execute the command again" diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 52489087b02..c281f00184e 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -239,6 +239,7 @@ bool purge_error_message(THD* thd, int res) case LOG_INFO_MEM: errmsg= ER_OUT_OF_RESOURCES; break; case LOG_INFO_FATAL: errmsg= ER_BINLOG_PURGE_FATAL_ERR; break; case LOG_INFO_IN_USE: errmsg= ER_LOG_IN_USE; break; + case LOG_INFO_EMFILE: errmsg= ER_BINLOG_PURGE_EMFILE; break; default: errmsg= ER_LOG_PURGE_UNKNOWN_ERR; break; } diff --git a/sql/table.cc b/sql/table.cc index 5b41ad48696..fb05162061e 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1562,6 +1562,17 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, error= 1; my_errno= ENOENT; } + else if 
(ha_err == EMFILE) + { + /* + Too many files opened, use same error message as if the .frm + file can't open + */ + DBUG_PRINT("error", ("open file: %s failed, too many files opened (errno: %d)", + share->normalized_path.str, ha_err)); + error= 1; + my_errno= EMFILE; + } else { outparam->file->print_error(ha_err, MYF(0)); From e1a122509d7a892ddbe205e3a80b7319694c447d Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 18 Oct 2006 11:06:35 +0800 Subject: [PATCH 03/15] add a new mgmapi ndb_mgm_get_db_parameter_info, which retrieve parameter's name storage/ndb/include/mgmapi/mgmapi.h: add a new function declaration storage/ndb/src/mgmapi/Makefile.am: add a link of a new file ParamInfo.cpp which locate in mgmsrv dir storage/ndb/src/mgmapi/mgmapi_configuration.cpp: add a new mgmapi function ndb_mgm_get_db_parameter_info storage/ndb/src/mgmsrv/ParamInfo.cpp: New BitKeeper file ``storage/ndb/src/mgmsrv/ParamInfo.cpp'' initialization information of all parameters, this copy from mgmsrv/ConfigInfo.cpp storage/ndb/src/mgmsrv/ParamInfo.hpp: New BitKeeper file ``storage/ndb/src/mgmsrv/ParamInfo.hpp'' --- storage/ndb/include/mgmapi/mgmapi.h | 8 + storage/ndb/src/mgmapi/Makefile.am | 2 +- .../ndb/src/mgmapi/mgmapi_configuration.cpp | 38 + storage/ndb/src/mgmsrv/ParamInfo.cpp | 2113 +++++++++++++++++ storage/ndb/src/mgmsrv/ParamInfo.hpp | 44 + 5 files changed, 2204 insertions(+), 1 deletion(-) create mode 100644 storage/ndb/src/mgmsrv/ParamInfo.cpp create mode 100644 storage/ndb/src/mgmsrv/ParamInfo.hpp diff --git a/storage/ndb/include/mgmapi/mgmapi.h b/storage/ndb/include/mgmapi/mgmapi.h index b6b87ebaaa9..87b67327efc 100644 --- a/storage/ndb/include/mgmapi/mgmapi.h +++ b/storage/ndb/include/mgmapi/mgmapi.h @@ -1133,6 +1133,14 @@ extern "C" { int ndb_mgm_check_connection(NdbMgmHandle handle); int ndb_mgm_report_event(NdbMgmHandle handle, Uint32 *data, Uint32 length); + + struct ndb_mgm_param_info + { + Uint32 m_id; + const char * m_name; + }; + int ndb_mgm_get_db_parameter_info(Uint32 paramId, struct ndb_mgm_param_info * info, + size_t * size); #endif #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED diff --git a/storage/ndb/src/mgmapi/Makefile.am b/storage/ndb/src/mgmapi/Makefile.am index d04be9f16a4..c0e0c396537 100644 --- a/storage/ndb/src/mgmapi/Makefile.am +++ b/storage/ndb/src/mgmapi/Makefile.am @@ -1,7 +1,7 @@ noinst_LTLIBRARIES = libmgmapi.la -libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp ../kernel/error/ndbd_exit_codes.c +libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp ../kernel/error/ndbd_exit_codes.c ../mgmsrv/ParamInfo.cpp INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/include/mgmapi diff --git a/storage/ndb/src/mgmapi/mgmapi_configuration.cpp b/storage/ndb/src/mgmapi/mgmapi_configuration.cpp index 80ab428c05a..86dbbe8dc04 100644 --- a/storage/ndb/src/mgmapi/mgmapi_configuration.cpp +++ b/storage/ndb/src/mgmapi/mgmapi_configuration.cpp @@ -1,6 +1,10 @@ #include #include #include "mgmapi_configuration.hpp" +#include "../mgmsrv/ParamInfo.hpp" + +extern const ParamInfo ParamInfoArray[]; +extern const int ParamInfoNum; ndb_mgm_configuration_iterator::ndb_mgm_configuration_iterator (const ndb_mgm_configuration & conf, unsigned type_of_section) @@ -155,3 +159,37 @@ ndb_mgm_find(ndb_mgm_configuration_iterator* iter, int param, unsigned search){ return iter->find(param, search); } + +/** + * Retrieve information about parameter + * @param info : in - pointer to structure allocated by caller + * @param size : in/out : pointer to 
int initialized to sizeof(ndb_mgm_param_info)...will be set to bytes set by function on return +*/ +extern "C" +int +ndb_mgm_get_db_parameter_info(Uint32 paramId, struct ndb_mgm_param_info * info, size_t * size) { + if ( paramId == 0 ) { + return -1; + } + + for (int i = 0; i < ParamInfoNum; i++) { + if (paramId == ParamInfoArray[i]._paramId && strcmp(DB_TOKEN, ParamInfoArray[i]._section) == 0) { + size_t tmp = 0; + if (tmp + sizeof(info->m_id) <= *size) + { + info->m_id = ParamInfoArray[i]._paramId; + tmp += sizeof(info->m_id); + } + + if (tmp + sizeof(info->m_name) <= *size) + { + info->m_name = ParamInfoArray[i]._fname; + tmp += sizeof(info->m_name); + } + + *size = tmp; + return 0; + } + } + return -1; +} diff --git a/storage/ndb/src/mgmsrv/ParamInfo.cpp b/storage/ndb/src/mgmsrv/ParamInfo.cpp new file mode 100644 index 00000000000..aabab435d77 --- /dev/null +++ b/storage/ndb/src/mgmsrv/ParamInfo.cpp @@ -0,0 +1,2113 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include +#include <../../include/kernel/ndb_limits.h> +#include "ParamInfo.hpp" +#include + +#ifndef MYSQLCLUSTERDIR +#define MYSQLCLUSTERDIR "." +#endif + +#define KEY_INTERNAL 0 +#define MAX_INT_RNIL 0xfffffeff +#define MAX_PORT_NO 65535 + +#define _STR_VALUE(x) #x +#define STR_VALUE(x) _STR_VALUE(x) + +/**************************************************************************** + * Section names + ****************************************************************************/ +#define DB_TOKEN_PRINT "ndbd(DB)" +#define MGM_TOKEN_PRINT "ndb_mgmd(MGM)" +#define API_TOKEN_PRINT "mysqld(API)" + +/** + * A MANDATORY parameters must be specified in the config file + * An UNDEFINED parameter may or may not be specified in the config file + */ +static const char* MANDATORY = (char*)~(UintPtr)0;// Default value for mandatory params. +static const char* UNDEFINED = 0; // Default value for undefined params. + +extern const ParamInfo ParamInfoArray[]; +extern const int ParamInfoNum; + +/** + * The default constructors create objects with suitable values for the + * configuration parameters. + * + * Some are however given the value MANDATORY which means that the value + * must be specified in the configuration file. + * + * Min and max values are also given for some parameters. + * - Attr1: Name in file (initial config file) + * - Attr2: Name in prop (properties object) + * - Attr3: Name of Section (in init config file) + * - Attr4: Updateable + * - Attr5: Type of parameter (INT or BOOL) + * - Attr6: Default Value (number only) + * - Attr7: Min value + * - Attr8: Max value + * + * Parameter constraints are coded in file Config.cpp. 
+ * + * ******************************************************************* + * Parameters used under development should be marked "NOTIMPLEMENTED" + * ******************************************************************* + */ +const ParamInfo ParamInfoArray[] = { + + /**************************************************************************** + * COMPUTER + ***************************************************************************/ + { + KEY_INTERNAL, + "COMPUTER", + "COMPUTER", + "Computer section", + CI_INTERNAL, + false, + CI_SECTION, + 0, + 0, 0 }, + + { + KEY_INTERNAL, + "Id", + "COMPUTER", + "Name of computer", + CI_USED, + false, + CI_STRING, + MANDATORY, + 0, 0 }, + + { + KEY_INTERNAL, + "HostName", + "COMPUTER", + "Hostname of computer (e.g. mysql.com)", + CI_USED, + false, + CI_STRING, + MANDATORY, + 0, 0 }, + + { + KEY_INTERNAL, + "ByteOrder", + "COMPUTER", + 0, + CI_DEPRICATED, + false, + CI_STRING, + UNDEFINED, + 0, + 0 }, + + /**************************************************************************** + * SYSTEM + ***************************************************************************/ + { + CFG_SECTION_SYSTEM, + "SYSTEM", + "SYSTEM", + "System section", + CI_USED, + false, + CI_SECTION, + (const char *)CFG_SECTION_SYSTEM, + 0, 0 }, + + { + CFG_SYS_NAME, + "Name", + "SYSTEM", + "Name of system (NDB Cluster)", + CI_USED, + false, + CI_STRING, + MANDATORY, + 0, 0 }, + + { + CFG_SYS_PRIMARY_MGM_NODE, + "PrimaryMGMNode", + "SYSTEM", + "Node id of Primary "MGM_TOKEN_PRINT" node", + CI_USED, + false, + CI_INT, + "0", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_SYS_CONFIG_GENERATION, + "ConfigGenerationNumber", + "SYSTEM", + "Configuration generation number", + CI_USED, + false, + CI_INT, + "0", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + /*************************************************************************** + * DB + ***************************************************************************/ + { + CFG_SECTION_NODE, + DB_TOKEN, + DB_TOKEN, + "Node section", + CI_USED, + false, + CI_SECTION, + (const char *)NODE_TYPE_DB, + 0, 0 + }, + + { + CFG_NODE_HOST, + "HostName", + DB_TOKEN, + "Name of computer for this node", + CI_INTERNAL, + false, + CI_STRING, + "localhost", + 0, 0 }, + + { + CFG_NODE_SYSTEM, + "System", + DB_TOKEN, + "Name of system for this node", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + KEY_INTERNAL, + "Id", + DB_TOKEN, + "", + CI_DEPRICATED, + false, + CI_INT, + MANDATORY, + "1", + STR_VALUE(MAX_NODES) }, + + { + CFG_NODE_ID, + "NodeId", + DB_TOKEN, + "Number identifying the database node ("DB_TOKEN_PRINT")", + CI_USED, + false, + CI_INT, + MANDATORY, + "1", + STR_VALUE(MAX_NODES) }, + + { + KEY_INTERNAL, + "ServerPort", + DB_TOKEN, + "Port used to setup transporter", + CI_USED, + false, + CI_INT, + UNDEFINED, + "1", + STR_VALUE(MAX_PORT_NO) }, + + { + CFG_DB_NO_REPLICAS, + "NoOfReplicas", + DB_TOKEN, + "Number of copies of all data in the database (1-4)", + CI_USED, + false, + CI_INT, + MANDATORY, + "1", + "4" }, + + { + CFG_DB_NO_ATTRIBUTES, + "MaxNoOfAttributes", + DB_TOKEN, + "Total number of attributes stored in database. I.e. 
sum over all tables", + CI_USED, + false, + CI_INT, + "1000", + "32", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_NO_TABLES, + "MaxNoOfTables", + DB_TOKEN, + "Total number of tables stored in the database", + CI_USED, + false, + CI_INT, + "128", + "8", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_NO_ORDERED_INDEXES, + "MaxNoOfOrderedIndexes", + DB_TOKEN, + "Total number of ordered indexes that can be defined in the system", + CI_USED, + false, + CI_INT, + "128", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_NO_UNIQUE_HASH_INDEXES, + "MaxNoOfUniqueHashIndexes", + DB_TOKEN, + "Total number of unique hash indexes that can be defined in the system", + CI_USED, + false, + CI_INT, + "64", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_NO_INDEXES, + "MaxNoOfIndexes", + DB_TOKEN, + "Total number of indexes that can be defined in the system", + CI_DEPRICATED, + false, + CI_INT, + "128", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_NO_INDEX_OPS, + "MaxNoOfConcurrentIndexOperations", + DB_TOKEN, + "Total number of index operations that can execute simultaneously on one "DB_TOKEN_PRINT" node", + CI_USED, + false, + CI_INT, + "8K", + "0", + STR_VALUE(MAX_INT_RNIL) + }, + + { + CFG_DB_NO_TRIGGERS, + "MaxNoOfTriggers", + DB_TOKEN, + "Total number of triggers that can be defined in the system", + CI_USED, + false, + CI_INT, + "768", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_NO_TRIGGER_OPS, + "MaxNoOfFiredTriggers", + DB_TOKEN, + "Total number of triggers that can fire simultaneously in one "DB_TOKEN_PRINT" node", + CI_USED, + false, + CI_INT, + "4000", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + KEY_INTERNAL, + "ExecuteOnComputer", + DB_TOKEN, + "String referencing an earlier defined COMPUTER", + CI_USED, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_DB_NO_SAVE_MSGS, + "MaxNoOfSavedMessages", + DB_TOKEN, + "Max number of error messages in error log and max number of trace files", + CI_USED, + true, + CI_INT, + "25", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_MEMLOCK, + "LockPagesInMainMemory", + DB_TOKEN, + "If set to yes, then NDB Cluster data will not be swapped out to disk", + CI_USED, + true, + CI_BOOL, + "false", + "false", + "true" }, + + { + CFG_DB_WATCHDOG_INTERVAL, + "TimeBetweenWatchDogCheck", + DB_TOKEN, + "Time between execution checks inside a database node", + CI_USED, + true, + CI_INT, + "6000", + "70", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_STOP_ON_ERROR, + "StopOnError", + DB_TOKEN, + "If set to N, "DB_TOKEN_PRINT" automatically restarts/recovers in case of node failure", + CI_USED, + true, + CI_BOOL, + "true", + "false", + "true" }, + + { + CFG_DB_STOP_ON_ERROR_INSERT, + "RestartOnErrorInsert", + DB_TOKEN, + "See src/kernel/vm/Emulator.hpp NdbRestartType for details", + CI_INTERNAL, + true, + CI_INT, + "2", + "0", + "4" }, + + { + CFG_DB_NO_OPS, + "MaxNoOfConcurrentOperations", + DB_TOKEN, + "Max number of operation records in transaction coordinator", + CI_USED, + false, + CI_INT, + "32k", + "32", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_NO_LOCAL_OPS, + "MaxNoOfLocalOperations", + DB_TOKEN, + "Max number of operation records defined in the local storage node", + CI_USED, + false, + CI_INT, + UNDEFINED, + "32", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_NO_LOCAL_SCANS, + "MaxNoOfLocalScans", + DB_TOKEN, + "Max number of fragment scans in parallel in the local storage node", + CI_USED, + false, + CI_INT, + UNDEFINED, + "32", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_BATCH_SIZE, + "BatchSizePerLocalScan", + DB_TOKEN, + "Used to calculate 
the number of lock records for scan with hold lock", + CI_USED, + false, + CI_INT, + STR_VALUE(DEF_BATCH_SIZE), + "1", + STR_VALUE(MAX_PARALLEL_OP_PER_SCAN) }, + + { + CFG_DB_NO_TRANSACTIONS, + "MaxNoOfConcurrentTransactions", + DB_TOKEN, + "Max number of transaction executing concurrently on the "DB_TOKEN_PRINT" node", + CI_USED, + false, + CI_INT, + "4096", + "32", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_NO_SCANS, + "MaxNoOfConcurrentScans", + DB_TOKEN, + "Max number of scans executing concurrently on the "DB_TOKEN_PRINT" node", + CI_USED, + false, + CI_INT, + "256", + "2", + "500" }, + + { + CFG_DB_TRANS_BUFFER_MEM, + "TransactionBufferMemory", + DB_TOKEN, + "Dynamic buffer space (in bytes) for key and attribute data allocated for each "DB_TOKEN_PRINT" node", + CI_USED, + false, + CI_INT, + "1M", + "1K", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_INDEX_MEM, + "IndexMemory", + DB_TOKEN, + "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing indexes", + CI_USED, + false, + CI_INT64, + "18M", + "1M", + "1024G" }, + + { + CFG_DB_DATA_MEM, + "DataMemory", + DB_TOKEN, + "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing data", + CI_USED, + false, + CI_INT64, + "80M", + "1M", + "1024G" }, + + { + CFG_DB_UNDO_INDEX_BUFFER, + "UndoIndexBuffer", + DB_TOKEN, + "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for index part", + CI_USED, + false, + CI_INT, + "2M", + "1M", + STR_VALUE(MAX_INT_RNIL)}, + + { + CFG_DB_UNDO_DATA_BUFFER, + "UndoDataBuffer", + DB_TOKEN, + "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for data part", + CI_USED, + false, + CI_INT, + "16M", + "1M", + STR_VALUE(MAX_INT_RNIL)}, + + { + CFG_DB_REDO_BUFFER, + "RedoBuffer", + DB_TOKEN, + "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing REDO logs", + CI_USED, + false, + CI_INT, + "8M", + "1M", + STR_VALUE(MAX_INT_RNIL)}, + + { + CFG_DB_LONG_SIGNAL_BUFFER, + "LongMessageBuffer", + DB_TOKEN, + "Number bytes on each "DB_TOKEN_PRINT" node allocated for internal long messages", + CI_USED, + false, + CI_INT, + "1M", + "512k", + STR_VALUE(MAX_INT_RNIL)}, + + { + CFG_DB_DISK_PAGE_BUFFER_MEMORY, + "DiskPageBufferMemory", + DB_TOKEN, + "Number bytes on each "DB_TOKEN_PRINT" node allocated for disk page buffer cache", + CI_USED, + false, + CI_INT64, + "64M", + "4M", + "1024G" }, + + { + CFG_DB_SGA, + "SharedGlobalMemory", + DB_TOKEN, + "Total number bytes on each "DB_TOKEN_PRINT" node allocated for any use", + CI_USED, + false, + CI_INT64, + "20M", + "0", + "65536G" }, // 32k pages * 32-bit i value + + { + CFG_DB_START_PARTIAL_TIMEOUT, + "StartPartialTimeout", + DB_TOKEN, + "Time to wait before trying to start wo/ all nodes. 0=Wait forever", + CI_USED, + true, + CI_INT, + "30000", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_START_PARTITION_TIMEOUT, + "StartPartitionedTimeout", + DB_TOKEN, + "Time to wait before trying to start partitioned. 0=Wait forever", + CI_USED, + true, + CI_INT, + "60000", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_START_FAILURE_TIMEOUT, + "StartFailureTimeout", + DB_TOKEN, + "Time to wait before terminating. 0=Wait forever", + CI_USED, + true, + CI_INT, + "0", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_HEARTBEAT_INTERVAL, + "HeartbeatIntervalDbDb", + DB_TOKEN, + "Time between "DB_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. 
"DB_TOKEN_PRINT" considered dead after 3 missed HBs", + CI_USED, + true, + CI_INT, + "1500", + "10", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_API_HEARTBEAT_INTERVAL, + "HeartbeatIntervalDbApi", + DB_TOKEN, + "Time between "API_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. "API_TOKEN_PRINT" connection closed after 3 missed HBs", + CI_USED, + true, + CI_INT, + "1500", + "100", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_LCP_INTERVAL, + "TimeBetweenLocalCheckpoints", + DB_TOKEN, + "Time between taking snapshots of the database (expressed in 2log of bytes)", + CI_USED, + true, + CI_INT, + "20", + "0", + "31" }, + + { + CFG_DB_GCP_INTERVAL, + "TimeBetweenGlobalCheckpoints", + DB_TOKEN, + "Time between doing group commit of transactions to disk", + CI_USED, + true, + CI_INT, + "2000", + "10", + "32000" }, + + { + CFG_DB_NO_REDOLOG_FILES, + "NoOfFragmentLogFiles", + DB_TOKEN, + "No of 16 Mbyte Redo log files in each of 4 file sets belonging to "DB_TOKEN_PRINT" node", + CI_USED, + false, + CI_INT, + "16", + "3", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_MAX_OPEN_FILES, + "MaxNoOfOpenFiles", + DB_TOKEN, + "Max number of files open per "DB_TOKEN_PRINT" node.(One thread is created per file)", + CI_USED, + false, + CI_INT, + "40", + "20", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_INITIAL_OPEN_FILES, + "InitialNoOfOpenFiles", + DB_TOKEN, + "Initial number of files open per "DB_TOKEN_PRINT" node.(One thread is created per file)", + CI_USED, + false, + CI_INT, + "27", + "20", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_TRANSACTION_CHECK_INTERVAL, + "TimeBetweenInactiveTransactionAbortCheck", + DB_TOKEN, + "Time between inactive transaction checks", + CI_USED, + true, + CI_INT, + "1000", + "1000", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_TRANSACTION_INACTIVE_TIMEOUT, + "TransactionInactiveTimeout", + DB_TOKEN, + "Time application can wait before executing another transaction part (ms).\n" + "This is the time the transaction coordinator waits for the application\n" + "to execute or send another part (query, statement) of the transaction.\n" + "If the application takes too long time, the transaction gets aborted.\n" + "Timeout set to 0 means that we don't timeout at all on application wait.", + CI_USED, + true, + CI_INT, + STR_VALUE(MAX_INT_RNIL), + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT, + "TransactionDeadlockDetectionTimeout", + DB_TOKEN, + "Time transaction can be executing in a DB node (ms).\n" + "This is the time the transaction coordinator waits for each database node\n" + "of the transaction to execute a request. 
If the database node takes too\n" + "long time, the transaction gets aborted.", + CI_USED, + true, + CI_INT, + "1200", + "50", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_LCP_DISC_PAGES_TUP_SR, + "NoOfDiskPagesToDiskDuringRestartTUP", + DB_TOKEN, + "DiskCheckpointSpeedSr", + CI_DEPRICATED, + true, + CI_INT, + "40", + "1", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_LCP_DISC_PAGES_TUP, + "NoOfDiskPagesToDiskAfterRestartTUP", + DB_TOKEN, + "DiskCheckpointSpeed", + CI_DEPRICATED, + true, + CI_INT, + "40", + "1", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_LCP_DISC_PAGES_ACC_SR, + "NoOfDiskPagesToDiskDuringRestartACC", + DB_TOKEN, + "DiskCheckpointSpeedSr", + CI_DEPRICATED, + true, + CI_INT, + "20", + "1", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_LCP_DISC_PAGES_ACC, + "NoOfDiskPagesToDiskAfterRestartACC", + DB_TOKEN, + "DiskCheckpointSpeed", + CI_DEPRICATED, + true, + CI_INT, + "20", + "1", + STR_VALUE(MAX_INT_RNIL) }, + + + { + CFG_DB_DISCLESS, + "Diskless", + DB_TOKEN, + "Run wo/ disk", + CI_USED, + true, + CI_BOOL, + "false", + "false", + "true"}, + + { + KEY_INTERNAL, + "Discless", + DB_TOKEN, + "Diskless", + CI_DEPRICATED, + true, + CI_BOOL, + "false", + "false", + "true"}, + + + + { + CFG_DB_ARBIT_TIMEOUT, + "ArbitrationTimeout", + DB_TOKEN, + "Max time (milliseconds) database partion waits for arbitration signal", + CI_USED, + false, + CI_INT, + "3000", + "10", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_NODE_DATADIR, + "DataDir", + DB_TOKEN, + "Data directory for this node", + CI_USED, + false, + CI_STRING, + MYSQLCLUSTERDIR, + 0, 0 }, + + { + CFG_DB_FILESYSTEM_PATH, + "FileSystemPath", + DB_TOKEN, + "Path to directory where the "DB_TOKEN_PRINT" node stores its data (directory must exist)", + CI_USED, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_LOGLEVEL_STARTUP, + "LogLevelStartup", + DB_TOKEN, + "Node startup info printed on stdout", + CI_USED, + false, + CI_INT, + "1", + "0", + "15" }, + + { + CFG_LOGLEVEL_SHUTDOWN, + "LogLevelShutdown", + DB_TOKEN, + "Node shutdown info printed on stdout", + CI_USED, + false, + CI_INT, + "0", + "0", + "15" }, + + { + CFG_LOGLEVEL_STATISTICS, + "LogLevelStatistic", + DB_TOKEN, + "Transaction, operation, transporter info printed on stdout", + CI_USED, + false, + CI_INT, + "0", + "0", + "15" }, + + { + CFG_LOGLEVEL_CHECKPOINT, + "LogLevelCheckpoint", + DB_TOKEN, + "Local and Global checkpoint info printed on stdout", + CI_USED, + false, + CI_INT, + "0", + "0", + "15" }, + + { + CFG_LOGLEVEL_NODERESTART, + "LogLevelNodeRestart", + DB_TOKEN, + "Node restart, node failure info printed on stdout", + CI_USED, + false, + CI_INT, + "0", + "0", + "15" }, + + { + CFG_LOGLEVEL_CONNECTION, + "LogLevelConnection", + DB_TOKEN, + "Node connect/disconnect info printed on stdout", + CI_USED, + false, + CI_INT, + "0", + "0", + "15" }, + + { + CFG_LOGLEVEL_CONGESTION, + "LogLevelCongestion", + DB_TOKEN, + "Congestion info printed on stdout", + CI_USED, + false, + CI_INT, + "0", + "0", + "15" }, + + { + CFG_LOGLEVEL_ERROR, + "LogLevelError", + DB_TOKEN, + "Transporter, heartbeat errors printed on stdout", + CI_USED, + false, + CI_INT, + "0", + "0", + "15" }, + + { + CFG_LOGLEVEL_INFO, + "LogLevelInfo", + DB_TOKEN, + "Heartbeat and log info printed on stdout", + CI_USED, + false, + CI_INT, + "0", + "0", + "15" }, + + /** + * Backup + */ + { + CFG_DB_PARALLEL_BACKUPS, + "ParallelBackups", + DB_TOKEN, + "Maximum number of parallel backups", + CI_NOTIMPLEMENTED, + false, + CI_INT, + "1", + "1", + "1" }, + + { + CFG_DB_BACKUP_DATADIR, + "BackupDataDir", + 
DB_TOKEN, + "Path to where to store backups", + CI_USED, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_DB_DISK_SYNCH_SIZE, + "DiskSyncSize", + DB_TOKEN, + "Data written to a file before a synch is forced", + CI_USED, + false, + CI_INT, + "4M", + "32k", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_CHECKPOINT_SPEED, + "DiskCheckpointSpeed", + DB_TOKEN, + "Bytes per second allowed to be written by checkpoint", + CI_USED, + false, + CI_INT, + "10M", + "1M", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_CHECKPOINT_SPEED_SR, + "DiskCheckpointSpeedInRestart", + DB_TOKEN, + "Bytes per second allowed to be written by checkpoint during restart", + CI_USED, + false, + CI_INT, + "100M", + "1M", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_BACKUP_MEM, + "BackupMemory", + DB_TOKEN, + "Total memory allocated for backups per node (in bytes)", + CI_USED, + false, + CI_INT, + "4M", // sum of BackupDataBufferSize and BackupLogBufferSize + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_BACKUP_DATA_BUFFER_MEM, + "BackupDataBufferSize", + DB_TOKEN, + "Default size of databuffer for a backup (in bytes)", + CI_USED, + false, + CI_INT, + "2M", // remember to change BackupMemory + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_BACKUP_LOG_BUFFER_MEM, + "BackupLogBufferSize", + DB_TOKEN, + "Default size of logbuffer for a backup (in bytes)", + CI_USED, + false, + CI_INT, + "2M", // remember to change BackupMemory + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_BACKUP_WRITE_SIZE, + "BackupWriteSize", + DB_TOKEN, + "Default size of filesystem writes made by backup (in bytes)", + CI_USED, + false, + CI_INT, + "32K", + "2K", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_BACKUP_MAX_WRITE_SIZE, + "BackupMaxWriteSize", + DB_TOKEN, + "Max size of filesystem writes made by backup (in bytes)", + CI_USED, + false, + CI_INT, + "256K", + "2K", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_DB_STRING_MEMORY, + "StringMemory", + DB_TOKEN, + "Default size of string memory (0 -> 5% of max 1-100 -> %of max, >100 -> actual bytes)", + CI_USED, + false, + CI_INT, + "0", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + /*************************************************************************** + * API + ***************************************************************************/ + { + CFG_SECTION_NODE, + API_TOKEN, + API_TOKEN, + "Node section", + CI_USED, + false, + CI_SECTION, + (const char *)NODE_TYPE_API, + 0, 0 + }, + + { + CFG_NODE_HOST, + "HostName", + API_TOKEN, + "Name of computer for this node", + CI_INTERNAL, + false, + CI_STRING, + "", + 0, 0 }, + + { + CFG_NODE_SYSTEM, + "System", + API_TOKEN, + "Name of system for this node", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + KEY_INTERNAL, + "Id", + API_TOKEN, + "", + CI_DEPRICATED, + false, + CI_INT, + MANDATORY, + "1", + STR_VALUE(MAX_NODES) }, + + { + CFG_NODE_ID, + "NodeId", + API_TOKEN, + "Number identifying application node ("API_TOKEN_PRINT")", + CI_USED, + false, + CI_INT, + MANDATORY, + "1", + STR_VALUE(MAX_NODES) }, + + { + KEY_INTERNAL, + "ExecuteOnComputer", + API_TOKEN, + "String referencing an earlier defined COMPUTER", + CI_USED, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_NODE_ARBIT_RANK, + "ArbitrationRank", + API_TOKEN, + "If 0, then "API_TOKEN_PRINT" is not arbitrator. 
Kernel selects arbitrators in order 1, 2", + CI_USED, + false, + CI_INT, + "0", + "0", + "2" }, + + { + CFG_NODE_ARBIT_DELAY, + "ArbitrationDelay", + API_TOKEN, + "When asked to arbitrate, arbitrator waits this long before voting (msec)", + CI_USED, + false, + CI_INT, + "0", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_MAX_SCAN_BATCH_SIZE, + "MaxScanBatchSize", + "API", + "The maximum collective batch size for one scan", + CI_USED, + false, + CI_INT, + STR_VALUE(MAX_SCAN_BATCH_SIZE), + "32k", + "16M" }, + + { + CFG_BATCH_BYTE_SIZE, + "BatchByteSize", + "API", + "The default batch size in bytes", + CI_USED, + false, + CI_INT, + STR_VALUE(SCAN_BATCH_SIZE), + "1k", + "1M" }, + + { + CFG_BATCH_SIZE, + "BatchSize", + "API", + "The default batch size in number of records", + CI_USED, + false, + CI_INT, + STR_VALUE(DEF_BATCH_SIZE), + "1", + STR_VALUE(MAX_PARALLEL_OP_PER_SCAN) }, + + /**************************************************************************** + * MGM + ***************************************************************************/ + { + CFG_SECTION_NODE, + MGM_TOKEN, + MGM_TOKEN, + "Node section", + CI_USED, + false, + CI_SECTION, + (const char *)NODE_TYPE_MGM, + 0, 0 + }, + + { + CFG_NODE_HOST, + "HostName", + MGM_TOKEN, + "Name of computer for this node", + CI_INTERNAL, + false, + CI_STRING, + "", + 0, 0 }, + + { + CFG_NODE_DATADIR, + "DataDir", + MGM_TOKEN, + "Data directory for this node", + CI_USED, + false, + CI_STRING, + MYSQLCLUSTERDIR, + 0, 0 }, + + { + CFG_NODE_SYSTEM, + "System", + MGM_TOKEN, + "Name of system for this node", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + KEY_INTERNAL, + "Id", + MGM_TOKEN, + "", + CI_DEPRICATED, + false, + CI_INT, + MANDATORY, + "1", + STR_VALUE(MAX_NODES) }, + + { + CFG_NODE_ID, + "NodeId", + MGM_TOKEN, + "Number identifying the management server node ("MGM_TOKEN_PRINT")", + CI_USED, + false, + CI_INT, + MANDATORY, + "1", + STR_VALUE(MAX_NODES) }, + + { + CFG_LOG_DESTINATION, + "LogDestination", + MGM_TOKEN, + "String describing where logmessages are sent", + CI_USED, + false, + CI_STRING, + 0, + 0, 0 }, + + { + KEY_INTERNAL, + "ExecuteOnComputer", + MGM_TOKEN, + "String referencing an earlier defined COMPUTER", + CI_USED, + false, + CI_STRING, + 0, + 0, 0 }, + + { + KEY_INTERNAL, + "MaxNoOfSavedEvents", + MGM_TOKEN, + "", + CI_USED, + false, + CI_INT, + "100", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_MGM_PORT, + "PortNumber", + MGM_TOKEN, + "Port number to give commands to/fetch configurations from management server", + CI_USED, + false, + CI_INT, + NDB_PORT, + "0", + STR_VALUE(MAX_PORT_NO) }, + + { + KEY_INTERNAL, + "PortNumberStats", + MGM_TOKEN, + "Port number used to get statistical information from a management server", + CI_USED, + false, + CI_INT, + UNDEFINED, + "0", + STR_VALUE(MAX_PORT_NO) }, + + { + CFG_NODE_ARBIT_RANK, + "ArbitrationRank", + MGM_TOKEN, + "If 0, then "MGM_TOKEN_PRINT" is not arbitrator. 
Kernel selects arbitrators in order 1, 2", + CI_USED, + false, + CI_INT, + "1", + "0", + "2" }, + + { + CFG_NODE_ARBIT_DELAY, + "ArbitrationDelay", + MGM_TOKEN, + "", + CI_USED, + false, + CI_INT, + "0", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + /**************************************************************************** + * TCP + ***************************************************************************/ + { + CFG_SECTION_CONNECTION, + "TCP", + "TCP", + "Connection section", + CI_USED, + false, + CI_SECTION, + (const char *)CONNECTION_TYPE_TCP, + 0, 0 + }, + + { + CFG_CONNECTION_HOSTNAME_1, + "HostName1", + "TCP", + "Name/IP of computer on one side of the connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_HOSTNAME_2, + "HostName2", + "TCP", + "Name/IP of computer on one side of the connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_NODE_1, + "NodeId1", + "TCP", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", + CI_USED, + false, + CI_STRING, + MANDATORY, + 0, 0 }, + + { + CFG_CONNECTION_NODE_2, + "NodeId2", + "TCP", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", + CI_USED, + false, + CI_STRING, + MANDATORY, + 0, 0 }, + + { + CFG_CONNECTION_GROUP, + "Group", + "TCP", + "", + CI_USED, + false, + CI_INT, + "55", + "0", "200" }, + + { + CFG_CONNECTION_NODE_ID_SERVER, + "NodeIdServer", + "TCP", + "", + CI_USED, + false, + CI_INT, + MANDATORY, + "1", "63" }, + + { + CFG_CONNECTION_SEND_SIGNAL_ID, + "SendSignalId", + "TCP", + "Sends id in each signal. Used in trace files.", + CI_USED, + false, + CI_BOOL, + "true", + "false", + "true" }, + + + { + CFG_CONNECTION_CHECKSUM, + "Checksum", + "TCP", + "If checksum is enabled, all signals between nodes are checked for errors", + CI_USED, + false, + CI_BOOL, + "false", + "false", + "true" }, + + { + CFG_CONNECTION_SERVER_PORT, + "PortNumber", + "TCP", + "Port used for this transporter", + CI_USED, + false, + CI_INT, + MANDATORY, + "0", + STR_VALUE(MAX_PORT_NO) }, + + { + CFG_TCP_SEND_BUFFER_SIZE, + "SendBufferMemory", + "TCP", + "Bytes of buffer for signals sent from this node", + CI_USED, + false, + CI_INT, + "256K", + "64K", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_TCP_RECEIVE_BUFFER_SIZE, + "ReceiveBufferMemory", + "TCP", + "Bytes of buffer for signals received by this node", + CI_USED, + false, + CI_INT, + "64K", + "16K", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_TCP_PROXY, + "Proxy", + "TCP", + "", + CI_USED, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_NODE_1_SYSTEM, + "NodeId1_System", + "TCP", + "System for node 1 in connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_NODE_2_SYSTEM, + "NodeId2_System", + "TCP", + "System for node 2 in connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + + /**************************************************************************** + * SHM + ***************************************************************************/ + { + CFG_SECTION_CONNECTION, + "SHM", + "SHM", + "Connection section", + CI_USED, + false, + CI_SECTION, + (const char *)CONNECTION_TYPE_SHM, + 0, 0 }, + + { + CFG_CONNECTION_HOSTNAME_1, + "HostName1", + "SHM", + "Name/IP of computer on one side of the connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_HOSTNAME_2, + "HostName2", + "SHM", + "Name/IP of computer on 
one side of the connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_SERVER_PORT, + "PortNumber", + "SHM", + "Port used for this transporter", + CI_USED, + false, + CI_INT, + MANDATORY, + "0", + STR_VALUE(MAX_PORT_NO) }, + + { + CFG_SHM_SIGNUM, + "Signum", + "SHM", + "Signum to be used for signalling", + CI_USED, + false, + CI_INT, + UNDEFINED, + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_CONNECTION_NODE_1, + "NodeId1", + "SHM", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", + CI_USED, + false, + CI_STRING, + MANDATORY, + 0, 0 }, + + { + CFG_CONNECTION_NODE_2, + "NodeId2", + "SHM", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", + CI_USED, + false, + CI_STRING, + MANDATORY, + 0, 0 }, + + { + CFG_CONNECTION_GROUP, + "Group", + "SHM", + "", + CI_USED, + false, + CI_INT, + "35", + "0", "200" }, + + { + CFG_CONNECTION_NODE_ID_SERVER, + "NodeIdServer", + "SHM", + "", + CI_USED, + false, + CI_INT, + MANDATORY, + "1", "63" }, + + { + CFG_CONNECTION_SEND_SIGNAL_ID, + "SendSignalId", + "SHM", + "Sends id in each signal. Used in trace files.", + CI_USED, + false, + CI_BOOL, + "false", + "false", + "true" }, + + + { + CFG_CONNECTION_CHECKSUM, + "Checksum", + "SHM", + "If checksum is enabled, all signals between nodes are checked for errors", + CI_USED, + false, + CI_BOOL, + "true", + "false", + "true" }, + + { + CFG_SHM_KEY, + "ShmKey", + "SHM", + "A shared memory key", + CI_USED, + false, + CI_INT, + UNDEFINED, + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_SHM_BUFFER_MEM, + "ShmSize", + "SHM", + "Size of shared memory segment", + CI_USED, + false, + CI_INT, + "1M", + "64K", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_CONNECTION_NODE_1_SYSTEM, + "NodeId1_System", + "SHM", + "System for node 1 in connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_NODE_2_SYSTEM, + "NodeId2_System", + "SHM", + "System for node 2 in connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + /**************************************************************************** + * SCI + ***************************************************************************/ + { + CFG_SECTION_CONNECTION, + "SCI", + "SCI", + "Connection section", + CI_USED, + false, + CI_SECTION, + (const char *)CONNECTION_TYPE_SCI, + 0, 0 + }, + + { + CFG_CONNECTION_NODE_1, + "NodeId1", + "SCI", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", + CI_USED, + false, + CI_STRING, + MANDATORY, + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_CONNECTION_NODE_2, + "NodeId2", + "SCI", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", + CI_USED, + false, + CI_STRING, + MANDATORY, + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_CONNECTION_GROUP, + "Group", + "SCI", + "", + CI_USED, + false, + CI_INT, + "15", + "0", "200" }, + + { + CFG_CONNECTION_NODE_ID_SERVER, + "NodeIdServer", + "SCI", + "", + CI_USED, + false, + CI_INT, + MANDATORY, + "1", "63" }, + + { + CFG_CONNECTION_HOSTNAME_1, + "HostName1", + "SCI", + "Name/IP of computer on one side of the connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_HOSTNAME_2, + "HostName2", + "SCI", + "Name/IP of computer on one side of the connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_SERVER_PORT, + "PortNumber", 
+ "SCI", + "Port used for this transporter", + CI_USED, + false, + CI_INT, + MANDATORY, + "0", + STR_VALUE(MAX_PORT_NO) }, + + { + CFG_SCI_HOST1_ID_0, + "Host1SciId0", + "SCI", + "SCI-node id for adapter 0 on Host1 (a computer can have two adapters)", + CI_USED, + false, + CI_INT, + MANDATORY, + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_SCI_HOST1_ID_1, + "Host1SciId1", + "SCI", + "SCI-node id for adapter 1 on Host1 (a computer can have two adapters)", + CI_USED, + false, + CI_INT, + "0", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_SCI_HOST2_ID_0, + "Host2SciId0", + "SCI", + "SCI-node id for adapter 0 on Host2 (a computer can have two adapters)", + CI_USED, + false, + CI_INT, + MANDATORY, + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_SCI_HOST2_ID_1, + "Host2SciId1", + "SCI", + "SCI-node id for adapter 1 on Host2 (a computer can have two adapters)", + CI_USED, + false, + CI_INT, + "0", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_CONNECTION_SEND_SIGNAL_ID, + "SendSignalId", + "SCI", + "Sends id in each signal. Used in trace files.", + CI_USED, + false, + CI_BOOL, + "true", + "false", + "true" }, + + { + CFG_CONNECTION_CHECKSUM, + "Checksum", + "SCI", + "If checksum is enabled, all signals between nodes are checked for errors", + CI_USED, + false, + CI_BOOL, + "false", + "false", + "true" }, + + { + CFG_SCI_SEND_LIMIT, + "SendLimit", + "SCI", + "Transporter send buffer contents are sent when this no of bytes is buffered", + CI_USED, + false, + CI_INT, + "8K", + "128", + "32K" }, + + { + CFG_SCI_BUFFER_MEM, + "SharedBufferSize", + "SCI", + "Size of shared memory segment", + CI_USED, + false, + CI_INT, + "1M", + "64K", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_CONNECTION_NODE_1_SYSTEM, + "NodeId1_System", + "SCI", + "System for node 1 in connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_NODE_2_SYSTEM, + "NodeId2_System", + "SCI", + "System for node 2 in connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + /**************************************************************************** + * OSE + ***************************************************************************/ + { + CFG_SECTION_CONNECTION, + "OSE", + "OSE", + "Connection section", + CI_USED, + false, + CI_SECTION, + (const char *)CONNECTION_TYPE_OSE, + 0, 0 + }, + + { + CFG_CONNECTION_HOSTNAME_1, + "HostName1", + "OSE", + "Name of computer on one side of the connection", + CI_USED, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_HOSTNAME_2, + "HostName2", + "OSE", + "Name of computer on one side of the connection", + CI_USED, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_NODE_1, + "NodeId1", + "OSE", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", + CI_USED, + false, + CI_INT, + MANDATORY, + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_CONNECTION_NODE_2, + "NodeId2", + "OSE", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", + CI_USED, + false, + CI_INT, + UNDEFINED, + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_CONNECTION_SEND_SIGNAL_ID, + "SendSignalId", + "OSE", + "Sends id in each signal. 
Used in trace files.", + CI_USED, + false, + CI_BOOL, + "true", + "false", + "true" }, + + { + CFG_CONNECTION_CHECKSUM, + "Checksum", + "OSE", + "If checksum is enabled, all signals between nodes are checked for errors", + CI_USED, + false, + CI_BOOL, + "false", + "false", + "true" }, + + { + CFG_OSE_PRIO_A_SIZE, + "PrioASignalSize", + "OSE", + "Size of priority A signals (in bytes)", + CI_USED, + false, + CI_INT, + "1000", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_OSE_PRIO_B_SIZE, + "PrioBSignalSize", + "OSE", + "Size of priority B signals (in bytes)", + CI_USED, + false, + CI_INT, + "1000", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_OSE_RECEIVE_ARRAY_SIZE, + "ReceiveArraySize", + "OSE", + "Number of OSE signals checked for correct ordering (in no of OSE signals)", + CI_USED, + false, + CI_INT, + "10", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_CONNECTION_NODE_1_SYSTEM, + "NodeId1_System", + "OSE", + "System for node 1 in connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_NODE_2_SYSTEM, + "NodeId2_System", + "OSE", + "System for node 2 in connection", + CI_INTERNAL, + false, + CI_STRING, + UNDEFINED, + 0, 0 }, +}; + +const int ParamInfoNum = sizeof(ParamInfoArray) / sizeof(ParamInfo); diff --git a/storage/ndb/src/mgmsrv/ParamInfo.hpp b/storage/ndb/src/mgmsrv/ParamInfo.hpp new file mode 100644 index 00000000000..7d12cd6252f --- /dev/null +++ b/storage/ndb/src/mgmsrv/ParamInfo.hpp @@ -0,0 +1,44 @@ +#ifndef PARAMINFO_H +#define PARAMINFO_H + +#define DB_TOKEN "DB" +#define MGM_TOKEN "MGM" +#define API_TOKEN "API" + +#ifdef __cplusplus +extern "C" +{ +#endif + +/** + * The Configuration parameter type and status + */ + +enum ParameterType { CI_BOOL, CI_INT, CI_INT64, CI_STRING, CI_SECTION }; +enum ParameterStatus { CI_USED, ///< Active + CI_DEPRICATED, ///< Can be, but shouldn't + CI_NOTIMPLEMENTED, ///< Is ignored. 
+ CI_INTERNAL ///< Not configurable by the user +}; + +/** + * Entry for one configuration parameter + */ +typedef struct m_ParamInfo { + Uint32 _paramId; + const char* _fname; + const char* _section; + const char* _description; + ParameterStatus _status; + bool _updateable; + ParameterType _type; + const char* _default; + const char* _min; + const char* _max; +}ParamInfo; + +#ifdef __cplusplus +} +#endif + +#endif From 72059a9be72c98f19713c7c72ac75fc6fcdd8705 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 18 Oct 2006 13:45:41 +0800 Subject: [PATCH 04/15] Bug#19454: Some values of MaxNoOfTriggers can stop a server correct segment fault in setSize(), and improve the error message with displaying the specific parameter's name if malloc fail storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp: add an argument refer to the ID of a parameter which will be malloced storage/ndb/src/kernel/vm/ArrayPool.hpp: in setSize(), correct the segment default due to overflow of two 32-bit interger's multiply --- .../ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 2 +- storage/ndb/src/kernel/vm/ArrayPool.hpp | 29 +++++++++++++++---- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp index c59cf4015af..e6dad285bc2 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp @@ -310,7 +310,7 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal) c_storedProcPool.setSize(noOfStoredProc); c_buildIndexPool.setSize(c_noOfBuildIndexRec); - c_triggerPool.setSize(noOfTriggers); + c_triggerPool.setSize(noOfTriggers, false, true, true, CFG_DB_NO_TRIGGERS); c_extent_hash.setSize(1024); // 4k diff --git a/storage/ndb/src/kernel/vm/ArrayPool.hpp b/storage/ndb/src/kernel/vm/ArrayPool.hpp index 6a5bb948dd8..837b466bcf8 100644 --- a/storage/ndb/src/kernel/vm/ArrayPool.hpp +++ b/storage/ndb/src/kernel/vm/ArrayPool.hpp @@ -25,6 +25,7 @@ #include #include #include +#include template class Array; @@ -43,7 +44,8 @@ public: * * Note, can currently only be called once */ - bool setSize(Uint32 noOfElements, bool align = false, bool exit_on_error = true, bool guard = true); + bool setSize(Uint32 noOfElements, bool align = false, bool exit_on_error = true, + bool guard = true, Uint32 paramId = 0); bool set(T*, Uint32 cnt, bool align = false); void clear() { theArray = 0; } @@ -221,13 +223,19 @@ template inline bool ArrayPool::setSize(Uint32 noOfElements, - bool align, bool exit_on_error, bool guard){ + bool align, bool exit_on_error, bool guard, Uint32 paramId){ if(size == 0){ if(noOfElements == 0) return true; + Uint64 real_size = (Uint64)noOfElements * sizeof(T); + size_t req_size = (size_t)real_size; + Uint64 real_size_align = real_size + sizeof(T); + size_t req_size_align = (size_t)real_size_align; + if(align) { - alloc_ptr = ndbd_malloc((noOfElements+1) * sizeof(T)); + if((Uint64)req_size_align == real_size_align && req_size_align > 0) + alloc_ptr = ndbd_malloc(req_size_align); UintPtr p = (UintPtr)alloc_ptr; UintPtr mod = p % sizeof(T); if (mod) @@ -236,14 +244,23 @@ ArrayPool::setSize(Uint32 noOfElements, } theArray = (T *)p; } - else - theArray = (T *)(alloc_ptr = ndbd_malloc(noOfElements * sizeof(T))); + else if((Uint64)req_size == real_size && req_size > 0) + theArray = (T *)(alloc_ptr = ndbd_malloc(req_size)); if(theArray == 0) { + char errmsg[255] = "ArrayPool::setSize malloc failed"; + struct ndb_mgm_param_info param_info; + size_t size = sizeof(ndb_mgm_param_info); 
if (!exit_on_error) return false; - ErrorReporter::handleAssert("ArrayPool::setSize malloc failed", + + if(0 != paramId && 0 == ndb_mgm_get_db_parameter_info(paramId, ¶m_info, &size)) { + BaseString::snprintf(errmsg, sizeof(errmsg), + "ArrayPool::setSize malloc parameter %s failed", param_info.m_name); + } + + ErrorReporter::handleAssert(errmsg, __FILE__, __LINE__, NDBD_EXIT_MEMALLOC); return false; // not reached } From e1922f0b322d9d8a258bd9238e0f3f19710dc27a Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 18 Oct 2006 18:12:55 +0800 Subject: [PATCH 05/15] BUG #14612 If a restore is done on a slave cluster, yet the apply_status and schema tables don't exist, ndb_restore tool should create them. storage/ndb/tools/restore/consumer.hpp: Add create apply_status and schema system table method when they don't exist. storage/ndb/tools/restore/consumer_restore.cpp: Implement to create apply_status and schema system table method when they don't exist. storage/ndb/tools/restore/consumer_restore.hpp: Add create apply_status and schema system table method when they don't exist. storage/ndb/tools/restore/restore_main.cpp: if apply_status and schema system tables don't exist, create them also. --- storage/ndb/tools/restore/consumer.hpp | 1 + .../ndb/tools/restore/consumer_restore.cpp | 27 +++++++++++++++++++ .../ndb/tools/restore/consumer_restore.hpp | 1 + storage/ndb/tools/restore/restore_main.cpp | 9 +++++++ 4 files changed, 38 insertions(+) diff --git a/storage/ndb/tools/restore/consumer.hpp b/storage/ndb/tools/restore/consumer.hpp index d5c6d38985a..37f67884e01 100644 --- a/storage/ndb/tools/restore/consumer.hpp +++ b/storage/ndb/tools/restore/consumer.hpp @@ -36,6 +36,7 @@ public: virtual void logEntry(const LogEntry &){} virtual void endOfLogEntrys(){} virtual bool finalize_table(const TableS &){return true;} + virtual bool createSystable(const TableS &){ return true;} virtual bool update_apply_status(const RestoreMetaData &metaData){return true;} NODE_GROUP_MAP *m_nodegroup_map; uint m_nodegroup_map_len; diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp index 507058e2743..fe2e771d930 100644 --- a/storage/ndb/tools/restore/consumer_restore.cpp +++ b/storage/ndb/tools/restore/consumer_restore.cpp @@ -666,6 +666,33 @@ err: return result; } +bool +BackupRestore::createSystable(const TableS & tables){ + const char *tablename = tables.getTableName(); + + if( strcmp(tablename, NDB_REP_DB "/def/" NDB_APPLY_TABLE) != 0 && + strcmp(tablename, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE) != 0 ) + { + return true; + } + + BaseString tmp(tablename); + Vector split; + if(tmp.split(split, "/") != 3){ + err << "Invalid table name format " << tablename << endl; + return false; + } + + m_ndb->setDatabaseName(split[0].c_str()); + m_ndb->setSchemaName(split[1].c_str()); + + NdbDictionary::Dictionary* dict = m_ndb->getDictionary(); + if( dict->getTable(split[2].c_str()) != NULL ){ + return true; + } + return table(tables); +} + bool BackupRestore::table(const TableS & table){ if (!m_restore && !m_restore_meta) diff --git a/storage/ndb/tools/restore/consumer_restore.hpp b/storage/ndb/tools/restore/consumer_restore.hpp index c1d9472aea0..3d20cb3041e 100644 --- a/storage/ndb/tools/restore/consumer_restore.hpp +++ b/storage/ndb/tools/restore/consumer_restore.hpp @@ -73,6 +73,7 @@ public: virtual void endOfLogEntrys(); virtual bool finalize_table(const TableS &); virtual bool has_temp_error(); + virtual bool createSystable(const TableS & table); virtual bool 
update_apply_status(const RestoreMetaData &metaData); void connectToMysql(); bool map_in_frm(char *new_data, const char *data, diff --git a/storage/ndb/tools/restore/restore_main.cpp b/storage/ndb/tools/restore/restore_main.cpp index c6947f3bf01..8a632d388e0 100644 --- a/storage/ndb/tools/restore/restore_main.cpp +++ b/storage/ndb/tools/restore/restore_main.cpp @@ -567,6 +567,15 @@ main(int argc, char** argv) err << metaData[i]->getTableName() << " ... Exiting " << endl; exitHandler(NDBT_FAILED); } + } else { + for(Uint32 j= 0; j < g_consumers.size(); j++) + if (!g_consumers[j]->createSystable(* metaData[i])) + { + err << "Restore: Failed to restore system table: "; + err << metaData[i]->getTableName() << " ... Exiting " << endl; + exitHandler(NDBT_FAILED); + } + } } debug << "Close tables" << endl; From b8beac6f90fe8cb6dc848d86fcb0ab382c16b237 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 19 Oct 2006 13:49:32 +0800 Subject: [PATCH 06/15] BUG #22547 LongMessageBuffer too large causes ndbd to exit and gives the exact paramter name in error log. storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp: Add a parameter id parameter for setSize method. When allocating memory error results in ndbd exit can gives exact parameter name in error log. --- storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 957248bcf56..4b7ac6654fe 100644 --- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -62,7 +62,8 @@ Cmvmi::Cmvmi(Block_context& ctx) : &long_sig_buffer_size); long_sig_buffer_size= long_sig_buffer_size / 256; - g_sectionSegmentPool.setSize(long_sig_buffer_size); + g_sectionSegmentPool.setSize(long_sig_buffer_size, + false,true,true,CFG_DB_LONG_SIGNAL_BUFFER); // Add received signals addRecSignal(GSN_CONNECT_REP, &Cmvmi::execCONNECT_REP); From 9d71c4f2f33687c99ebeb5056bd7ad7cb1ca19f9 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 20 Oct 2006 10:54:27 +0800 Subject: [PATCH 07/15] ndb - bug#22548, Data nodes fail during starting if MaxNoOfUniqueHashIndexes has upper limit. added check in ndb_mgmd so that the sum of MaxNoOfTables, MaxNoOfOrderedIndexes, MaxNoOfUniqueHashIndexes and the number of System Tables must not overflow the max Uint32 number. storage/ndb/src/mgmsrv/ConfigInfo.cpp: the sum of MaxNoOfTables, MaxNoOfOrderedIndexes, MaxNoOfUniqueHashIndexes and the number of System Tables must not overflow the max value of Uint32. --- storage/ndb/src/mgmsrv/ConfigInfo.cpp | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp index 6d36662e516..b1268b8bb7d 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp @@ -3307,7 +3307,31 @@ checkDbConstraints(InitConfigFileParser::Context & ctx, const char *){ } else { ctx.m_userProperties.put("NoOfReplicas", replicas); } + + /** + * In kernel, will calculate the MaxNoOfMeataTables use the following sum: + * Uint32 noOfMetaTables = noOfTables + noOfOrderedIndexes + + * noOfUniqueHashIndexes + 2 + * 2 is the number of the SysTables. + * So must check that the sum does't exceed the max value of Uint32. 
+ */ + Uint32 noOfTables = 0, + noOfOrderedIndexes = 0, + noOfUniqueHashIndexes = 0; + ctx.m_currentSection->get("MaxNoOfTables", &noOfTables); + ctx.m_currentSection->get("MaxNoOfOrderedIndexes", &noOfOrderedIndexes); + ctx.m_currentSection->get("MaxNoOfUniqueHashIndexes", &noOfUniqueHashIndexes); + + Uint64 sum= (Uint64)noOfTables + noOfOrderedIndexes + noOfUniqueHashIndexes; + if (sum > ((Uint32)~0 - 2)) { + ctx.reportError("The sum of MaxNoOfTables, MaxNoOfOrderedIndexes and" + " MaxNoOfUniqueHashIndexes must not exceed %u - [%s]" + " starting at line: %d", + ((Uint32)~0 - 2), ctx.fname, ctx.m_sectionLineno); + return false; + } + return true; } From d1a7f24d053937c27685f778e0539bfa94da28d6 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 20 Oct 2006 14:55:14 +0800 Subject: [PATCH 08/15] BUG #21858 Make sure retry when EINTR returns, which decreases memory leak chance. storage/ndb/src/common/util/File.cpp: Avoid memory leak when EINTR error returns. Even though a close error happens, a ERROR message in out-file is given, and this shouldn't affect the normally running. --- storage/ndb/src/common/util/File.cpp | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/storage/ndb/src/common/util/File.cpp b/storage/ndb/src/common/util/File.cpp index 056b7ff199b..6776f736d16 100644 --- a/storage/ndb/src/common/util/File.cpp +++ b/storage/ndb/src/common/util/File.cpp @@ -123,12 +123,24 @@ bool File_class::close() { bool rc = true; + int retval = 0; + if (m_file != NULL) { ::fflush(m_file); - rc = (::fclose(m_file) == 0 ? true : false); - m_file = NULL; // Try again? + retval = ::fclose(m_file); + while ( (retval != 0) && (errno == EINTR) ){ + retval = ::fclose(m_file); + } + if( retval == 0){ + rc = true; + } + else { + rc = false; + ndbout_c("ERROR: Close file error in File.cpp for %s",strerror(errno)); + } } + m_file = NULL; return rc; } From a3d6c0f92bc2ff3bb40cb0416d36c6205523a714 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 1 Nov 2006 11:04:26 +0800 Subject: [PATCH 09/15] ndb - BUG#18647, cluster.schema database needs to be hidden. Hide cluster.schema table when executing 'show tables' on mysql client. sql/ha_ndbcluster.cc: Hide cluster.schema table to user. --- sql/ha_ndbcluster.cc | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index d86d87a0bd0..11347e08324 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -6404,6 +6404,23 @@ int ndbcluster_find_files(handlerton *hton, THD *thd, hash_free(&ok_tables); hash_free(&ndb_tables); + + // Delete schema file from files + if (!strcmp(db, NDB_REP_DB)) + { + uint count = 0; + while (count++ < files->elements) + { + file_name = (char *)files->pop(); + if (!strcmp(file_name, NDB_SCHEMA_TABLE)) + { + DBUG_PRINT("info", ("skip %s.%s table, it should be hidden to user", + NDB_REP_DB, NDB_SCHEMA_TABLE)); + continue; + } + files->push_back(file_name); + } + } } // extra bracket to avoid gcc 2.95.3 warning DBUG_RETURN(0); } From e997b3622ae48f5a710d5fb91b8a012ba8a35266 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 1 Nov 2006 10:50:19 +0000 Subject: [PATCH 10/15] BUG#21052 Modify the overflow error existed in last patch. ndb/include/ndbapi/ndberror.h: Change unsigned int to int ndb/src/mgmsrv/Services.cpp: Modify the overflow error existed in last patch. Change ther length of m_text to 512. ndb/src/ndbapi/ndberror.c: Change the parameter of ndb_error_string from unsinged int to int. 
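The size change in Services.cpp guards against unsigned wraparound: once the formatted event text has (nearly) filled m_text, the remaining-space expression sizeof(m_text) - m_text_len - 3 would wrap to a huge positive value if evaluated in an unsigned type, and ndb_error_string would then be told the buffer is far larger than it really is. Below is a minimal standalone sketch of that hazard and of the guarded append; append_error_text and its arguments are illustrative only and are not part of the patch.

    #include <cstdio>
    #include <cstring>

    // Append " - <error text>" to buf only when there is room left.
    // Doing the remaining-space arithmetic in a signed type lets the
    // "buffer already full" case fail the check instead of wrapping.
    static void append_error_text(char *buf, size_t bufsize, const char *errtext)
    {
      size_t used = std::strlen(buf);
      long long remaining = (long long)bufsize - (long long)used - 3; // may be <= 0
      if (remaining > 0)
      {
        std::snprintf(buf + used, 4, " - ");                 // separator: 3 chars + NUL
        std::snprintf(buf + used + 3, (size_t)remaining, "%s", errtext);
      }
      // With an unsigned 'remaining', bufsize - used - 3 would wrap to a huge
      // value and the guard above could never reject the call.
    }

The same reasoning explains the signature change: with a plain int size, a caller that ends up with a non-positive remaining length hits the new assert/early-return in ndb_error_string instead of silently passing a huge unsigned count.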
--- ndb/include/ndbapi/ndberror.h | 2 +- ndb/src/mgmsrv/Services.cpp | 9 ++++++--- ndb/src/ndbapi/ndberror.c | 7 +++++-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/ndb/include/ndbapi/ndberror.h b/ndb/include/ndbapi/ndberror.h index 2225f68f08d..4d4eddfe617 100644 --- a/ndb/include/ndbapi/ndberror.h +++ b/ndb/include/ndbapi/ndberror.h @@ -93,7 +93,7 @@ typedef ndberror_classification_enum ndberror_classification; const char *ndberror_status_message(ndberror_status); const char *ndberror_classification_message(ndberror_classification); void ndberror_update(ndberror_struct *); -int ndb_error_string(int err_no, char *str, unsigned int size); +int ndb_error_string(int err_no, char *str, int size); #endif /* doxygen skip internal*/ diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index 95afeba30ac..c8d99d2f2da 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -1337,7 +1337,7 @@ Ndb_mgmd_event_service::log(int eventType, const Uint32* theData, NodeId nodeId) if (EventLoggerBase::event_lookup(eventType,cat,threshold,severity,textF)) DBUG_VOID_RETURN; - char m_text[256]; + char m_text[512]; EventLogger::getText(m_text, sizeof(m_text), textF, theData, nodeId); @@ -1356,8 +1356,11 @@ Ndb_mgmd_event_service::log(int eventType, const Uint32* theData, NodeId nodeId) if(strcmp(ndb_logevent_body[i].token,"error") == 0) { int m_text_len= strlen(m_text); - snprintf(m_text+m_text_len, 4 , " - "); - ndb_error_string(theData[3], m_text+(m_text_len+3), sizeof(m_text)-m_text_len-3); + if(sizeof(m_text)-m_text_len-3 > 0) + { + BaseString::snprintf(m_text+m_text_len, 4 , " - "); + ndb_error_string(val, m_text+(m_text_len+3), sizeof(m_text)-m_text_len-3); + } } } diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index 6173292261d..247fdfe65c9 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -680,11 +680,14 @@ const char *ndberror_classification_message(ndberror_classification classificati return empty_string; } -int ndb_error_string(int err_no, char *str, unsigned int size) +int ndb_error_string(int err_no, char *str, int size) { ndberror_struct error; - unsigned int len; + int len; + assert(size > 1); + if(size <= 1) + return 0; error.code = err_no; ndberror_update(&error); From 98575bb401270c3100d0e7a3c5826209d4f97323 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 1 Nov 2006 18:23:30 +0000 Subject: [PATCH 11/15] BUG#19352 return exact error message when parameters overflow in 32-bit platform. ndb/src/kernel/vm/SimulatedBlock.cpp: Return exact error message when parameters overflow in 32-bit platform. 
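The overflow caught here is the classic 32-bit size computation: n * s is evaluated in size_t, so on a 32-bit ndbd a large configuration value silently wraps and a much smaller block is allocated. The patch widens the product to 64 bits and only calls the allocator when that product survives the round trip through size_t. A minimal sketch of the same check, using plain malloc and an invented helper name rather than the block's allocRecord:

    #include <cstdlib>

    // Allocate count records of elem_size bytes, refusing requests whose
    // true size does not fit in this platform's size_t (32-bit wraparound).
    static void *alloc_records(size_t elem_size, size_t count)
    {
      unsigned long long real_size =
        (unsigned long long)elem_size * (unsigned long long)count;
      size_t req_size = (size_t)real_size;
      if (real_size == 0 || (unsigned long long)req_size != real_size)
        return NULL;                 // empty request or would overflow size_t
      return std::malloc(req_size);  // the product is known to fit
    }

Error reporting can then print the 64-bit real_size (the %llu in the patch) so the log shows what was actually requested rather than the wrapped value.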
--- ndb/src/kernel/vm/SimulatedBlock.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/ndb/src/kernel/vm/SimulatedBlock.cpp b/ndb/src/kernel/vm/SimulatedBlock.cpp index 9b52ac65331..d5ba3ac63c1 100644 --- a/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/ndb/src/kernel/vm/SimulatedBlock.cpp @@ -658,24 +658,26 @@ SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear) void * p = NULL; size_t size = n*s; + Uint64 real_size = (Uint64)((Uint64)n)*((Uint64)s); refresh_watch_dog(); - if (size > 0){ + if (real_size > 0){ #ifdef VM_TRACE_MEM - ndbout_c("%s::allocRecord(%s, %u, %u) = %u bytes", + ndbout_c("%s::allocRecord(%s, %u, %u) = %llu bytes", getBlockName(number()), type, s, n, - size); + real_size); #endif - p = NdbMem_Allocate(size); + if( real_size == (Uint64)size ) + p = NdbMem_Allocate(size); if (p == NULL){ char buf1[255]; char buf2[255]; BaseString::snprintf(buf1, sizeof(buf1), "%s could not allocate memory for %s", getBlockName(number()), type); - BaseString::snprintf(buf2, sizeof(buf2), "Requested: %ux%u = %u bytes", - (Uint32)s, (Uint32)n, (Uint32)size); + BaseString::snprintf(buf2, sizeof(buf2), "Requested: %ux%u = %llu bytes", + (Uint32)s, (Uint32)n, (Uint64)real_size); ERROR_SET(fatal, ERR_MEMALLOC, buf1, buf2); } From a47ee8676e08aa3e71626c7b42e5be503de0866d Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 13 Nov 2006 15:31:59 +0800 Subject: [PATCH 12/15] Bug#19203, Different error reports for similar cases - unable allocate memory storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp: if malloc failed, it will show the parameter's name storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp: if malloc failed, it will show the parameter's name storage/ndb/src/kernel/vm/ArrayPool.hpp: reduce err message, or else some of it will be cut storage/ndb/src/kernel/vm/SimulatedBlock.cpp: add the feature which it can display the parameter's name if malloc failed storage/ndb/src/kernel/vm/SimulatedBlock.hpp: add an input argument which is the ID of a parameter --- storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp | 3 ++- storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 7 +++++-- storage/ndb/src/kernel/vm/ArrayPool.hpp | 2 +- storage/ndb/src/kernel/vm/SimulatedBlock.cpp | 14 +++++++++++--- storage/ndb/src/kernel/vm/SimulatedBlock.hpp | 2 +- 5 files changed, 20 insertions(+), 8 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp index 27355299a9c..16f65aeb13a 100644 --- a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp +++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp @@ -51,7 +51,8 @@ void Dbacc::initRecords() page8 = (Page8*)allocRecord("Page8", sizeof(Page8), cpagesize, - false); + false, + CFG_DB_INDEX_MEM); operationrec = (Operationrec*)allocRecord("Operationrec", sizeof(Operationrec), diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp index e6dad285bc2..3e86515ccd1 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp @@ -342,6 +342,7 @@ void Dbtup::initRecords() { unsigned i; Uint32 tmp; + Uint32 tmp1 = 0; const ndb_mgm_configuration_iterator * p = m_ctx.m_config.getOwnConfigIterator(); ndbrequire(p != 0); @@ -349,7 +350,7 @@ void Dbtup::initRecords() ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE, &tmp)); // Records with dynamic sizes - Page* ptr =(Page*)allocRecord("Page", sizeof(Page), tmp, false); + Page* ptr 
=(Page*)allocRecord("Page", sizeof(Page), tmp, false, CFG_DB_DATA_MEM); c_page_pool.set(ptr, tmp); attrbufrec = (Attrbufrec*)allocRecord("Attrbufrec", @@ -373,7 +374,9 @@ void Dbtup::initRecords() cnoOfTabDescrRec); ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_OP_RECS, &tmp)); - c_operation_pool.setSize(tmp); + ndb_mgm_get_int_parameter(p, CFG_DB_NO_LOCAL_OPS, &tmp1); + c_operation_pool.setSize(tmp, false, true, true, + tmp1 == 0 ? CFG_DB_NO_OPS : CFG_DB_NO_LOCAL_OPS); pageRange = (PageRange*)allocRecord("PageRange", sizeof(PageRange), diff --git a/storage/ndb/src/kernel/vm/ArrayPool.hpp b/storage/ndb/src/kernel/vm/ArrayPool.hpp index 837b466bcf8..25318a70b51 100644 --- a/storage/ndb/src/kernel/vm/ArrayPool.hpp +++ b/storage/ndb/src/kernel/vm/ArrayPool.hpp @@ -257,7 +257,7 @@ ArrayPool::setSize(Uint32 noOfElements, if(0 != paramId && 0 == ndb_mgm_get_db_parameter_info(paramId, ¶m_info, &size)) { BaseString::snprintf(errmsg, sizeof(errmsg), - "ArrayPool::setSize malloc parameter %s failed", param_info.m_name); + "Malloc memory for %s failed", param_info.m_name); } ErrorReporter::handleAssert(errmsg, diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp index ae7581abb8d..5ff15bee51e 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp @@ -657,7 +657,7 @@ SimulatedBlock::getBatSize(Uint16 blockNo){ } void* -SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear) +SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, Uint32 paramId) { void * p = NULL; @@ -678,8 +678,16 @@ SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear) if (p == NULL){ char buf1[255]; char buf2[255]; - BaseString::snprintf(buf1, sizeof(buf1), "%s could not allocate memory for %s", - getBlockName(number()), type); + struct ndb_mgm_param_info param_info; + size_t size = sizeof(ndb_mgm_param_info); + + if(0 != paramId && 0 == ndb_mgm_get_db_parameter_info(paramId, ¶m_info, &size)) { + BaseString::snprintf(buf1, sizeof(buf1), "%s could not allocate memory for parameter %s", + getBlockName(number()), param_info.m_name); + } else { + BaseString::snprintf(buf1, sizeof(buf1), "%s could not allocate memory for %s", + getBlockName(number()), type); + } BaseString::snprintf(buf2, sizeof(buf2), "Requested: %ux%u = %llu bytes", (Uint32)s, (Uint32)n, (Uint64)real_size); ERROR_SET(fatal, NDBD_EXIT_MEMALLOC, buf1, buf2); diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp index 3e90b20705e..2e8f33bf023 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp @@ -377,7 +377,7 @@ protected: * Allocates memory for the datastructures where ndb keeps the data * */ - void* allocRecord(const char * type, size_t s, size_t n, bool clear = true); + void* allocRecord(const char * type, size_t s, size_t n, bool clear = true, Uint32 paramId = 0); /** * Deallocate record From 89d83739eef2d4b936467e6904a1801080b44cde Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 14 Nov 2006 09:33:43 +0800 Subject: [PATCH 13/15] BUG#19808, Online config. 
Wrong error message if DataMemory less than a data on cluster storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp: add err message when error occur --- storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index a7865c356c8..a60144b5e50 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -3249,8 +3249,20 @@ Dbdict::restartCreateTab_dihComplete(Signal* signal, CreateTableRecordPtr createTabPtr; ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - //@todo check error - ndbrequire(createTabPtr.p->m_errorCode == 0); + fprintf(stderr,"Dbdict:restartCreateTab_dihComplete:, errorCode=%d\n", createTabPtr.p->m_errorCode); + if (createTabPtr.p->m_errorCode != 0) + { + char buf[255]; + BaseString::snprintf(buf, sizeof(buf), + "Unable to restart, fail while creating table" + " error: %d. Most likely change of configuration", + createTabPtr.p->m_errorCode); + progError(__LINE__, + NDBD_EXIT_INVALID_CONFIG, + buf); + ndbrequire(createTabPtr.p->m_errorCode == 0); + } + Callback callback; callback.m_callbackData = callbackData; From bedfbf87c56c7b35a504aa71374d3c03b5004de3 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 15 Nov 2006 13:43:02 +0800 Subject: [PATCH 14/15] BUG#23576, Table is created although tablespace does not exist sql/ha_ndbcluster.cc: if use tablespace, that also means store on disk, so pass tablespace's name to ndb, then will get a error from ndb and its cause; --- sql/ha_ndbcluster.cc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 11347e08324..9e6107ea412 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4706,10 +4706,18 @@ int ha_ndbcluster::create(const char *name, } if (info->store_on_disk) + { if (info->tablespace) tab.setTablespace(info->tablespace); else tab.setTablespace("DEFAULT-TS"); + } + else if (info->tablespace) + { + tab.setTablespace(info->tablespace); + info->store_on_disk = true; //if use tablespace, that also means store on disk + } + // No primary key, create shadow key as 64 bit, auto increment if (form->s->primary_key == MAX_KEY) { From eab917dba64eada12cb4001aca7720da31201b08 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 28 Nov 2006 11:14:36 +0800 Subject: [PATCH 15/15] ndb - fiexed for bug#15021, binlog_index table become inconsistent if errors during purge of binlogs. improved the original patch, changed if/else to switch/case. sql/table.cc: changed if/else to switch/case. 
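The sql/table.cc hunk below only restructures existing logic: the chain of if/else blocks on the handler's open error becomes one switch, with the generic print_error path moved into the default case. A compact sketch of the resulting shape, using stand-in error codes instead of the real HA_ERR_* values:

    #include <cerrno>

    enum { DEMO_ERR_NO_SUCH_TABLE = 1, DEMO_ERR_TOO_MANY_FILES = 2 }; // stand-ins

    struct OpenOutcome { int error; int sys_errno; bool error_reported; };

    // Classify a handler open error once; each specific case sets its own
    // outcome and the catch-all reporting path stays in 'default'.
    static OpenOutcome classify_open_error(int ha_err)
    {
      OpenOutcome out = { 0, 0, false };
      switch (ha_err)
      {
      case DEMO_ERR_NO_SUCH_TABLE:    // missing in engine: same as a missing .frm
        out.error = 1; out.sys_errno = ENOENT;  break;
      case DEMO_ERR_TOO_MANY_FILES:   // same message as failing to open the .frm
        out.error = 1; out.sys_errno = EMFILE;  break;
      default:                        // let the handler report the error itself
        out.error_reported = true;    break;
      }
      return out;
    }

Each case ends in an explicit break, so adding another error class later cannot fall through into the generic path by accident, which is the maintainability point of the change.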
--- sql/table.cc | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/sql/table.cc b/sql/table.cc index fb05162061e..01e98a8f63f 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1553,32 +1553,32 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, outparam->file->auto_repair() && !(ha_open_flags & HA_OPEN_FOR_REPAIR)); - if (ha_err == HA_ERR_NO_SUCH_TABLE) + switch (ha_err) { - /* - The table did not exists in storage engine, use same error message - as if the .frm file didn't exist - */ - error= 1; - my_errno= ENOENT; - } - else if (ha_err == EMFILE) - { - /* - Too many files opened, use same error message as if the .frm - file can't open - */ - DBUG_PRINT("error", ("open file: %s failed, too many files opened (errno: %d)", - share->normalized_path.str, ha_err)); - error= 1; - my_errno= EMFILE; - } - else - { - outparam->file->print_error(ha_err, MYF(0)); - error_reported= TRUE; - if (ha_err == HA_ERR_TABLE_DEF_CHANGED) - error= 7; + case HA_ERR_NO_SUCH_TABLE: + /* + The table did not exists in storage engine, use same error message + as if the .frm file didn't exist + */ + error= 1; + my_errno= ENOENT; + break; + case EMFILE: + /* + Too many files opened, use same error message as if the .frm + file can't open + */ + DBUG_PRINT("error", ("open file: %s failed, too many files opened (errno: %d)", + share->normalized_path.str, ha_err)); + error= 1; + my_errno= EMFILE; + break; + default: + outparam->file->print_error(ha_err, MYF(0)); + error_reported= TRUE; + if (ha_err == HA_ERR_TABLE_DEF_CHANGED) + error= 7; + break; } goto err; /* purecov: inspected */ }
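Several patches in this series (the ArrayPool::setSize, Cmvmi::Cmvmi and SimulatedBlock::allocRecord changes) share one reporting idiom: the allocation site is handed the configuration parameter id, and on failure that id is resolved through ndb_mgm_get_db_parameter_info() so the fatal message names the parameter the user should lower. The sketch below shows that idiom in isolation; format_alloc_failure is an invented wrapper, and the demo lookup with its id-to-name mapping is a stand-in with the same shape as the mgmapi pieces used in the diffs (a struct carrying m_name, and a lookup returning 0 on success).

    #include <cstdio>

    // Stand-ins for the mgmapi declarations used by the patches above; the
    // real ones come from the NDB mgmapi headers.
    struct demo_param_info { const char *m_name; };

    static int demo_get_db_parameter_info(unsigned paramId,
                                          demo_param_info *info, size_t *size)
    {
      (void)size;
      if (paramId == 1) { info->m_name = "DataMemory"; return 0; } // demo mapping only
      return -1;                                                   // unknown id
    }

    // Build the message handed to the error reporter: name the parameter when
    // the id resolves, fall back to the generic pool text otherwise.
    static void format_alloc_failure(char *buf, size_t len, unsigned paramId)
    {
      demo_param_info info;
      size_t sz = sizeof(info);
      if (paramId != 0 && demo_get_db_parameter_info(paramId, &info, &sz) == 0)
        std::snprintf(buf, len, "Malloc memory for %s failed", info.m_name);
      else
        std::snprintf(buf, len, "ArrayPool::setSize malloc failed");
    }

This is why Cmvmi passes CFG_DB_LONG_SIGNAL_BUFFER and DbtupGen passes CFG_DB_NO_TRIGGERS or CFG_DB_DATA_MEM: an out-of-memory exit then tells the administrator which config.ini setting to reduce instead of only naming an internal pool.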