diff --git a/client/completion_hash.cc b/client/completion_hash.cc
index 536e7f9373a..7a3b363c93c 100644
--- a/client/completion_hash.cc
+++ b/client/completion_hash.cc
@@ -79,7 +79,8 @@ int completion_hash_update(HashTable *ht, char *arKey, uint nKeyLength,
if (!memcmp(p->arKey, arKey, nKeyLength)) {
entry *n;
- n = (entry *) alloc_root(&ht->mem_root,sizeof(entry));
+ if (!(n = (entry *) alloc_root(&ht->mem_root,sizeof(entry))))
+ return FAILURE;
n->pNext = p->pData;
n->str = str;
p->pData = n;
diff --git a/client/mysql.cc b/client/mysql.cc
index ace595f9cad..e605c2d7db4 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -1502,7 +1502,10 @@ You can turn off this feature to get a quicker startup with -A\n\n");
if (!(field_names[i] = (char **) alloc_root(&hash_mem_root, sizeof(char *) * (num_fields*2+1))))
- break;
+ {
+ mysql_free_result(fields);
+ break;
+ }
field_names[i][num_fields*2]= '\0';
j=0;
while ((sql_field=mysql_fetch_field(fields)))
@@ -2077,10 +2080,10 @@ print_table_data_html(MYSQL_RES *result)
}
while ((cur = mysql_fetch_row(result)))
{
+ ulong *lengths=mysql_fetch_lengths(result);
(void) tee_fputs("<TR>", PAGER);
+ parallel argument
+ will be ignored and maximum parallelism will be used instead. In other words, all
+ fragments which it is possible to scan will be scanned simultaneously and in parallel
+ in such cases.
@subsection secScanLocks Lock handling with scans
- When scanning a table or an index potentially
- a lot of records will be returned.
-
- But Ndb will only lock a batch of rows per fragment at a time.
+ Performing scans on either a table or an index has the potential to
+ return a great many records; however, Ndb will lock only a predetermined
+ number of rows per fragment at a time.
How many rows will be locked per fragment is controlled by the
- batch parameter to NdbScanOperation::readTuples().
+ batch parameter passed to NdbScanOperation::readTuples().
- To let the application handle how locks are released
- NdbScanOperation::nextResult() have a parameter fetch_allow.
- If NdbScanOperation::nextResult() is called with fetch_allow = false, no
- locks may be released as result of the function call. Otherwise the locks
- for the current batch may be released.
+ In order to allow the application to handle how locks are released,
+ NdbScanOperation::nextResult() has a Boolean parameter fetch_allow.
+ If NdbScanOperation::nextResult() is called with fetch_allow equal to
+ false, then no locks may be released as a result of the function call.
+ Otherwise the locks for the current batch may be released.
- This example shows scan delete, handling locks in an efficient manner.
+ This next example shows a scan delete that handles locks in an efficient manner.
For the sake of brevity, we omit error-handling.
@code
int check;
@@ -364,40 +355,31 @@
{
// Inner loop for each row within batch
MyScanOperation->deleteCurrentTuple();
- } while((check = MyScanOperation->nextResult(false) == 0));
+ } while((check = MyScanOperation->nextResult(false)) == 0);
// When no more rows in batch, execute all defined deletes
MyTransaction->execute(NoCommit);
}
@endcode
- See @ref ndbapi_scan.cpp for full example of scan.
+ See @ref ndbapi_scan.cpp for a more complete example of a scan.
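+ For illustration only, here is a minimal sketch of how such a scan might be
+ set up before the nextResult() loop shown above (this assumes the
+ readTuples(lock mode, batch, parallelism) form referred to earlier, and that
+ MyScanOperation and MyTransaction have already been obtained as usual):
+ @code
+ // Request exclusive locks and a batch of 8 rows per fragment
+ MyScanOperation->readTuples(NdbOperation::LM_Exclusive, 8);
+ // Start the scan; rows are then fetched with MyScanOperation->nextResult()
+ MyTransaction->execute(NoCommit);
+ @endcode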
@section secError Error Handling
- Errors can occur when
- -# operations are being defined, or when the
- -# transaction is being executed.
+ Errors can occur either when operations making up a transaction are being
+ defined, or when the transaction is actually being executed. Catching and
+ handling either sort of error requires testing the value returned by
+ NdbTransaction::execute(), and then, if an error is indicated (that is,
+ if this value is equal to -1), using the following two methods in order to
+ identify the error's type and location:
- One recommended way to handle a transaction failure
- (i.e. an error is reported) is to:
- -# Rollback transaction (NdbTransaction::execute() with a special parameter)
- -# Close transaction
- -# Restart transaction (if the error was temporary)
-
- @note Transactions are not automatically closed when an error occur. Call
- Ndb::closeTransaction() to close.
-
- Several errors can occur when a transaction holds multiple
- operations which are simultaneously executed.
- In this case the application has to go through the operation
- objects and query for their NdbError objects to find out what really
- happened.
-
- NdbTransaction::getNdbErrorOperation() returns a reference to the
- operation causing the latest error.
- NdbTransaction::getNdbErrorLine() delivers the method number of the
- erroneous method in the operation.
+ - NdbTransaction::getNdbErrorOperation() returns a reference to the
+ operation causing the most recent error.
+ - NdbTransaction::getNdbErrorLine() yields the method number of the
+ erroneous method in the operation.
+
+ This short example illustrates how to detect an error and to use these
+ two methods to identify it:
@code
theTransaction = theNdb->startTransaction();
@@ -405,26 +387,43 @@
if (theOperation == NULL) goto error;
theOperation->readTuple(NdbOperation::LM_Read);
theOperation->setValue("ATTR_1", at1);
- theOperation->setValue("ATTR_2", at1); //Here an error occurs
+ theOperation->setValue("ATTR_2", at1); // Error occurs here
theOperation->setValue("ATTR_3", at1);
theOperation->setValue("ATTR_4", at1);
if (theTransaction->execute(Commit) == -1) {
errorLine = theTransaction->getNdbErrorLine();
errorOperation = theTransaction->getNdbErrorOperation();
+ }
@endcode
- Here errorLine will be 3 as the error occurred in the third method
- on the operation object.
- Getting errorLine == 0 means that the error occurred when executing the
- operations.
- Here errorOperation will be a pointer to the theOperation object.
- NdbTransaction::getNdbError() will return the NdbError object
- including holding information about the error.
+ Here errorLine will be 3, as the error occurred in the
+ third method called on the NdbOperation object (in this case,
+ theOperation); if the result of
+ NdbTransaction::getNdbErrorLine() is 0, this means that the error
+ occurred when the operations were executed. In this example,
+ errorOperation will be a pointer to the theOperation
+ object. The NdbTransaction::getNdbError() method returns an NdbError
+ object providing information about the error.
- Since errors could have occurred even when a commit was reported,
- there is also a special method, NdbTransaction::commitStatus(),
- to check the commit status of the transaction.
+ @note Transactions are not automatically closed when an error occurs. Call
+ Ndb::closeTransaction() to close the transaction.
+
+ One recommended way to handle a transaction failure
+ (i.e. an error is reported) is to:
+ -# Rollback transaction (call NdbTransaction::execute() with a special parameter)
+ -# Close transaction (call NdbTransaction::closeTransaction())
+ -# If the error was temporary, attempt to restart the transaction
+
+ Several errors can occur when a transaction contains multiple
+ operations which are simultaneously executed.
+ In this case the application has to go through all operations
+ and query their NdbError objects to find out what really happened.
+
+ It is also important to note that errors can occur even when a commit is
+ reported as successful. In order to handle such situations, the NDB API
+ provides an additional NdbTransaction::commitStatus() method to check the
+ transaction's commit status.
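+
+ A minimal sketch of one possible failure path, using the same objects as in
+ the example above (this assumes the Rollback execution type is the "special
+ parameter" referred to earlier; the recovery policy itself is
+ application-specific):
+ @code
+ if (theTransaction->execute(Commit) == -1) {
+   theTransaction->execute(Rollback);         // roll the transaction back
+   theNdb->closeTransaction(theTransaction);  // transactions are not closed automatically
+   // if the reported error was temporary, restart the transaction here
+ }
+ // even a reported commit can be double-checked with
+ // theTransaction->commitStatus()
+ @endcode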
******************************************************************************/
@@ -434,6 +433,10 @@
*/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+/**
+ * @page ndbapi_async.cpp ndbapi_async.cpp
+ * @include ndbapi_async.cpp
+ */
/**
* @page ndbapi_async1.cpp ndbapi_async1.cpp
* @include ndbapi_async1.cpp
@@ -455,6 +458,11 @@
* @include ndbapi_scan.cpp
*/
+/**
+ * @page ndbapi_event.cpp ndbapi_event.cpp
+ * @include ndbapi_event.cpp
+ */
+
/**
@page secAdapt Adaptive Send Algorithm
diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp
index 553d85f4129..2100260dab3 100644
--- a/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/ndb/include/ndbapi/NdbDictionary.hpp
@@ -52,6 +52,7 @@ typedef struct charset_info_st CHARSET_INFO;
* -# Dropping secondary indexes (Dictionary::dropIndex)
*
* NdbDictionary has several help (inner) classes to support this:
+ * -# NdbDictionary::Dictionary for handling dictionary objects
* -# NdbDictionary::Table for creating tables
* -# NdbDictionary::Column for creating table columns
* -# NdbDictionary::Index for creating secondary indexes
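+ *
+ * For illustration only, a minimal sketch of creating a table through the
+ * dictionary (table and column names are placeholders; the Date type used
+ * here is one of the column types listed below):
+ * @code
+ * NdbDictionary::Dictionary* dict = myNdb->getDictionary();
+ * NdbDictionary::Table tab("my_table");
+ * NdbDictionary::Column pk("id");
+ * pk.setType(NdbDictionary::Column::Unsigned);
+ * pk.setPrimaryKey(true);
+ * tab.addColumn(pk);
+ * NdbDictionary::Column d("d");
+ * d.setType(NdbDictionary::Column::Date);
+ * tab.addColumn(d);
+ * if (dict->createTable(tab) == -1) {
+ *   // inspect dict->getNdbError() on failure
+ * }
+ * @endcode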
@@ -189,12 +190,13 @@ public:
Binary = NDB_TYPE_BINARY, ///< Len
Varbinary = NDB_TYPE_VARBINARY, ///< Length bytes: 1, Max: 255
Datetime = NDB_TYPE_DATETIME, ///< Precision down to 1 sec (sizeof(Datetime) == 8 bytes )
- Timespec = NDB_TYPE_TIMESPEC, ///< Precision down to 1 nsec(sizeof(Datetime) == 12 bytes )
+ Date = NDB_TYPE_DATE, ///< Precision down to 1 day(sizeof(Date) == 4 bytes )
Blob = NDB_TYPE_BLOB, ///< Binary large object (see NdbBlob)
Text = NDB_TYPE_TEXT, ///< Text blob
Bit = NDB_TYPE_BIT, ///< Bit, length specifies no of bits
Longvarchar = NDB_TYPE_LONG_VARCHAR, ///< Length bytes: 2, little-endian
- Longvarbinary = NDB_TYPE_LONG_VARBINARY ///< Length bytes: 2, little-endian
+ Longvarbinary = NDB_TYPE_LONG_VARBINARY, ///< Length bytes: 2, little-endian
+ Time = NDB_TYPE_TIME ///< Time without date
};
/**
@@ -909,6 +911,9 @@ public:
*/
class Event : public Object {
public:
+ /**
+ * Specifies the type of database operations an Event listens to
+ */
enum TableEvent {
TE_INSERT=1, ///< Insert event on table
TE_DELETE=2, ///< Delete event on table
@@ -916,6 +921,10 @@ public:
TE_ALL=7 ///< Any/all event on table (not relevant when
///< events are received)
};
+ /**
+ * Specifies the durability of an event
+ * (future versions may supply other types)
+ */
enum EventDurability {
ED_UNDEFINED
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
@@ -930,8 +939,8 @@ public:
// All API's can use it,
// But's its removed when ndb is restarted
#endif
- ,ED_PERMANENT ///< All API's can use it,
- ///< It's still defined after a restart
+ ,ED_PERMANENT ///< All API's can use it.
+ ///< It's still defined after a cluster system restart
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
= 3
#endif
@@ -950,9 +959,12 @@ public:
Event(const char *name, const NdbDictionary::Table& table);
virtual ~Event();
/**
- * Set/get unique identifier for the event
+ * Set unique identifier for the event
*/
void setName(const char *name);
+ /**
+ * Get unique identifier for the event
+ */
const char *getName() const;
/**
* Define table on which events should be detected
@@ -967,7 +979,7 @@ public:
/**
* Set table for which events should be detected
*
- * @note preferred way is using setTable(const NdbDictionary::Table)
+ * @note preferred way is using setTable(const NdbDictionary::Table&)
* or constructor with table object parameter
*/
void setTable(const char *tableName);
@@ -1224,9 +1236,12 @@ public:
#endif
/**
- * Remove table/index from local cache
+ * Remove table from local cache
*/
void removeCachedTable(const char * table);
+ /**
+ * Remove index from local cache
+ */
void removeCachedIndex(const char * index, const char * table);
diff --git a/ndb/include/ndbapi/NdbEventOperation.hpp b/ndb/include/ndbapi/NdbEventOperation.hpp
index c695b5acd86..4f8a05d589c 100644
--- a/ndb/include/ndbapi/NdbEventOperation.hpp
+++ b/ndb/include/ndbapi/NdbEventOperation.hpp
@@ -24,73 +24,79 @@ class NdbEventOperationImpl;
* @class NdbEventOperation
* @brief Class of operations for getting change events from database.
*
- * An NdbEventOperation object is instantiated by
- * Ndb::createEventOperation
+ * Brief description of how to work with events:
*
- * Prior to that an event must have been created in the Database through
- * NdbDictionary::createEvent
- *
- * The instance is removed by Ndb::dropEventOperation
+ * - An event, represented by an NdbDictionary::Event, is created in the
+ * Database through
+ * NdbDictionary::Dictionary::createEvent() (note that this can be done
+ * by any application or thread and not necessarily by the "listener")
+ * - To listen to events, an NdbEventOperation object is instantiated by
+ * Ndb::createEventOperation()
+ * - execute() starts the event flow. Use Ndb::pollEvents() to wait
+ * for an event to occur. Use next() to iterate
+ * through the events that have occurred.
+ * - The instance is removed by Ndb::dropEventOperation()
*
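+ * A very rough sketch of the flow described above (the event name, attribute
+ * name, buffer length and timeout are placeholders; see @ref ndbapi_event.cpp
+ * for a working example):
+ * @code
+ * // the event itself must already have been created via Dictionary::createEvent()
+ * NdbEventOperation* op = myNdb->createEventOperation("MY_EVENT", 100);
+ * NdbRecAttr* attr = op->getValue("ATTR_1");   // must be done before execute()
+ * op->execute();
+ * while (myNdb->pollEvents(1000) > 0) {
+ *   while (op->next() > 0) {
+ *     // attr now reflects the latest change; attr->isNull() == -1 means
+ *     // this attribute was not changed by the event
+ *   }
+ * }
+ * myNdb->dropEventOperation(op);
+ * @endcode
+ *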
* For more info see:
* @ref ndbapi_event.cpp
*
* Known limitations:
*
- * Maximum number of active NdbEventOperations are now set at compile time.
+ * - Maximum number of active NdbEventOperations is now set at compile time.
* Today 100. This will become a configuration parameter later.
- *
- * Maximum number of NdbEventOperations tied to same event are maximum 16
+ * - Maximum number of NdbEventOperations tied to the same event is 16
* per process.
*
* Known issues:
*
- * When several NdbEventOperation's are tied to the same event in the same
+ * - When several NdbEventOperations are tied to the same event in the same
* process they will share the circular buffer. The BufferLength will then
* be the same for all and decided by the first NdbEventOperation
* instantiation. Just make sure to instantiate the "largest" one first.
- *
- * Today all events INSERT/DELETE/UPDATE and all changed attributes are
+ * - Today all events INSERT/DELETE/UPDATE and all changed attributes are
* sent to the API, even if only specific attributes have been specified.
* These are however hidden from the user and only relevant data is shown
* after next().
- * However false exits from Ndb::pollEvents() may occur and thus
+ * - "False" exits from Ndb::pollEvents() may occur and thus
* the subsequent next() will return zero,
* since there was no available data. Just do Ndb::pollEvents() again.
- *
- * Event code does not check table schema version. Make sure to drop events
+ * - Event code does not check table schema version. Make sure to drop events
* after table is dropped. Will be fixed in later
* versions.
- *
- * If a node failure has occured not all events will be recieved
+ * - If a node failure has occurred, not all events will be received
* anymore. Drop NdbEventOperation and Create again after nodes are up
* again. Will be fixed in later versions.
*
* Test status:
- * Tests have been run on 1-node and 2-node systems
*
- * Known bugs:
- *
- * None, except if we can call some of the "issues" above bugs
+ * - Tests have been run on 1-node and 2-node systems
*
* Useful API programs:
*
- * ndb_select_all -d sys 'NDB$EVENTS_0'
- * Will show contents in the system table containing created events.
+ * - ndb_select_all -d sys 'NDB$EVENTS_0'
+ * shows contents in the system table containing created events.
*
+ * @note this is an interface for viewing events that is subject to change
*/
class NdbEventOperation {
public:
+ /**
+ * State of the NdbEventOperation object
+ */
+ enum State {
+ EO_CREATED, ///< Created but execute() not called
+ EO_EXECUTING, ///< execute() called
+ EO_ERROR ///< An error has occurred. Object unusable.
+ };
/**
* Retrieve current state of the NdbEventOperation object
*/
- enum State {CREATED,EXECUTING,ERROR};
State getState();
/**
* Activates the NdbEventOperation to start receiving events. The
* changed attribute values may be retrieved after next() has returned
- * a value greater than zero. The getValue() methods below must be called
+ * a value greater than zero. The getValue() methods must be called
* prior to execute().
*
* @return 0 if successful otherwise -1.
@@ -112,21 +118,21 @@ public:
* aligned appropriately. The buffer is used directly
* (avoiding a copy penalty) only if it is aligned on a
* 4-byte boundary and the attribute size in bytes
- * (i.e. NdbRecAttr::attrSize times NdbRecAttr::arraySize is
+ * (i.e. NdbRecAttr::attrSize() times NdbRecAttr::arraySize() is
* a multiple of 4).
*
- * @note There are two versions, NdbOperation::getValue and
- * NdbOperation::getPreValue for retrieving the current and
+ * @note There are two versions, getValue() and
+ * getPreValue(), for retrieving the current and
* previous value respectively.
*
* @note This method does not fetch the attribute value from
* the database! The NdbRecAttr object returned by this method
* is not readable/printable before the
- * NdbEventConnection::execute has been made and
- * NdbEventConnection::next has returned a value greater than
+ * execute() call has been made and
+ * next() has returned a value greater than
* zero. If a specific attribute has not changed the corresponding
* NdbRecAttr will be in state UNDEFINED. This is checked by
- * NdbRecAttr::isNull which then returns -1.
+ * NdbRecAttr::isNull() which then returns -1.
*
* @param anAttrName Attribute name
* @param aValue If this is non-NULL, then the attribute value
@@ -143,11 +149,11 @@ public:
/**
* Retrieves event resultset if available, inserted into the NdbRecAttrs
* specified in getValue() and getPreValue(). To avoid polling for
- * a resultset, one can use Ndb::pollEvents
+ * a resultset, one can use Ndb::pollEvents()
* which will wait on a mutex until an event occurs or the specified
* timeout occurs.
*
- * @return >=0 if successful otherwise -1. Return value inicates number
+ * @return >=0 if successful otherwise -1. Return value indicates number
* of available events. By sending pOverRun one may query for buffer
* overflow and *pOverRun will indicate the number of events that have
* overwritten.
diff --git a/ndb/include/util/NdbSqlUtil.hpp b/ndb/include/util/NdbSqlUtil.hpp
index feb2b97c54b..75e2a819174 100644
--- a/ndb/include/util/NdbSqlUtil.hpp
+++ b/ndb/include/util/NdbSqlUtil.hpp
@@ -86,12 +86,13 @@ public:
Binary = NDB_TYPE_BINARY,
Varbinary = NDB_TYPE_VARBINARY,
Datetime = NDB_TYPE_DATETIME,
- Timespec = NDB_TYPE_TIMESPEC,
+ Date = NDB_TYPE_DATE,
Blob = NDB_TYPE_BLOB,
Text = NDB_TYPE_TEXT,
Bit = NDB_TYPE_BIT,
Longvarchar = NDB_TYPE_LONG_VARCHAR,
- Longvarbinary = NDB_TYPE_LONG_VARBINARY
+ Longvarbinary = NDB_TYPE_LONG_VARBINARY,
+ Time = NDB_TYPE_TIME
};
Enum m_typeId; // redundant
Cmp* m_cmp; // comparison method
@@ -153,12 +154,13 @@ private:
static Cmp cmpBinary;
static Cmp cmpVarbinary;
static Cmp cmpDatetime;
- static Cmp cmpTimespec;
+ static Cmp cmpDate;
static Cmp cmpBlob;
static Cmp cmpText;
static Cmp cmpBit;
static Cmp cmpLongvarchar;
static Cmp cmpLongvarbinary;
+ static Cmp cmpTime;
};
#endif
diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp
index fd23781605c..1e280ae0fac 100644
--- a/ndb/src/common/util/NdbSqlUtil.cpp
+++ b/ndb/src/common/util/NdbSqlUtil.cpp
@@ -153,8 +153,8 @@ NdbSqlUtil::m_typeList[] = {
cmpDatetime
},
{
- Type::Timespec,
- NULL // cmpTimespec
+ Type::Date,
+ cmpDate
},
{
Type::Blob,
@@ -175,6 +175,10 @@ NdbSqlUtil::m_typeList[] = {
{
Type::Longvarbinary,
cmpLongvarbinary
+ },
+ {
+ Type::Time,
+ cmpTime
}
};
@@ -507,19 +511,57 @@ NdbSqlUtil::cmpVarbinary(const void* info, const void* p1, unsigned n1, const vo
return CmpUnknown;
}
-// allowed but ordering is wrong before wl-1442 done
int
NdbSqlUtil::cmpDatetime(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- return cmpBinary(info, p1, n1, p2, n2, full);
+ if (n2 >= sizeof(Int64)) {
+ Int64 v1, v2;
+ memcpy(&v1, p1, sizeof(Int64));
+ memcpy(&v2, p2, sizeof(Int64));
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
-// not used by MySQL or NDB
int
-NdbSqlUtil::cmpTimespec(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
+NdbSqlUtil::cmpDate(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(false);
- return 0;
+#ifdef ndb_date_is_4_byte_native_int
+ if (n2 >= sizeof(Int32)) {
+ Int32 v1, v2;
+ memcpy(&v1, p1, sizeof(Int32));
+ memcpy(&v2, p2, sizeof(Int32));
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
+#else
+ if (n2 >= 4) { // may access 4-th byte
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ // from Field_newdate::val_int
+ Uint64 j1 = uint3korr(v1);
+ Uint64 j2 = uint3korr(v2);
+ j1 = (j1 % 32L)+(j1 / 32L % 16L)*100L + (j1/(16L*32L))*10000L;
+ j2 = (j2 % 32L)+(j2 / 32L % 16L)*100L + (j2/(16L*32L))*10000L;
+ if (j1 < j2)
+ return -1;
+ if (j1 > j2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
+#endif
}
// not supported
@@ -538,6 +580,25 @@ NdbSqlUtil::cmpText(const void* info, const void* p1, unsigned n1, const void* p
return 0;
}
+int
+NdbSqlUtil::cmpTime(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
+{
+ if (n2 >= 4) { // may access 4-th byte
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ // from Field_time::val_int
+ Int32 j1 = sint3korr(v1);
+ Int32 j2 = sint3korr(v2);
+ if (j1 < j2)
+ return -1;
+ if (j1 > j2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
+}
+
// not yet
int
NdbSqlUtil::cmpBit(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index 6d4ca2d9078..5e79fc1c28f 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -2364,7 +2364,8 @@ Dbtc::handle_special_hash(Uint32 dstHash[4], Uint32* src, Uint32 srcLen,
*/
Uint32 dstLen = xmul * (srcBytes - lb);
ndbrequire(dstLen <= ((dstSize - dstPos) << 2));
- uint n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+ int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+ ndbrequire(n != -1);
while ((n & 3) != 0) {
dstPtr[n++] = 0;
}
diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp
index 166bd45d48b..a5e01919781 100644
--- a/ndb/src/mgmapi/mgmapi.cpp
+++ b/ndb/src/mgmapi/mgmapi.cpp
@@ -629,7 +629,6 @@ ndb_mgm_get_status(NdbMgmHandle handle)
malloc(sizeof(ndb_mgm_cluster_state)+
noOfNodes*(sizeof(ndb_mgm_node_state)+sizeof("000.000.000.000#")));
- state->hostname= 0;
state->no_of_nodes= noOfNodes;
ndb_mgm_node_state * ptr = &state->node_states[0];
int nodeId = 0;
@@ -1046,6 +1045,7 @@ struct ndb_mgm_event_categories
{ "CHECKPOINT", NDB_MGM_EVENT_CATEGORY_CHECKPOINT },
{ "DEBUG", NDB_MGM_EVENT_CATEGORY_DEBUG },
{ "INFO", NDB_MGM_EVENT_CATEGORY_INFO },
+ { "WARNING", NDB_MGM_EVENT_CATEGORY_WARNING },
{ "ERROR", NDB_MGM_EVENT_CATEGORY_ERROR },
{ "GREP", NDB_MGM_EVENT_CATEGORY_GREP },
{ "BACKUP", NDB_MGM_EVENT_CATEGORY_BACKUP },
diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp
index baa34b2006e..5ffb087cde7 100644
--- a/ndb/src/ndbapi/Ndb.cpp
+++ b/ndb/src/ndbapi/Ndb.cpp
@@ -1177,7 +1177,14 @@ NdbEventOperation* Ndb::createEventOperation(const char* eventName,
tOp = new NdbEventOperation(this, eventName, bufferLength);
- if (tOp->getState() != NdbEventOperation::CREATED) {
+ if (tOp == 0)
+ {
+ theError.code= 4000;
+ return NULL;
+ }
+
+ if (tOp->getState() != NdbEventOperation::EO_CREATED) {
+ theError.code= tOp->getNdbError().code;
delete tOp;
tOp = NULL;
}
diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp
index db912995b5f..4221c22121d 100644
--- a/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/ndb/src/ndbapi/NdbDictionary.cpp
@@ -956,8 +956,8 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
case NdbDictionary::Column::Datetime:
out << "Datetime";
break;
- case NdbDictionary::Column::Timespec:
- out << "Timespec";
+ case NdbDictionary::Column::Date:
+ out << "Date";
break;
case NdbDictionary::Column::Blob:
out << "Blob(" << col.getInlineSize() << "," << col.getPartSize()
@@ -967,6 +967,9 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
out << "Text(" << col.getInlineSize() << "," << col.getPartSize()
<< ";" << col.getStripeSize() << ";" << csname << ")";
break;
+ case NdbDictionary::Column::Time:
+ out << "Time";
+ break;
case NdbDictionary::Column::Undefined:
out << "Undefined";
break;
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 13f9d0c48e1..07a186d8850 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -125,7 +125,7 @@ NdbColumnImpl::init(Type t)
case Binary:
case Varbinary:
case Datetime:
- case Timespec:
+ case Date:
m_precision = 0;
m_scale = 0;
m_length = 1;
@@ -143,6 +143,12 @@ NdbColumnImpl::init(Type t)
m_length = 4;
m_cs = default_cs;
break;
+ case Time:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
case Bit:
m_precision = 0;
m_scale = 0;
diff --git a/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index 813a3a5f861..69c05dcb0b7 100644
--- a/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -55,9 +55,8 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,
const char* eventName,
const int bufferLength)
: NdbEventOperation(*this), m_ndb(theNdb),
- m_state(ERROR), m_bufferL(bufferLength)
+ m_state(EO_ERROR), m_bufferL(bufferLength)
{
-
m_eventId = 0;
theFirstRecAttrs[0] = NULL;
theCurrentRecAttrs[0] = NULL;
@@ -71,16 +70,15 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,
// we should lookup id in Dictionary, TODO
// also make sure we only have one listener on each event
- if (!m_ndb) { ndbout_c("m_ndb=NULL"); return; }
+ if (!m_ndb) abort();
NdbDictionary::Dictionary *myDict = m_ndb->getDictionary();
- if (!myDict) { ndbout_c("getDictionary=NULL"); return; }
+ if (!myDict) { m_error.code= m_ndb->getNdbError().code; return; }
const NdbDictionary::Event *myEvnt = myDict->getEvent(eventName);
- if (!myEvnt) { ndbout_c("getEvent()=NULL"); return; }
+ if (!myEvnt) { m_error.code= myDict->getNdbError().code; return; }
m_eventImpl = &myEvnt->m_impl;
- if (!m_eventImpl) { ndbout_c("m_impl=NULL"); return; }
m_bufferHandle = m_ndb->getGlobalEventBufferHandle();
if (m_bufferHandle->m_bufferL > 0)
@@ -88,7 +86,7 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,
else
m_bufferHandle->m_bufferL = m_bufferL;
- m_state = CREATED;
+ m_state = EO_CREATED;
}
NdbEventOperationImpl::~NdbEventOperationImpl()
@@ -106,7 +104,7 @@ NdbEventOperationImpl::~NdbEventOperationImpl()
p = p_next;
}
}
- if (m_state == NdbEventOperation::EXECUTING) {
+ if (m_state == EO_EXECUTING) {
stop();
// m_bufferHandle->dropSubscribeEvent(m_bufferId);
; // We should send stop signal here
@@ -122,7 +120,7 @@ NdbEventOperationImpl::getState()
NdbRecAttr*
NdbEventOperationImpl::getValue(const char *colName, char *aValue, int n)
{
- if (m_state != NdbEventOperation::CREATED) {
+ if (m_state != EO_CREATED) {
ndbout_c("NdbEventOperationImpl::getValue may only be called between instantiation and execute()");
return NULL;
}
@@ -211,8 +209,8 @@ NdbEventOperationImpl::execute()
{
NdbDictionary::Dictionary *myDict = m_ndb->getDictionary();
if (!myDict) {
- ndbout_c("NdbEventOperation::execute(): getDictionary=NULL");
- return 0;
+ m_error.code= m_ndb->getNdbError().code;
+ return -1;
}
if (theFirstRecAttrs[0] == NULL) { // defaults to get all
@@ -245,14 +243,14 @@ NdbEventOperationImpl::execute()
if (r) {
//Error
m_bufferHandle->unprepareAddSubscribeEvent(m_bufferId);
- m_state = NdbEventOperation::ERROR;
+ m_state = EO_ERROR;
} else {
m_bufferHandle->addSubscribeEvent(m_bufferId, this);
- m_state = NdbEventOperation::EXECUTING;
+ m_state = EO_EXECUTING;
}
} else {
//Error
- m_state = NdbEventOperation::ERROR;
+ m_state = EO_ERROR;
}
return r;
}
@@ -261,14 +259,14 @@ int
NdbEventOperationImpl::stop()
{
DBUG_ENTER("NdbEventOperationImpl::stop");
- if (m_state != NdbEventOperation::EXECUTING)
+ if (m_state != EO_EXECUTING)
DBUG_RETURN(-1);
// ndbout_c("NdbEventOperation::stopping()");
NdbDictionary::Dictionary *myDict = m_ndb->getDictionary();
if (!myDict) {
- ndbout_c("NdbEventOperation::stop(): getDictionary=NULL");
+ m_error.code= m_ndb->getNdbError().code;
DBUG_RETURN(-1);
}
@@ -299,13 +297,13 @@ NdbEventOperationImpl::stop()
//Error
m_bufferHandle->unprepareDropSubscribeEvent(m_bufferId);
m_error.code= myDictImpl.m_error.code;
- m_state = NdbEventOperation::ERROR;
+ m_state = EO_ERROR;
} else {
#ifdef EVENT_DEBUG
ndbout_c("NdbEventOperation::dropping()");
#endif
m_bufferHandle->dropSubscribeEvent(m_bufferId);
- m_state = NdbEventOperation::CREATED;
+ m_state = EO_CREATED;
}
DBUG_RETURN(r);
diff --git a/ndb/test/include/NdbSchemaOp.hpp b/ndb/test/include/NdbSchemaOp.hpp
index ac859f8abe8..e2fb4015b88 100644
--- a/ndb/test/include/NdbSchemaOp.hpp
+++ b/ndb/test/include/NdbSchemaOp.hpp
@@ -576,7 +576,8 @@ convertColumnTypeToAttrType(NdbDictionary::Column::Type _type)
case NdbDictionary::Column::Varbinary:
return String;
case NdbDictionary::Column::Datetime:
- case NdbDictionary::Column::Timespec:
+ case NdbDictionary::Column::Date:
+ case NdbDictionary::Column::Time:
case NdbDictionary::Column::Undefined:
default:
return NoAttrTypeDef;
diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp
index 8a09a2ec9d1..0f31a30f1a7 100644
--- a/ndb/test/ndbapi/testOIBasic.cpp
+++ b/ndb/test/ndbapi/testOIBasic.cpp
@@ -1978,7 +1978,7 @@ Val::cmpchars(Par par, const unsigned char* buf1, unsigned len1, const unsigned
unsigned len = maxxmulsize * col.m_bytelength;
int n1 = NdbSqlUtil::strnxfrm_bug7284(cs, x1, chs->m_xmul * len, buf1, len1);
int n2 = NdbSqlUtil::strnxfrm_bug7284(cs, x2, chs->m_xmul * len, buf2, len2);
- assert(n1 == n2);
+ assert(n1 != -1 && n1 == n2);
k = memcmp(x1, x2, n1);
} else {
k = (*cs->coll->strnncollsp)(cs, buf1, len1, buf2, len2, false);
diff --git a/ndb/tools/restore/consumer.cpp b/ndb/tools/restore/consumer.cpp
index e94c31b2666..4d228230423 100644
--- a/ndb/tools/restore/consumer.cpp
+++ b/ndb/tools/restore/consumer.cpp
@@ -71,7 +71,10 @@ BackupConsumer::create_table_string(const TableS & table,
case NdbDictionary::Column::Datetime:
pos += sprintf(buf+pos, "%s", "datetime");
break;
- case NdbDictionary::Column::Timespec:
+ case NdbDictionary::Column::Date:
+ pos += sprintf(buf+pos, "%s", "date");
+ break;
+ case NdbDictionary::Column::Time:
pos += sprintf(buf+pos, "%s", "time");
break;
case NdbDictionary::Column::Undefined:
diff --git a/pstack/pstack.c b/pstack/pstack.c
index 75869686e35..4cdd80d68b5 100644
--- a/pstack/pstack.c
+++ b/pstack/pstack.c
@@ -1663,7 +1663,7 @@ pr_tag_type (p, name, id, kind)
{
struct pr_handle *info = (struct pr_handle *) p;
const char *t, *tag;
- char idbuf[20];
+ char idbuf[30];
switch (kind)
{
diff --git a/scripts/mysql_fix_privilege_tables.sql b/scripts/mysql_fix_privilege_tables.sql
index 975d8dddbb0..9373a888bc2 100644
--- a/scripts/mysql_fix_privilege_tables.sql
+++ b/scripts/mysql_fix_privilege_tables.sql
@@ -93,10 +93,10 @@ CREATE TABLE IF NOT EXISTS tables_priv (
CREATE TABLE IF NOT EXISTS columns_priv (
Host char(60) DEFAULT '' NOT NULL,
- Db char(60) DEFAULT '' NOT NULL,
+ Db char(64) DEFAULT '' NOT NULL,
User char(16) DEFAULT '' NOT NULL,
- Table_name char(60) DEFAULT '' NOT NULL,
- Column_name char(59) DEFAULT '' NOT NULL,
+ Table_name char(64) DEFAULT '' NOT NULL,
+ Column_name char(64) DEFAULT '' NOT NULL,
Timestamp timestamp(14),
Column_priv set('Select','Insert','Update','References') DEFAULT '' NOT NULL,
PRIMARY KEY (Host,Db,User,Table_name,Column_name)
diff --git a/sql/field.cc b/sql/field.cc
index d15db92e51f..8e0fddae332 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -2440,23 +2440,7 @@ int Field_float::store(double nr)
int Field_float::store(longlong nr)
{
- int error= 0;
- float j= (float) nr;
- if (unsigned_flag && j < 0)
- {
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
- j=0;
- error= 1;
- }
-#ifdef WORDS_BIGENDIAN
- if (table->db_low_byte_first)
- {
- float4store(ptr,j);
- }
- else
-#endif
- memcpy_fixed(ptr,(byte*) &j,sizeof(j));
- return error;
+ return store((double)nr);
}
@@ -2738,23 +2722,7 @@ int Field_double::store(double nr)
int Field_double::store(longlong nr)
{
- double j= (double) nr;
- int error= 0;
- if (unsigned_flag && j < 0)
- {
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
- error= 1;
- j=0;
- }
-#ifdef WORDS_BIGENDIAN
- if (table->db_low_byte_first)
- {
- float8store(ptr,j);
- }
- else
-#endif
- doublestore(ptr,j);
- return error;
+ return store((double)nr);
}
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 31b15f89806..0bca8c21715 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -3907,7 +3907,7 @@ ha_innobase::create(
error = create_table_def(trx, form, norm_name,
create_info->options & HA_LEX_CREATE_TMP_TABLE ? name2 : NULL,
- !(form->s->db_options_in_use & HA_OPTION_PACK_RECORD));
+ form->s->row_type != ROW_TYPE_REDUNDANT);
if (error) {
innobase_commit_low(trx);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 767ddc40400..b1fbe392940 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2393,13 +2393,15 @@ void ha_ndbcluster::print_results()
break;
}
case NdbDictionary::Column::Datetime: {
- // todo
- my_snprintf(buf, sizeof(buf), "Datetime ?");
+ my_snprintf(buf, sizeof(buf), "Datetime ?"); // fix-me
break;
}
- case NdbDictionary::Column::Timespec: {
- // todo
- my_snprintf(buf, sizeof(buf), "Timespec ?");
+ case NdbDictionary::Column::Date: {
+ my_snprintf(buf, sizeof(buf), "Date ?"); // fix-me
+ break;
+ }
+ case NdbDictionary::Column::Time: {
+ my_snprintf(buf, sizeof(buf), "Time ?"); // fix-me
break;
}
case NdbDictionary::Column::Blob: {
@@ -3420,6 +3422,9 @@ int ndbcluster_rollback(THD *thd, void *ndb_transaction)
Define NDB column based on Field.
Returns 0 or mysql error code.
Not member of ha_ndbcluster because NDBCOL cannot be declared.
+
+ MySQL text types with character set "binary" are mapped to true
+ NDB binary types without a character set. This may change.
*/
static int create_ndb_column(NDBCOL &col,
@@ -3495,9 +3500,15 @@ static int create_ndb_column(NDBCOL &col,
col.setType(NDBCOL::Datetime);
col.setLength(1);
break;
- case MYSQL_TYPE_DATE:
case MYSQL_TYPE_NEWDATE:
+ col.setType(NDBCOL::Date);
+ col.setLength(1);
+ break;
case MYSQL_TYPE_TIME:
+ col.setType(NDBCOL::Time);
+ col.setLength(1);
+ break;
+ case MYSQL_TYPE_DATE: // ?
case MYSQL_TYPE_YEAR:
col.setType(NDBCOL::Char);
col.setLength(field->pack_length());
@@ -3509,7 +3520,7 @@ static int create_ndb_column(NDBCOL &col,
col.setType(NDBCOL::Bit);
col.setLength(1);
}
- else if (field->flags & BINARY_FLAG)
+ else if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
{
col.setType(NDBCOL::Binary);
col.setLength(field->pack_length());
@@ -3527,7 +3538,7 @@ static int create_ndb_column(NDBCOL &col,
Field_varstring* f= (Field_varstring*)field;
if (f->length_bytes == 1)
{
- if (field->flags & BINARY_FLAG)
+ if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
col.setType(NDBCOL::Varbinary);
else {
col.setType(NDBCOL::Varchar);
@@ -3536,7 +3547,7 @@ static int create_ndb_column(NDBCOL &col,
}
else if (f->length_bytes == 2)
{
- if (field->flags & BINARY_FLAG)
+ if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
col.setType(NDBCOL::Longvarbinary);
else {
col.setType(NDBCOL::Longvarchar);
@@ -3553,7 +3564,7 @@ static int create_ndb_column(NDBCOL &col,
// Blob types (all come in as MYSQL_TYPE_BLOB)
mysql_type_tiny_blob:
case MYSQL_TYPE_TINY_BLOB:
- if (field->flags & BINARY_FLAG)
+ if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
col.setType(NDBCOL::Blob);
else {
col.setType(NDBCOL::Text);
@@ -3566,7 +3577,7 @@ static int create_ndb_column(NDBCOL &col,
break;
//mysql_type_blob:
case MYSQL_TYPE_BLOB:
- if (field->flags & BINARY_FLAG)
+ if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
col.setType(NDBCOL::Blob);
else {
col.setType(NDBCOL::Text);
@@ -3588,7 +3599,7 @@ static int create_ndb_column(NDBCOL &col,
break;
mysql_type_medium_blob:
case MYSQL_TYPE_MEDIUM_BLOB:
- if (field->flags & BINARY_FLAG)
+ if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
col.setType(NDBCOL::Blob);
else {
col.setType(NDBCOL::Text);
@@ -3600,7 +3611,7 @@ static int create_ndb_column(NDBCOL &col,
break;
mysql_type_long_blob:
case MYSQL_TYPE_LONG_BLOB:
- if (field->flags & BINARY_FLAG)
+ if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin)
col.setType(NDBCOL::Blob);
else {
col.setType(NDBCOL::Text);
diff --git a/sql/handler.cc b/sql/handler.cc
index bbe01dd93d5..b1b741dfee9 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -101,7 +101,7 @@ struct show_table_type_st sys_table_types[]=
};
const char *ha_row_type[] = {
- "", "FIXED", "DYNAMIC", "COMPRESSED","?","?","?"
+ "", "FIXED", "DYNAMIC", "COMPRESSED", "REDUNDANT", "COMPACT", "?","?","?"
};
const char *tx_isolation_names[] =
diff --git a/sql/handler.h b/sql/handler.h
index b10e6bfe88c..e5a794ca1b2 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -167,7 +167,8 @@ struct show_table_type_st {
};
enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED,
- ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED};
+ ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED,
+ ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT };
/* struct to hold information about the table that should be created */
diff --git a/sql/lex.h b/sql/lex.h
index 56d824b7bb8..871d1d99750 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -116,6 +116,7 @@ static SYMBOL symbols[] = {
{ "COMMENT", SYM(COMMENT_SYM)},
{ "COMMIT", SYM(COMMIT_SYM)},
{ "COMMITTED", SYM(COMMITTED_SYM)},
+ { "COMPACT", SYM(COMPACT_SYM)},
{ "COMPRESSED", SYM(COMPRESSED_SYM)},
{ "CONCURRENT", SYM(CONCURRENT)},
{ "CONDITION", SYM(CONDITION_SYM)},
@@ -378,6 +379,7 @@ static SYMBOL symbols[] = {
{ "READ", SYM(READ_SYM)},
{ "READS", SYM(READS_SYM)},
{ "REAL", SYM(REAL)},
+ { "REDUNDANT", SYM(REDUNDANT_SYM)},
{ "REFERENCES", SYM(REFERENCES)},
{ "REGEXP", SYM(REGEXP)},
{ "RELAY_LOG_FILE", SYM(RELAY_LOG_FILE_SYM)},
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index fa23502ea93..6b1456dfbd3 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -227,6 +227,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token COLLATION_SYM
%token COLUMNS
%token COLUMN_SYM
+%token COMPACT_SYM
%token CONCURRENT
%token CONDITION_SYM
%token CONNECTION_SYM
@@ -381,6 +382,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token READ_SYM
%token READS_SYM
%token REAL_NUM
+%token REDUNDANT_SYM
%token REFERENCES
%token REGEXP
%token RELOAD
@@ -2628,7 +2630,9 @@ row_types:
DEFAULT { $$= ROW_TYPE_DEFAULT; }
| FIXED_SYM { $$= ROW_TYPE_FIXED; }
| DYNAMIC_SYM { $$= ROW_TYPE_DYNAMIC; }
- | COMPRESSED_SYM { $$= ROW_TYPE_COMPRESSED; };
+ | COMPRESSED_SYM { $$= ROW_TYPE_COMPRESSED; }
+ | REDUNDANT_SYM { $$= ROW_TYPE_REDUNDANT; }
+ | COMPACT_SYM { $$= ROW_TYPE_COMPACT; };
raid_types:
RAID_STRIPED_SYM { $$= RAID_TYPE_0; }
@@ -6915,6 +6919,7 @@ keyword:
| COMMENT_SYM {}
| COMMITTED_SYM {}
| COMMIT_SYM {}
+ | COMPACT_SYM {}
| COMPRESSED_SYM {}
| CONCURRENT {}
| CONSISTENT_SYM {}
@@ -7046,6 +7051,7 @@ keyword:
| RAID_CHUNKSIZE {}
| RAID_STRIPED_SYM {}
| RAID_TYPE {}
+ | REDUNDANT_SYM {}
| RELAY_LOG_FILE_SYM {}
| RELAY_LOG_POS_SYM {}
| RELOAD {}