Merge whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-opt
into whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-single-user
Commit: 19a2814f47
@@ -2989,3 +2989,4 @@ win/vs71cache.txt
 win/vs8cache.txt
 zlib/*.ds?
 zlib/*.vcproj
+libmysqld/ha_ndbcluster_cond.cc
@@ -197,7 +197,6 @@ AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [
 
   MAKE_BINARY_DISTRIBUTION_OPTIONS="$MAKE_BINARY_DISTRIBUTION_OPTIONS --with-ndbcluster"
 
-  CXXFLAGS="$CXXFLAGS \$(NDB_CXXFLAGS)"
   if test "$have_ndb_debug" = "default"
   then
     have_ndb_debug=$with_debug
@@ -11,12 +11,14 @@ MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
 MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
 TimeBetweenGlobalCheckpoints= 500
 NoOfFragmentLogFiles= 3
-DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory
+DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory
 
 #
-# Increase deadlock-timeout to cater for slow test-machines
+# Increase timeouts to cater for slow test-machines
 # (possibly running several tests in parallell)
 #
+HeartbeatIntervalDbDb= 30000
+HeartbeatIntervalDbApi= 30000
 #TransactionDeadlockDetectionTimeout= 7500
 
 [ndbd]
@@ -17,9 +17,11 @@ DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory
 InitialNoOfOpenFiles= 27
 
 #
-# Increase deadlock-timeout to cater for slow test-machines
+# Increase timeouts to cater for slow test-machines
 # (possibly running several tests in parallell)
 #
+HeartbeatIntervalDbDb= 30000
+HeartbeatIntervalDbApi= 30000
 #TransactionDeadlockDetectionTimeout= 7500
 
 [ndbd]
@@ -11,12 +11,14 @@ MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
 MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
 TimeBetweenGlobalCheckpoints= 500
 NoOfFragmentLogFiles= 3
-DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory
+DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory
 
 #
-# Increase deadlock-timeout to cater for slow test-machines
+# Increase timeouts to cater for slow test-machines
 # (possibly running several tests in parallell)
 #
+HeartbeatIntervalDbDb= 30000
+HeartbeatIntervalDbApi= 30000
 #TransactionDeadlockDetectionTimeout= 7500
 
 [ndbd]
@@ -382,7 +382,7 @@ create table t1 (a int primary key) engine=ndb;
 select * from t1;
 a
 select * from t1;
-ERROR HY000: Can't lock file (errno: 4009)
+ERROR HY000: Can't lock file (errno: 157)
 use test;
 drop database test_only_ndb_tables;
 CREATE TABLE t9 (
@@ -667,6 +667,12 @@ counter datavalue
 31 newval
 32 newval
 drop table t1;
+create table t1 (a int primary key auto_increment) engine = ndb;
+insert into t1() values (),(),(),(),(),(),(),(),(),(),(),();
+insert into t1(a) values (20),(28);
+insert into t1() values (),(),(),(),(),(),(),(),(),(),(),();
+insert into t1() values (21), (22);
+drop table t1;
 CREATE TABLE t1 ( b INT ) PACK_KEYS = 0 ENGINE = ndb;
 select * from t1;
 b
@@ -620,6 +620,21 @@ select * from t1 order by counter;
 
 drop table t1;
 
+#
+# bug#27437
+connection con1;
+create table t1 (a int primary key auto_increment) engine = ndb;
+insert into t1() values (),(),(),(),(),(),(),(),(),(),(),();
+connection con2;
+insert into t1(a) values (20),(28);
+connection con1;
+insert into t1() values (),(),(),(),(),(),(),(),(),(),(),();
+connection con2;
+insert into t1() values (21), (22);
+connection con1;
+
+drop table t1;
+
 #
 # BUG#14514 Creating table with packed key fails silently
 #
@@ -1 +1 @@
---default-storage-engine=ndbcluster
+--default-storage-engine=ndbcluster
@@ -6561,9 +6561,9 @@ int ndbcluster_table_exists_in_engine(handlerton *hton, THD* thd,
     if (my_strcasecmp(system_charset_info, elmt.name, name))
       continue;
     DBUG_PRINT("info", ("Found table"));
-    DBUG_RETURN(1);
+    DBUG_RETURN(HA_ERR_TABLE_EXIST);
   }
-  DBUG_RETURN(0);
+  DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
 }
 
 
@@ -6927,7 +6927,7 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
       DBUG_PRINT("info", ("%s existed on disk", name));
       // The .ndb file exists on disk, but it's not in list of tables in ndb
       // Verify that handler agrees table is gone.
-      if (ndbcluster_table_exists_in_engine(hton, thd, db, file_name) == 0)
+      if (ndbcluster_table_exists_in_engine(hton, thd, db, file_name) == HA_ERR_NO_SUCH_TABLE)
       {
         DBUG_PRINT("info", ("NDB says %s does not exists", file_name));
         it.remove();
@@ -3808,6 +3808,17 @@ restart:
       res= i_ndb->pollEvents(tot_poll_wait, &gci);
       tot_poll_wait= 0;
     }
+    else
+    {
+      /*
+        Just consume any events, not used if no binlogging
+        e.g. node failure events
+      */
+      Uint64 tmp_gci;
+      if (i_ndb->pollEvents(0, &tmp_gci))
+        while (i_ndb->nextEvent())
+          ;
+    }
     int schema_res= s_ndb->pollEvents(tot_poll_wait, &schema_gci);
     ndb_latest_received_binlog_epoch= gci;
 
@@ -2886,20 +2886,21 @@ ha_find_files(THD *thd,const char *db,const char *path,
   DBUG_RETURN(error);
 }
 
 
-/** @brief
+/*
   Ask handler if the table exists in engine
 
   RETURN
-    0                   Table does not exist
-    1                   Table exists
-    #                   Error code
+    HA_ERR_NO_SUCH_TABLE     Table does not exist
+    HA_ERR_TABLE_EXIST       Table exists
+    #                        Error code
-
- */
+
+*/
 struct st_table_exists_in_engine_args
 {
   const char *db;
   const char *name;
+  int err;
 };
 
 static my_bool table_exists_in_engine_handlerton(THD *thd, plugin_ref plugin,
@@ -2908,23 +2909,27 @@ static my_bool table_exists_in_engine_handlerton(THD *thd, plugin_ref plugin,
   st_table_exists_in_engine_args *vargs= (st_table_exists_in_engine_args *)arg;
   handlerton *hton= plugin_data(plugin, handlerton *);
 
+  int err= HA_ERR_NO_SUCH_TABLE;
+
   if (hton->state == SHOW_OPTION_YES && hton->table_exists_in_engine)
-    if ((hton->table_exists_in_engine(hton, thd, vargs->db, vargs->name)) == 1)
-      return TRUE;
+    err = hton->table_exists_in_engine(hton, thd, vargs->db, vargs->name);
+
+  vargs->err = err;
+  if (vargs->err == HA_ERR_TABLE_EXIST)
+    return TRUE;
 
   return FALSE;
 }
 
 int ha_table_exists_in_engine(THD* thd, const char* db, const char* name)
 {
-  int error= 0;
   DBUG_ENTER("ha_table_exists_in_engine");
   DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
-  st_table_exists_in_engine_args args= {db, name};
-  error= plugin_foreach(thd, table_exists_in_engine_handlerton,
+  st_table_exists_in_engine_args args= {db, name, HA_ERR_NO_SUCH_TABLE};
+  plugin_foreach(thd, table_exists_in_engine_handlerton,
                  MYSQL_STORAGE_ENGINE_PLUGIN, &args);
-  DBUG_PRINT("exit", ("error: %d", error));
-  DBUG_RETURN(error);
+  DBUG_PRINT("exit", ("error: %d", args.err));
+  DBUG_RETURN(args.err);
 }
 
 #ifdef HAVE_NDB_BINLOG
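Note: the hunks above change the exists-in-engine protocol from a 0/1 boolean to handler error codes, so a storage engine can now report a real lookup failure distinctly from "not found". A minimal sketch of the convention, with simplified surroundings (my_table_exists is a hypothetical callback used for illustration, not part of this patch):

    // The handlerton callback now answers with HA_ERR_* codes instead of 0/1:
    static int my_table_exists(handlerton *hton, THD *thd,
                               const char *db, const char *name)
    {
      bool found = false; /* engine-specific lookup of db.name goes here */
      if (found)
        return HA_ERR_TABLE_EXIST;    // table known to the engine
      return HA_ERR_NO_SUCH_TABLE;    // clean "not found", not an error
      // any other HA_ERR_* value now propagates as a genuine error
    }

Callers such as mysql_create_table_internal (further down in this diff) switch on the three cases instead of testing truthiness.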
@@ -368,7 +368,7 @@ handlerton *partition_hton;
 #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
 const char *opt_ndbcluster_connectstring= 0;
 const char *opt_ndb_connectstring= 0;
-char opt_ndb_constrbuf[1024];
+char opt_ndb_constrbuf[1024]= {0};
 unsigned opt_ndb_constrbuf_len= 0;
 my_bool opt_ndb_shm, opt_ndb_optimized_node_selection;
 ulong opt_ndb_cache_check_time;
@@ -58,8 +58,12 @@
 #include "events.h"
 
 /* WITH_NDBCLUSTER_STORAGE_ENGINE */
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
 extern ulong ndb_cache_check_time;
+extern char opt_ndb_constrbuf[];
 extern ulong ndb_extra_logging;
+#endif
+
 #ifdef HAVE_NDB_BINLOG
 extern ulong ndb_report_thresh_binlog_epoch_slip;
 extern ulong ndb_report_thresh_binlog_mem_usage;
@@ -471,6 +475,7 @@ static sys_var_thd_bool
 sys_engine_condition_pushdown(&vars, "engine_condition_pushdown",
                               &SV::engine_condition_pushdown);
 
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
 /* ndb thread specific variable settings */
 static sys_var_thd_ulong
 sys_ndb_autoincrement_prefetch_sz(&vars, "ndb_autoincrement_prefetch_sz",
@@ -491,6 +496,8 @@ static sys_var_thd_bool
 sys_ndb_use_transactions(&vars, "ndb_use_transactions", &SV::ndb_use_transactions);
 static sys_var_long_ptr
 sys_ndb_cache_check_time(&vars, "ndb_cache_check_time", &ndb_cache_check_time);
+static sys_var_const_str
+sys_ndb_connectstring(&vars, "ndb_connectstring", opt_ndb_constrbuf);
 static sys_var_thd_bool
 sys_ndb_index_stat_enable(&vars, "ndb_index_stat_enable",
                           &SV::ndb_index_stat_enable);
@@ -504,6 +511,7 @@ static sys_var_long_ptr
 sys_ndb_extra_logging(&vars, "ndb_extra_logging", &ndb_extra_logging);
 static sys_var_thd_bool
 sys_ndb_use_copying_alter_table(&vars, "ndb_use_copying_alter_table", &SV::ndb_use_copying_alter_table);
+#endif //WITH_NDBCLUSTER_STORAGE_ENGINE
 
 /* Time/date/datetime formats */
 
@@ -1740,6 +1740,7 @@ bool plugin_foreach_with_mask(THD *thd, plugin_foreach_func *func,
       pthread_mutex_unlock(&LOCK_plugin);
     }
     plugin= plugins[idx];
+    /* It will stop iterating on first engine error when "func" returns TRUE */
    if (plugin && func(thd, plugin_int_to_ref(plugin), arg))
       goto err;
   }
@@ -3511,15 +3511,25 @@ bool mysql_create_table_internal(THD *thd,
 {
   bool create_if_not_exists =
     create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS;
 
-    if (ha_table_exists_in_engine(thd, db, table_name))
+    int retcode = ha_table_exists_in_engine(thd, db, table_name);
+    DBUG_PRINT("info", ("exists_in_engine: %u",retcode));
+    switch (retcode)
     {
-      DBUG_PRINT("info", ("Table with same name already existed in handler"));
+      case HA_ERR_NO_SUCH_TABLE:
+        /* Normal case, no table exists. we can go and create it */
+        break;
+      case HA_ERR_TABLE_EXIST:
+        DBUG_PRINT("info", ("Table existed in handler"));
+
-      if (create_if_not_exists)
-        goto warn;
-      my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name);
-      goto unlock_and_end;
+        if (create_if_not_exists)
+          goto warn;
+        my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name);
+        goto unlock_and_end;
+        break;
+      default:
+        DBUG_PRINT("info", ("error: %u from storage engine", retcode));
+        my_error(retcode, MYF(0),table_name);
+        goto unlock_and_end;
     }
   }
 
@@ -25,3 +25,5 @@ INCLUDES = $(INCLUDES_LOC)
 LDADD = $(LDADD_LOC)
 DEFS = @DEFS@ @NDB_DEFS@ $(DEFS_LOC) $(NDB_EXTRA_FLAGS)
 NDB_CXXFLAGS=@ndb_cxxflags_fix@ $(NDB_CXXFLAGS_LOC)
+NDB_AM_CXXFLAGS:= $(AM_CXXFLAGS)
+AM_CXXFLAGS=$(NDB_AM_CXXFLAGS) $(NDB_CXXFLAGS)
@@ -150,6 +150,13 @@ public:
    */
   char char_value() const;
 
+  /**
+   * Get value stored in NdbRecAttr object.
+   *
+   * @return Int8 value.
+   */
+  Int8 int8_value() const;
+
   /**
    * Get value stored in NdbRecAttr object.
    *
@@ -185,6 +192,13 @@ public:
    */
   Uint8 u_char_value() const;
 
+  /**
+   * Get value stored in NdbRecAttr object.
+   *
+   * @return Uint8 value.
+   */
+  Uint8 u_8_value() const;
+
   /**
    * Get value stored in NdbRecAttr object.
    *
@@ -315,6 +329,13 @@ NdbRecAttr::char_value() const
   return *(char*)theRef;
 }
 
+inline
+Int8
+NdbRecAttr::int8_value() const
+{
+  return *(Int8*)theRef;
+}
+
 inline
 Uint32
 NdbRecAttr::u_32_value() const
@@ -336,6 +357,13 @@ NdbRecAttr::u_char_value() const
   return *(Uint8*)theRef;
 }
 
+inline
+Uint8
+NdbRecAttr::u_8_value() const
+{
+  return *(Uint8*)theRef;
+}
+
 inline
 void
 NdbRecAttr::release()
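Note: int8_value()/u_8_value() read the byte through an explicitly signed or unsigned type, while char_value()/u_char_value() go through plain char, whose signedness is implementation-defined. A standalone illustration of the difference (plain C++, assumed equivalents of the NDB typedefs, not NDB code):

    #include <cstdio>
    typedef signed char Int8;      // assumption: matches NDB's Int8
    typedef unsigned char Uint8;   // assumption: matches NDB's Uint8

    int main()
    {
      char raw = (char)0xFA;                  // byte as stored in the row
      printf("%d\n", (int)(Int8)raw);         // always -6  (Tinyint)
      printf("%u\n", (unsigned)(Uint8)raw);   // always 250 (Tinyunsigned)
      return 0;
    }

With plain char the same byte prints as -6 on some platforms and 250 on others, which is why the printing code below switches to the new accessors.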
@@ -1201,9 +1201,19 @@ int Dbtup::handleInsertReq(Signal* signal,
     if(!prevOp->is_first_operation())
       org= (Tuple_header*)c_undo_buffer.get_ptr(&prevOp->m_copy_tuple_location);
     if (regTabPtr->need_expand())
+    {
       expand_tuple(req_struct, sizes, org, regTabPtr, !disk_insert);
+      memset(req_struct->m_disk_ptr->m_null_bits+
+             regTabPtr->m_offsets[DD].m_null_offset, 0xFF,
+             4*regTabPtr->m_offsets[DD].m_null_words);
+    }
     else
+    {
       memcpy(dst, org, 4*regTabPtr->m_offsets[MM].m_fix_header_size);
+    }
+    memset(tuple_ptr->m_null_bits+
+           regTabPtr->m_offsets[MM].m_null_offset, 0xFF,
+           4*regTabPtr->m_offsets[MM].m_null_words);
   }
 
   if (disk_insert)
@@ -1491,6 +1501,7 @@ int Dbtup::handleDeleteReq(Signal* signal,
       goto error;
     }
     memcpy(dst, org, regTabPtr->total_rec_size << 2);
+    req_struct->m_tuple_ptr = (Tuple_header*)dst;
   }
   else
   {
@@ -1528,7 +1539,9 @@ int Dbtup::handleDeleteReq(Signal* signal,
     return 0;
   }
 
-  if (setup_read(req_struct, regOperPtr, regFragPtr, regTabPtr, disk))
+  if (regTabPtr->need_expand(disk))
+    prepare_read(req_struct, regTabPtr, disk);
+
   {
     Uint32 RlogSize;
     int ret= handleReadReq(signal, regOperPtr, regTabPtr, req_struct);
@@ -75,7 +75,7 @@ template <class T>
 class MemoryChannel
 {
 public:
-  MemoryChannel( int size= 256);
+  MemoryChannel( int size= 512);
   virtual ~MemoryChannel( );
 
   void writeChannel( T *t);
@@ -344,18 +344,28 @@ err:
 void
 DynArr256::init(ReleaseIterator &iter)
 {
-  iter.m_sz = 0;
+  iter.m_sz = 1;
   iter.m_pos = 0;
-  iter.m_ptr_i[0] = m_head.m_ptr_i;
-  iter.m_ptr_i[1] = RNIL;
+  iter.m_ptr_i[0] = RNIL;
+  iter.m_ptr_i[1] = m_head.m_ptr_i;
+  iter.m_ptr_i[2] = RNIL;
+  iter.m_ptr_i[3] = RNIL;
+  iter.m_ptr_i[4] = RNIL;
 }
 
-bool
-DynArr256::release(ReleaseIterator &iter)
+/**
+ * Iter is in next pos
+ *
+ * 0 - done
+ * 1 - data
+ * 2 - no data
+ */
+Uint32
+DynArr256::release(ReleaseIterator &iter, Uint32 * retptr)
 {
-  Uint32 ptrI = iter.m_ptr_i[iter.m_sz];
+  Uint32 sz = iter.m_sz;
+  Uint32 pos = iter.m_pos;
+  Uint32 ptrI = iter.m_ptr_i[sz];
   Uint32 page_no = ptrI >> DA256_BITS;
   Uint32 page_idx = ptrI & DA256_MASK;
   Uint32 type_id = (~m_pool.m_type_id) & 0xFFFF;
@@ -364,9 +374,8 @@ DynArr256::release(ReleaseIterator &iter)
 
   if (ptrI != RNIL)
   {
-    Uint32 tmp = iter.m_pos & 255;
-    Uint32 p0 = tmp;
-    for (; p0<256 && p0 < tmp + 16; p0++)
+    Uint32 p0 = iter.m_pos & 255;
+    for (; p0<256; p0++)
     {
       Uint32 *retVal, *magic_ptr, p;
       if (p0 != 255)
@@ -389,55 +398,53 @@ DynArr256::release(ReleaseIterator &iter)
       }
 
       Uint32 magic = *magic_ptr;
+      Uint32 val = *retVal;
       if (unlikely(! ((magic & (1 << p)) && (magic >> 16) == type_id)))
         goto err;
 
-      Uint32 val = * retVal;
-      if (val != RNIL)
+      if (sz == m_head.m_sz)
       {
-        if (iter.m_sz + 2 == m_head.m_sz)
+        * retptr = val;
+        p0++;
+        if (p0 != 256)
         {
-          * retVal = RNIL;
-          m_pool.release(val);
-          iter.m_pos = (iter.m_pos & ~255) + p0;
-          return false;
+          /**
+           * Move next
+           */
+          iter.m_pos &= ~(Uint32)255;
+          iter.m_pos |= p0;
         }
         else
        {
-          * retVal = RNIL;
-          iter.m_sz++;
-          iter.m_ptr_i[iter.m_sz] = val;
-          iter.m_pos = (p0 << 8);
-          return false;
+          /**
+           * Move up
+           */
+          m_pool.release(ptrI);
+          iter.m_sz --;
+          iter.m_pos >>= 8;
        }
+        return 1;
      }
+      else if (val != RNIL)
      {
+        iter.m_sz++;
+        iter.m_ptr_i[iter.m_sz] = val;
+        iter.m_pos = (p0 << 8);
+        * retVal = RNIL;
+        return 2;
      }
    }
 
-    if (p0 == 256)
-    {
-      if (iter.m_sz == 0)
-        goto done;
-      iter.m_sz--;
-      iter.m_pos >>= 8;
-
-      m_pool.release(ptrI);
-      return false;
-    }
-    else
-    {
-      iter.m_pos = (iter.m_pos & ~255) + p0;
-      return false;
-    }
+    assert(p0 == 256);
+    m_pool.release(ptrI);
+    iter.m_sz --;
+    iter.m_pos >>= 8;
+    return 2;
   }
 
 done:
   if (m_head.m_ptr_i != RNIL)
   {
     m_pool.release(m_head.m_ptr_i);
   }
 
   new (&m_head) Head();
-  return true;
+  return 0;
 
 err:
   require(false);
@@ -638,6 +645,7 @@ static
 void
 simple(DynArr256 & arr, int argc, char* argv[])
 {
+  ndbout_c("argc: %d", argc);
   for (Uint32 i = 1; i<(Uint32)argc; i++)
   {
     Uint32 * s = arr.set(atoi(argv[i]));
@@ -865,7 +873,8 @@ write(DynArr256& arr, int argc, char ** argv)
     ndbout_c("Elapsed %lldus -> %f us/set", start, uspg);
     DynArr256::ReleaseIterator iter;
     arr.init(iter);
-    while(!arr.release(iter));
+    Uint32 val;
+    while(arr.release(iter, &val));
   }
 }
 
@@ -902,7 +911,7 @@ main(int argc, char** argv)
   DynArr256::Head head;
   DynArr256 arr(pool, head);
 
-  if (strcmp(argv[1], "--args") == 0)
+  if (strcmp(argv[1], "--simple") == 0)
     simple(arr, argc, argv);
   else if (strcmp(argv[1], "--basic") == 0)
     basic(arr, argc, argv);
@@ -913,8 +922,8 @@ main(int argc, char** argv)
 
   DynArr256::ReleaseIterator iter;
   arr.init(iter);
-  Uint32 cnt = 0;
-  while (!arr.release(iter)) cnt++;
+  Uint32 cnt = 0, val;
+  while (arr.release(iter, &val)) cnt++;
 
   ndbout_c("allocatedpages: %d allocatednodes: %d releasednodes: %d"
            " releasecnt: %d",
@@ -49,6 +49,8 @@ public:
 
     Uint32 m_ptr_i;
     Uint32 m_sz;
+
+    bool isEmpty() const { return m_sz == 0;}
   };
 
   DynArr256(DynArr256Pool & pool, Head& head) :
@@ -61,12 +63,16 @@ public:
   {
     Uint32 m_sz;
     Uint32 m_pos;
-    Uint32 m_ptr_i[4];
+    Uint32 m_ptr_i[5];
   };
 
   void init(ReleaseIterator&);
-  bool release(ReleaseIterator&);
-
+  /**
+   * return 0 - done
+   *        1 - data (in retptr)
+   *        2 - nodata
+   */
+  Uint32 release(ReleaseIterator&, Uint32* retptr);
 protected:
   Head & m_head;
   DynArr256Pool & m_pool;
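Note: release() now drains the array one step per call and reports progress through its return value, with any released word handed back via retptr. A hedged sketch of the drain loop this implies (drain is a hypothetical helper; the pattern matches the updated test driver earlier in this diff):

    // Hypothetical helper showing the new protocol: 0 = done,
    // 1 = *retptr holds released data, 2 = no data this step.
    void drain(DynArr256 & arr)
    {
      DynArr256::ReleaseIterator iter;
      arr.init(iter);
      Uint32 val;
      while (arr.release(iter, &val) != 0)
      {
        // use val only when the previous call returned 1
      }
    }

Bounding the work done per call is what lets the kernel release large arrays without stalling; the extra m_ptr_i slot supports the deeper iterator position introduced by init().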
@@ -2897,25 +2897,50 @@ static bool fixNodeId(InitConfigFileParser::Context & ctx, const char * data)
   char buf[] = "NodeIdX"; buf[6] = data[sizeof("NodeI")];
   char sysbuf[] = "SystemX"; sysbuf[6] = data[sizeof("NodeI")];
   const char* nodeId;
-  require(ctx.m_currentSection->get(buf, &nodeId));
+  if(!ctx.m_currentSection->get(buf, &nodeId))
+  {
+    ctx.reportError("Mandatory parameter %s missing from section"
+                    "[%s] starting at line: %d",
+                    buf, ctx.fname, ctx.m_sectionLineno);
+    return false;
+  }
 
   char tmpLine[MAX_LINE_LENGTH];
   strncpy(tmpLine, nodeId, MAX_LINE_LENGTH);
   char* token1 = strtok(tmpLine, ".");
   char* token2 = strtok(NULL, ".");
   Uint32 id;
 
+  if(!token1)
+  {
+    ctx.reportError("Value for mandatory parameter %s missing from section "
+                    "[%s] starting at line: %d",
+                    buf, ctx.fname, ctx.m_sectionLineno);
+    return false;
+  }
   if (token2 == NULL) { // Only a number given
     errno = 0;
     char* p;
     id = strtol(token1, &p, 10);
-    if (errno != 0) warning("STRTOK1", nodeId);
+    if (errno != 0 || id <= 0x0 || id > MAX_NODES)
+    {
+      ctx.reportError("Illegal value for mandatory parameter %s from section "
+                      "[%s] starting at line: %d",
+                      buf, ctx.fname, ctx.m_sectionLineno);
+      return false;
+    }
     require(ctx.m_currentSection->put(buf, id, true));
   } else { // A pair given (e.g. "uppsala.32")
     errno = 0;
     char* p;
     id = strtol(token2, &p, 10);
-    if (errno != 0) warning("STRTOK2", nodeId);
+    if (errno != 0 || id <= 0x0 || id > MAX_NODES)
+    {
+      ctx.reportError("Illegal value for mandatory parameter %s from section "
+                      "[%s] starting at line: %d",
+                      buf, ctx.fname, ctx.m_sectionLineno);
+      return false;
+    }
     require(ctx.m_currentSection->put(buf, id, true));
     require(ctx.m_currentSection->put(sysbuf, token1));
   }
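Note: both branches above replace the old warning() calls with hard validation, so a bad NodeId now fails the config load instead of being silently accepted. The underlying pattern, shown standalone (parse_node_id is illustrative, not part of the parser):

    #include <cerrno>
    #include <cstdlib>

    // strtol signals overflow via errno; the id must land in (0, MAX_NODES].
    static bool parse_node_id(const char *token, long max_nodes, long *out)
    {
      char *end;
      errno = 0;
      long id = strtol(token, &end, 10);
      if (errno != 0 || id <= 0 || id > max_nodes)
        return false;   // caller reports the config-file error
      *out = id;
      return true;
    }

The "system.id" form is split on '.' first, then the numeric part goes through the same check.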
@@ -628,6 +628,16 @@ MgmtSrvr::start(BaseString &error_string)
     ndbout_c("This is probably a bug.");
   }
 
+  /*
+    set api reg req frequency quite high:
+
+    100 ms interval to make sure we have fairly up-to-date
+    info from the nodes. This to make sure that this info
+    is not dependent on heart beat settings in the
+    configuration
+  */
+  theFacade->theClusterMgr->set_max_api_reg_req_interval(100);
+
   TransporterRegistry *reg = theFacade->get_registry();
   for(unsigned int i=0;i<reg->m_transporter_interface.size();i++) {
     BaseString msg;
@@ -61,6 +61,7 @@ ClusterMgr::ClusterMgr(TransporterFacade & _facade):
   clusterMgrThreadMutex = NdbMutex_Create();
   waitForHBCond= NdbCondition_Create();
   waitingForHB= false;
+  m_max_api_reg_req_interval= 0xFFFFFFFF; // MAX_INT
   noOfAliveNodes= 0;
   noOfConnectedNodes= 0;
   theClusterMgrThread= 0;
@@ -243,7 +244,7 @@ ClusterMgr::threadMain( ){
   }
 
   theFacade.lock_mutex();
-  for (int i = 1; i < MAX_NODES; i++){
+  for (int i = 1; i < MAX_NDB_NODES; i++){
     /**
      * Send register request (heartbeat) to all available nodes
      * at specified timing intervals
@@ -264,7 +265,8 @@ ClusterMgr::threadMain( ){
     }
 
     theNode.hbCounter += timeSlept;
-    if (theNode.hbCounter >= theNode.hbFrequency) {
+    if (theNode.hbCounter >= m_max_api_reg_req_interval ||
+        theNode.hbCounter >= theNode.hbFrequency) {
       /**
        * It is now time to send a new Heartbeat
        */
@@ -50,6 +50,7 @@ public:
   void startThread();
 
   void forceHB();
+  void set_max_api_reg_req_interval(unsigned int millisec) { m_max_api_reg_req_interval = millisec; }
 
 private:
   void threadMain();
@@ -89,6 +90,7 @@ public:
 
   Uint32 m_connect_count;
 private:
+  Uint32 m_max_api_reg_req_interval;
   Uint32 noOfAliveNodes;
   Uint32 noOfConnectedNodes;
   Node theNodes[MAX_NODES];
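Note: with m_max_api_reg_req_interval defaulting to 0xFFFFFFFF the extra condition is a no-op until the management server lowers it (to 100 ms, in the MgmtSrvr hunk above). The effective trigger is the smaller of the two thresholds; as a standalone sketch (time_to_send is illustrative only):

    // API_REGREQ fires once the counter passes either cap.
    static bool time_to_send(Uint32 hbCounter, Uint32 hbFrequency,
                             Uint32 maxApiRegReqInterval)
    {
      return hbCounter >= maxApiRegReqInterval || hbCounter >= hbFrequency;
    }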
@@ -270,7 +270,7 @@ ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r,
       out << r.u_short_value();
       break;
     case NdbDictionary::Column::Tinyunsigned:
-      out << (unsigned) r.u_char_value();
+      out << (unsigned) r.u_8_value();
       break;
     case NdbDictionary::Column::Bigint:
       out << r.int64_value();
@@ -285,7 +285,7 @@ ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r,
       out << r.short_value();
       break;
     case NdbDictionary::Column::Tinyint:
-      out << (int) r.char_value();
+      out << (int) r.int8_value();
       break;
     case NdbDictionary::Column::Binary:
       if (!f.hex_format)
@@ -411,7 +411,7 @@ ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r,
       break;
     case NdbDictionary::Column::Year:
     {
-      uint year = 1900 + r.u_char_value();
+      uint year = 1900 + r.u_8_value();
       char buf[40];
       sprintf(buf, "%04d", year);
       out << buf;
@@ -151,7 +151,7 @@ ErrorBundle ErrorCodes[] = {
   */
   { 4007, DMEC, UR, "Send to ndbd node failed" },
   { 4008, DMEC, UR, "Receive from NDB failed" },
-  { 4009, DMEC, UR, "Cluster Failure" },
+  { 4009, HA_ERR_NO_CONNECTION, UR, "Cluster Failure" },
   { 4012, DMEC, UR,
    "Request ndbd time-out, maybe due to high load or communication problems"},
   { 4013, DMEC, UR, "Request timed out in waiting for node failure"},
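Note: this remapping is what changes the expected output in the ndb_single_user result file earlier in this commit: with DMEC the raw NDB code 4009 leaked through to the server error, while HA_ERR_NO_CONNECTION is handler error 157. A minimal sketch of the relationship, assuming the MySQL 5.1 values from include/my_base.h:

    // Handler error codes involved in this commit (values per my_base.h):
    enum {
      HA_ERR_NO_SUCH_TABLE = 155,  // exists-in-engine probe: not found
      HA_ERR_TABLE_EXIST   = 156,  // exists-in-engine probe: found
      HA_ERR_NO_CONNECTION = 157   // now mapped from NDB error 4009
    };
    // hence "Can't lock file (errno: 157)" in the updated .result file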
@@ -1290,17 +1290,7 @@ runDeleteRead(NDBT_Context* ctx, NDBT_Step* step){
     NdbTransaction* pTrans = pNdb->startTransaction();
     NdbOperation* pOp = pTrans->getNdbOperation(tab->getName());
     pOp->deleteTuple();
-    for(a = 0; a<tab->getNoOfColumns(); a++)
-    {
-      if (tab->getColumn(a)->getPrimaryKey() == true)
-      {
-        if(tmp.equalForAttr(pOp, a, 0) != 0)
-        {
-          ERR(pTrans->getNdbError());
-          return NDBT_FAILED;
-        }
-      }
-    }
+    tmp.equalForRow(pOp, loops);
 
     // Define attributes to read
     for(a = 0; a<tab->getNoOfColumns(); a++)
@@ -1313,6 +1303,30 @@ runDeleteRead(NDBT_Context* ctx, NDBT_Step* step){
 
     pTrans->execute(Commit);
     pTrans->close();
+
+    pTrans = pNdb->startTransaction();
+    pOp = pTrans->getNdbOperation(tab->getName());
+    pOp->insertTuple();
+    tmp.setValues(pOp, loops, 0);
+
+    pOp = pTrans->getNdbOperation(tab->getName());
+    pOp->deleteTuple();
+    tmp.equalForRow(pOp, loops);
+    for(a = 0; a<tab->getNoOfColumns(); a++)
+    {
+      if((row.attributeStore(a) = pOp->getValue(tab->getColumn(a)->getName())) == 0)
+      {
+        ERR(pTrans->getNdbError());
+        return NDBT_FAILED;
+      }
+    }
+    if (pTrans->execute(Commit) != 0)
+    {
+      ERR(pTrans->getNdbError());
+      return NDBT_FAILED;
+    }
+
+    pTrans->close();
   }
 
   return NDBT_OK;
@@ -1444,6 +1458,91 @@ runBug28073(NDBT_Context *ctx, NDBT_Step* step)
 
 template class Vector<Uint64>;
 
+int
+runBug20535(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Uint32 i;
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table * tab = ctx->getTab();
+  NdbDictionary::Dictionary * dict = pNdb->getDictionary();
+
+  bool null = false;
+  for (i = 0; i<tab->getNoOfColumns(); i++)
+  {
+    if (tab->getColumn(i)->getNullable())
+    {
+      null = true;
+      break;
+    }
+  }
+
+  if (!null)
+    return NDBT_OK;
+
+  HugoTransactions hugoTrans(* tab);
+  hugoTrans.loadTable(pNdb, 1);
+
+  NdbTransaction* pTrans = pNdb->startTransaction();
+  NdbOperation* pOp = pTrans->getNdbOperation(tab->getName());
+  pOp->deleteTuple();
+  hugoTrans.equalForRow(pOp, 0);
+  if (pTrans->execute(NoCommit) != 0)
+    return NDBT_FAILED;
+
+  pOp = pTrans->getNdbOperation(tab->getName());
+  pOp->insertTuple();
+  hugoTrans.equalForRow(pOp, 0);
+  for (i = 0; i<tab->getNoOfColumns(); i++)
+  {
+    if (!tab->getColumn(i)->getPrimaryKey() &&
+        !tab->getColumn(i)->getNullable())
+    {
+      hugoTrans.setValueForAttr(pOp, i, 0, 1);
+    }
+  }
+
+  if (pTrans->execute(Commit) != 0)
+    return NDBT_FAILED;
+
+  pTrans->close();
+
+  pTrans = pNdb->startTransaction();
+  pOp = pTrans->getNdbOperation(tab->getName());
+  pOp->readTuple();
+  hugoTrans.equalForRow(pOp, 0);
+  Vector<NdbRecAttr*> values;
+  for (i = 0; i<tab->getNoOfColumns(); i++)
+  {
+    if (!tab->getColumn(i)->getPrimaryKey() &&
+        tab->getColumn(i)->getNullable())
+    {
+      values.push_back(pOp->getValue(i));
+    }
+  }
+
+  if (pTrans->execute(Commit) != 0)
+    return NDBT_FAILED;
+
+  null = true;
+  for (i = 0; i<values.size(); i++)
+  {
+    if (!values[i]->isNULL())
+    {
+      null = false;
+      ndbout_c("column %s is not NULL", values[i]->getColumn()->getName());
+    }
+  }
+
+  pTrans->close();
+
+  if (null)
+    return NDBT_OK;
+  else
+    return NDBT_FAILED;
+}
+
+template class Vector<NdbRecAttr*>;
+
 NDBT_TESTSUITE(testBasic);
 TESTCASE("PkInsert",
          "Verify that we can insert and delete from this table using PK"
@@ -1728,6 +1827,10 @@ TESTCASE("Bug28073",
          "Infinite loop in lock queue" ){
   STEP(runBug28073);
 }
+TESTCASE("Bug20535",
+         "Verify what happens when we fill the db" ){
+  STEP(runBug20535);
+}
 NDBT_TESTSUITE_END(testBasic);
 
 #if 0
@@ -247,6 +247,10 @@ max-time: 500
 cmd: testBasic
 args: -n Bug28073
 
+max-time: 500
+cmd: testBasic
+args: -n Bug20535
+
 max-time: 500
 cmd: testIndex
 args: -n Bug25059 -r 3000 T1
@@ -894,6 +894,21 @@ BackupRestore::table(const TableS & table){
   {
     copy.setMaxRows(table.getNoOfRecords());
   }
+
+  NdbTableImpl &tableImpl = NdbTableImpl::getImpl(copy);
+  if (table.getBackupVersion() < MAKE_VERSION(5,1,0) && !m_no_upgrade){
+    for(int i= 0; i < copy.getNoOfColumns(); i++)
+    {
+      NdbDictionary::Column::Type t = copy.getColumn(i)->getType();
+
+      if (t == NdbDictionary::Column::Varchar ||
+          t == NdbDictionary::Column::Varbinary)
+        tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeShortVar);
+      if (t == NdbDictionary::Column::Longvarchar ||
+          t == NdbDictionary::Column::Longvarbinary)
+        tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeMediumVar);
+    }
+  }
 
   if (dict->createTable(copy) == -1)
   {
@@ -1141,8 +1156,22 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
     int size = attr_desc->size;
     int arraySize = attr_desc->arraySize;
     char * dataPtr = attr_data->string_value;
-    Uint32 length = attr_data->size;
+    Uint32 length = 0;
+
+    const unsigned char * src = (const unsigned char *)dataPtr;
+    switch(attr_desc->m_column->getType()){
+    case NdbDictionary::Column::Varchar:
+    case NdbDictionary::Column::Varbinary:
+      length = src[0] + 1;
+      break;
+    case NdbDictionary::Column::Longvarchar:
+    case NdbDictionary::Column::Longvarbinary:
+      length = src[0] + (src[1] << 8) + 2;
+      break;
+    default:
+      length = attr_data->size;
+      break;
+    }
     if (j == 0 && tup.getTable()->have_auto_inc(i))
       tup.getTable()->update_max_auto_val(dataPtr,size*arraySize);
 
@@ -1162,7 +1191,7 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
       if (ret < 0) {
         ndbout_c("Column: %d type %d %d %d %d",i,
                  attr_desc->m_column->getType(),
-                 size, arraySize, attr_data->size);
+                 size, arraySize, length);
         break;
       }
     }
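Note: the length computed in the hunks above reflects that VAR columns carry their own length prefix: one byte for Varchar/Varbinary, two little-endian bytes for Longvarchar/Longvarbinary, with the prefix itself included in the stored length. A standalone sketch of the rule (var_attr_length is illustrative, not part of the patch):

    #include <cstddef>

    // Total stored bytes = length prefix + the payload it describes.
    static size_t var_attr_length(const unsigned char *src, bool long_var)
    {
      if (long_var)
        return src[0] + (src[1] << 8) + 2;  // 2-byte little-endian prefix
      return src[0] + 1;                    // 1-byte prefix
    }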
@@ -51,6 +51,7 @@ public:
     m_callback = 0;
     m_free_callback = 0;
     m_temp_error = false;
+    m_no_upgrade = false;
     m_transactions = 0;
     m_cache.m_old_table = 0;
   }
@@ -91,6 +92,7 @@ public:
   bool m_restore_meta;
   bool m_no_restore_disk;
   bool m_restore_epoch;
+  bool m_no_upgrade; // for upgrade ArrayType from 5.0 backup file.
   Uint32 m_logCount;
   Uint32 m_dataCount;
 
@@ -34,6 +34,7 @@ static int ga_nodeId = 0;
 static int ga_nParallelism = 128;
 static int ga_backupId = 0;
 static bool ga_dont_ignore_systab_0 = false;
+static bool ga_no_upgrade = false;
 static Vector<class BackupConsumer *> g_consumers;
 static BackupPrinter* g_printer = NULL;
 
@@ -115,6 +116,10 @@ static struct my_option my_long_options[] =
     "Restore meta data into NDB Cluster using NDBAPI",
     (gptr*) &_restore_meta, (gptr*) &_restore_meta, 0,
     GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+  { "no-upgrade", 'u',
+    "Don't upgrade array type for var attributes, which don't resize VAR data and don't change column attributes",
+    (gptr*) &ga_no_upgrade, (gptr*) &ga_no_upgrade, 0,
+    GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
   { "no-restore-disk-objects", 'd',
     "Dont restore disk objects (tablespace/logfilegroups etc)",
     (gptr*) &_no_restore_disk, (gptr*) &_no_restore_disk, 0,
@@ -460,6 +465,11 @@ o verify nodegroup mapping
     restore->m_no_restore_disk = true;
   }
 
+  if (ga_no_upgrade)
+  {
+    restore->m_no_upgrade = true;
+  }
+
   if (ga_restore_epoch)
   {
     restore->m_restore_epoch = true;
@@ -644,6 +654,8 @@ main(int argc, char** argv)
     g_options.appfmt(" -n %d", ga_nodeId);
   if (_restore_meta)
     g_options.appfmt(" -m");
+  if (ga_no_upgrade)
+    g_options.appfmt(" -u");
   if (ga_skip_table_check)
     g_options.appfmt(" -s");
   if (_restore_data)
@@ -655,7 +667,6 @@ main(int argc, char** argv)
   g_options.appfmt(" -p %d", ga_nParallelism);
 
   g_connect_string = opt_connect_str;
-
   /**
    * we must always load meta data, even if we will only print it to stdout
    */