Removed compiler warnings
Ensure that my_size_t is always unsigned (to get predictable results from system to system).
Removed some %lld, as these are not portable.
This commit is contained in:
parent 313f23a93a
commit 0e149b7170
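The recurring pattern in the hunks below: %lld/%llu are not supported by every printf implementation the server builds against, so 64-bit and pointer-difference values are narrowed with an explicit cast and printed with %ld/%lu (or %d after an (int) cast). A minimal standalone sketch of the idea (file name and values are illustrative, not from this commit):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
  struct stat stat_info;
  if (stat("/etc/hosts", &stat_info) != 0)
    return 1;
  /* Non-portable: some printf implementations lack %llu:
     printf("file size: %llu\n", stat_info.st_size);
     Portable for debug output (may truncate above 2^32): */
  printf("file size: %lu\n", (unsigned long) stat_info.st_size);
  return 0;
}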
@@ -5,7 +5,7 @@ configure="./configure $base_configs $extra_configs"

 commands="\
 $make -k distclean || true
-/bin/rm -rf */.deps/*.P config.cache storage/innobase/config.cache autom4te.cache innobase/autom4te.cache;
+/bin/rm -rf */.deps/*.P configure config.cache storage/*/configure storage/*/config.cache autom4te.cache storage/*/autom4te.cache;

 path=`dirname $0`
 . \"$path/autorun.sh\""
@@ -1560,8 +1560,8 @@ static uint dump_routines_for_db(char *db)
        if the user has EXECUTE privilege he see routine names, but NOT the
        routine body of other routines that are not the creator of!
      */
-      DBUG_PRINT("info",("length of body for %s row[2] '%s' is %d",
-                         routine_name, row[2], strlen(row[2])));
+      DBUG_PRINT("info",("length of body for %s row[2] '%s' is %ld",
+                         routine_name, row[2], (long) strlen(row[2])));
       if (strlen(row[2]))
       {
         char *query_str= NULL;
@@ -592,7 +592,7 @@ get_random_string(char *buf)
   DBUG_ENTER("get_random_string");
   for (x= RAND_STRING_SIZE; x > 0; x--)
     *buf_ptr++= ALPHANUMERICS[random() % ALPHANUMERICS_SIZE];
-  DBUG_PRINT("info", ("random string: '%*s'", buf_ptr - buf, buf));
+  DBUG_PRINT("info", ("random string: '%*s'", (int) (buf_ptr - buf), buf));
   DBUG_RETURN(buf_ptr - buf);
 }

@@ -893,8 +893,8 @@ int dyn_string_cmp(DYNAMIC_STRING* ds, const char *fname)
     die(NullS);
   if (!eval_result && (uint) stat_info.st_size != ds->length)
   {
-    DBUG_PRINT("info",("Size differs: result size: %u file size: %llu",
-                       ds->length, stat_info.st_size));
+    DBUG_PRINT("info",("Size differs: result size: %u file size: %lu",
+                       ds->length, (ulong) stat_info.st_size));
     DBUG_PRINT("info",("result: '%s'", ds->str));
     DBUG_RETURN(RESULT_LENGTH_MISMATCH);
   }
@@ -3077,14 +3077,14 @@ void do_connect(struct st_command *command)
     else if (!strncmp(con_options, "COMPRESS", 8))
       con_compress= 1;
     else
-      die("Illegal option to connect: %.*s", end - con_options, con_options);
+      die("Illegal option to connect: %.*s", (int) (end - con_options), con_options);
     /* Process next option */
     con_options= end;
   }

   if (next_con == connections_end)
-    die("Connection limit exhausted, you can have max %d connections",
-        (sizeof(connections)/sizeof(struct st_connection)));
+    die("Connection limit exhausted, you can have max %ld connections",
+        (long) (sizeof(connections)/sizeof(struct st_connection)));

   if (find_connection_by_name(ds_connection_name.str))
     die("Connection %s already exists", ds_connection_name.str);
@@ -735,7 +735,8 @@ _rl_read_file (filename, sizep)
   file_size = (size_t)finfo.st_size;

   /* check for overflow on very large files */
-  if (file_size != finfo.st_size || file_size + 1 < file_size)
+  if ((long long) file_size != (long long) finfo.st_size ||
+      file_size + 1 < file_size)
     {
       if (file >= 0)
         close (file);
@@ -184,7 +184,8 @@ read_history_range (filename, from, to)
   file_size = (size_t)finfo.st_size;

   /* check for overflow on very large files */
-  if (file_size != finfo.st_size || file_size + 1 < file_size)
+  if ((long long) file_size != (long long) finfo.st_size ||
+      file_size + 1 < file_size)
     {
       errno = overflow_errno;
       goto error_and_exit;
@@ -333,7 +334,8 @@ history_truncate_file (fname, lines)
   file_size = (size_t)finfo.st_size;

   /* check for overflow on very large files */
-  if (file_size != finfo.st_size || file_size + 1 < file_size)
+  if ((long long) file_size != (long long) finfo.st_size ||
+      file_size + 1 < file_size)
     {
       close (file);
 #if defined (EFBIG)
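All three readline hunks keep the same overflow guard but cast both sides to long long, so the compiler stops warning about the comparison between size_t and off_t. A standalone sketch of the check (assumed values, not the readline source):

#include <stdio.h>

int main(void)
{
  long long st_size = 1LL << 20;        /* stands in for finfo.st_size */
  size_t file_size = (size_t) st_size;

  /* Casting back detects truncation on platforms where size_t is
     narrower than off_t; file_size + 1 < file_size catches wrap-around
     when one extra byte is needed for a terminator. */
  if ((long long) file_size != st_size || file_size + 1 < file_size)
    printf("file too big to map\n");
  else
    printf("ok: %lu bytes\n", (unsigned long) file_size);
  return 0;
}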
@@ -869,9 +869,8 @@ typedef long my_ptrdiff_t;
 typedef long long my_ptrdiff_t;
 #endif

-#if HAVE_SIZE_T
-typedef size_t my_size_t;
-#elif SIZEOF_CHARP <= SIZEOF_LONG
+/* We can't set my_size_t to size_t as we want my_size_t to be unsigned */
+#if SIZEOF_CHARP <= SIZEOF_LONG
 typedef unsigned long my_size_t;
 #else
 typedef unsigned long long my_size_t;
@@ -886,6 +885,22 @@ typedef unsigned long long my_size_t;
 #define ADD_TO_PTR(ptr,size,type) (type) ((byte*) (ptr)+size)
 #define PTR_BYTE_DIFF(A,B) (my_ptrdiff_t) ((byte*) (A) - (byte*) (B))

+/*
+  Custom version of standard offsetof() macro which can be used to get
+  offsets of members in class for non-POD types (according to the current
+  version of C++ standard offsetof() macro can't be used in such cases and
+  attempt to do so causes warnings to be emitted, OTOH in many cases it is
+  still OK to assume that all instances of the class has the same offsets
+  for the same members).
+
+  This is temporary solution which should be removed once File_parser class
+  and related routines are refactored.
+*/
+
+#define my_offsetof(TYPE, MEMBER) \
+  ((size_t)((char *)&(((TYPE *)0x10)->MEMBER) - (char*)0x10))
+
+
 #define NullS (char *) 0
 /* Nowdays we do not support MessyDos */
 #ifndef NEAR
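my_offsetof() computes a member offset by pretending an object lives at the arbitrary non-null address 0x10 and measuring the member's distance from that base, which sidesteps the warning offsetof() draws on non-POD classes. A hedged illustration with a made-up class (not a type from this tree):

#include <cstdio>

#define my_offsetof(TYPE, MEMBER) \
  ((size_t)((char *)&(((TYPE *)0x10)->MEMBER) - (char*)0x10))

struct File_option_demo               /* hypothetical non-POD type */
{
  File_option_demo() : line_no(0) {}  /* constructor makes it non-POD */
  char name[16];
  int line_no;
};

int main()
{
  /* offsetof(File_option_demo, line_no) would warn under the C++
     rules of the day; my_offsetof yields the same number as long as
     all instances share one layout.  No object is ever dereferenced,
     only an address is formed. */
  printf("line_no offset: %lu\n",
         (unsigned long) my_offsetof(File_option_demo, line_no));
  return 0;
}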
@@ -1218,7 +1218,8 @@ Event_queue_element::compute_next_execution_time()

   my_tz_UTC->gmt_sec_to_TIME(&time_now, current_thd->query_start());

-  DBUG_PRINT("info",("NOW=[%llu]", TIME_to_ulonglong_datetime(&time_now)));
+  DBUG_PRINT("info",("NOW: [%lu]",
+                     (ulong) TIME_to_ulonglong_datetime(&time_now)));

   /* if time_now is after ends don't execute anymore */
   if (!ends_null && (tmp= my_time_compare(&ends, &time_now)) == -1)
@@ -1300,7 +1301,8 @@ Event_queue_element::compute_next_execution_time()
     }
     else
     {
-      DBUG_PRINT("info",("Next[%llu]",TIME_to_ulonglong_datetime(&next_exec)));
+      DBUG_PRINT("info",("Next[%lu]",
+                         (ulong) TIME_to_ulonglong_datetime(&next_exec)));
       execute_at= next_exec;
       execute_at_null= FALSE;
     }
@@ -1322,7 +1324,8 @@ Event_queue_element::compute_next_execution_time()
                                 expression, interval))
         goto err;
       execute_at= next_exec;
-      DBUG_PRINT("info",("Next[%llu]",TIME_to_ulonglong_datetime(&next_exec)));
+      DBUG_PRINT("info",("Next[%lu]",
+                         (ulong) TIME_to_ulonglong_datetime(&next_exec)));
     }
     else
     {
@@ -1356,7 +1359,8 @@ Event_queue_element::compute_next_execution_time()
                                 expression, interval))
         goto err;
       execute_at= next_exec;
-      DBUG_PRINT("info",("Next[%llu]",TIME_to_ulonglong_datetime(&next_exec)));
+      DBUG_PRINT("info",("Next[%lu]",
+                         (ulong) TIME_to_ulonglong_datetime(&next_exec)));
     }
     execute_at_null= FALSE;
   }
@@ -1393,8 +1397,8 @@ Event_queue_element::compute_next_execution_time()
     }
     else
     {
-      DBUG_PRINT("info", ("Next[%llu]",
-                          TIME_to_ulonglong_datetime(&next_exec)));
+      DBUG_PRINT("info", ("Next[%lu]",
+                          (ulong) TIME_to_ulonglong_datetime(&next_exec)));
       execute_at= next_exec;
       execute_at_null= FALSE;
     }
@@ -776,7 +776,7 @@ Event_scheduler::dump_internal_status()
                               mutex_last_unlocked_at_line);
   printf("WOC : %s\n", waiting_on_cond? "YES":"NO");
   printf("Workers : %u\n", workers_count());
-  printf("Executed : %llu\n", started_events);
+  printf("Executed : %lu\n", (ulong) started_events);
   printf("Data locked: %s\n", mutex_scheduler_data_locked ? "YES":"NO");

   DBUG_VOID_RETURN;
@@ -239,7 +239,7 @@ public:
   */
   my_size_t last_null_byte() const {
     my_size_t bytes= do_last_null_byte();
-    DBUG_PRINT("debug", ("last_null_byte() ==> %d", bytes));
+    DBUG_PRINT("debug", ("last_null_byte() ==> %ld", (long) bytes));
     DBUG_ASSERT(bytes <= table->s->null_bytes);
     return bytes;
   }
@@ -3563,9 +3563,11 @@ restart:
     if (do_ndbcluster_binlog_close_connection)
     {
       DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection: %d, "
-                          "ndb_latest_handled_binlog_epoch: %llu, "
-                          "*p_latest_trans_gci: %llu", do_ndbcluster_binlog_close_connection,
-                          ndb_latest_handled_binlog_epoch, *p_latest_trans_gci));
+                          "ndb_latest_handled_binlog_epoch: %lu, "
+                          "*p_latest_trans_gci: %lu",
+                          do_ndbcluster_binlog_close_connection,
+                          (ulong) ndb_latest_handled_binlog_epoch,
+                          (ulong) *p_latest_trans_gci));
     }
 #endif
 #ifdef RUN_NDB_BINLOG_TIMER
@@ -3653,9 +3655,10 @@ restart:
         do_ndbcluster_binlog_close_connection= BCCC_restart;
         if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running)
         {
-          sql_print_error("NDB Binlog: latest transaction in epoch %lld not in binlog "
-                          "as latest received epoch is %lld",
-                          *p_latest_trans_gci, ndb_latest_received_binlog_epoch);
+          sql_print_error("NDB Binlog: latest transaction in epoch %lu not in binlog "
+                          "as latest received epoch is %lu",
+                          (ulong) *p_latest_trans_gci,
+                          (ulong) ndb_latest_received_binlog_epoch);
         }
       }
     }
@@ -3841,9 +3844,10 @@ restart:
         do_ndbcluster_binlog_close_connection= BCCC_restart;
         if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running)
         {
-          sql_print_error("NDB Binlog: latest transaction in epoch %lld not in binlog "
-                          "as latest received epoch is %lld",
-                          *p_latest_trans_gci, ndb_latest_received_binlog_epoch);
+          sql_print_error("NDB Binlog: latest transaction in epoch %lu not in binlog "
+                          "as latest received epoch is %lu",
+                          (ulong) *p_latest_trans_gci,
+                          (ulong) ndb_latest_received_binlog_epoch);
         }
       }
     }
@@ -3875,7 +3879,7 @@ restart:
     row.master_log_file= start.file_name();
     row.master_log_pos= start.file_pos();

-    DBUG_PRINT("info", ("COMMIT gci: %lld", gci));
+    DBUG_PRINT("info", ("COMMIT gci: %lu", (ulong) gci));
     if (ndb_update_binlog_index)
       ndb_add_binlog_index(thd, &row);
     ndb_latest_applied_binlog_epoch= gci;
@@ -2427,7 +2427,7 @@ repeat:
   do
   {
     DBUG_PRINT("info", ("external_lock(thd, %d) iteration %d",
-                        lock_type, (file - m_file)));
+                        lock_type, (int) (file - m_file)));
     if ((error= (*file)->external_lock(thd, lock_type)))
     {
       if (F_UNLCK != lock_type)
@@ -2508,7 +2508,7 @@ THR_LOCK_DATA **ha_partition::store_lock(THD *thd,
   file= m_file;
   do
   {
-    DBUG_PRINT("info", ("store lock %d iteration", (file - m_file)));
+    DBUG_PRINT("info", ("store lock %d iteration", (int) (file - m_file)));
     to= (*file)->store_lock(thd, to, lock_type);
   } while (*(++file));
   DBUG_RETURN(to);
@@ -978,8 +978,8 @@ String *Item_func_insert::val_str(String *str)
   if (length > res->length() - start)
     length= res->length() - start;

-  if (res->length() - length + res2->length() >
-      current_thd->variables.max_allowed_packet)
+  if ((ulonglong) (res->length() - length + res2->length()) >
+      (ulonglong) current_thd->variables.max_allowed_packet)
   {
     push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                         ER_WARN_ALLOWED_PACKET_OVERFLOWED,
@@ -2426,7 +2426,7 @@ String *Item_func_lpad::val_str(String *str)
   pad_char_length= pad->numchars();
   byte_count= count * collation.collation->mbmaxlen;

-  if (byte_count > current_thd->variables.max_allowed_packet)
+  if ((ulonglong) byte_count > current_thd->variables.max_allowed_packet)
   {
     push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                         ER_WARN_ALLOWED_PACKET_OVERFLOWED,
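The two Item_func hunks above quiet warnings about comparing integers of different widths: max_allowed_packet is a ulonglong system variable while the left-hand side is built from 32-bit lengths, so both sides are widened explicitly before the comparison. A standalone sketch of the pitfall (names and values assumed):

#include <stdio.h>

typedef unsigned long long ulonglong;

int main(void)
{
  unsigned int byte_count = 4000000000U;      /* 32-bit length  */
  ulonglong max_allowed_packet = 1024 * 1024; /* 64-bit limit   */

  /* Mixed-width comparison; the explicit cast documents the widening
     and keeps picky compilers quiet. */
  if ((ulonglong) byte_count > max_allowed_packet)
    printf("payload too large\n");
  return 0;
}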
@@ -5376,7 +5376,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
   const byte* const ptr_rows_data= var_start + byte_count + 1;

   my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf);
-  DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %u",
+  DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %lu",
                      m_table_id, m_flags, m_width, data_size));

   m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME));
@@ -5416,8 +5416,8 @@ int Rows_log_event::do_add_row_data(byte *const row_data,
     would save binlog space. TODO
   */
   DBUG_ENTER("Rows_log_event::do_add_row_data");
-  DBUG_PRINT("enter", ("row_data: 0x%lx length: %u", (ulong) row_data,
-                       length));
+  DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data,
+                       (ulong) length));
   /*
     Don't print debug messages when running valgrind since they can
     trigger false warnings.
@@ -5597,7 +5597,8 @@ unpack_row(RELAY_LOG_INFO *rli,
     uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG;
     Field *const f= *field_ptr;

-    DBUG_PRINT("info", ("processing column '%s' @ 0x%lx", f->field_name, f->ptr));
+    DBUG_PRINT("info", ("processing column '%s' @ 0x%lx", f->field_name,
+                        (long) f->ptr));
     if (event_type == WRITE_ROWS_EVENT && (f->flags & mask) == mask)
     {
       slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD,
@@ -6121,7 +6122,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len,
   uchar *ptr_after_colcnt= (uchar*) ptr_colcnt;
   m_colcnt= net_field_length(&ptr_after_colcnt);

-  DBUG_PRINT("info",("m_dblen: %d off: %ld m_tbllen: %d off: %ld m_colcnt: %lu off: %ld",
+  DBUG_PRINT("info",("m_dblen: %lu off: %ld m_tbllen: %lu off: %ld m_colcnt: %lu off: %ld",
                      m_dblen, (long) (ptr_dblen-(const byte*)vpart),
                      m_tbllen, (long) (ptr_tbllen-(const byte*)vpart),
                      m_colcnt, (long) (ptr_colcnt-(const byte*)vpart)));
@@ -6527,10 +6528,10 @@ copy_extra_record_fields(TABLE *table,
                          my_ptrdiff_t master_fields)
 {
   DBUG_PRINT("info", ("Copying to 0x%lx "
-                      "from field %ld at offset %u "
+                      "from field %lu at offset %lu "
                       "to field %d at offset %lu",
                       (long) table->record[0],
-                      master_fields, master_reclength,
+                      (ulong) master_fields, (ulong) master_reclength,
                       table->s->fields, table->s->reclength));
   /*
     Copying the extra fields of the slave that does not exist on
@@ -1610,7 +1610,7 @@ static void network_init(void)
   if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1))
   {
     sql_print_error("The socket file path is too long (> %u): %s",
-                    sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
+                    (uint) sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
     unireg_abort(1);
   }
   if ((unix_sock= socket(AF_UNIX, SOCK_STREAM, 0)) < 0)
@@ -106,21 +106,4 @@ public:
                                MEM_ROOT *mem_root,
                                bool bad_format_errors);
 };
-
-
-/*
-  Custom version of standard offsetof() macro which can be used to get
-  offsets of members in class for non-POD types (according to the current
-  version of C++ standard offsetof() macro can't be used in such cases and
-  attempt to do so causes warnings to be emitted, OTOH in many cases it is
-  still OK to assume that all instances of the class has the same offsets
-  for the same members).
-
-  This is temporary solution which should be removed once File_parser class
-  and related routines are refactored.
-*/
-
-#define my_offsetof(TYPE, MEMBER) \
-  ((size_t)((char *)&(((TYPE *)0x10)->MEMBER) - (char*)0x10))
-
 #endif /* _PARSE_FILE_H_ */
@@ -25,7 +25,7 @@ field_length_from_packed(enum_field_types const field_type,
   switch (field_type) {
   case MYSQL_TYPE_DECIMAL:
   case MYSQL_TYPE_NEWDECIMAL:
-    length= ~0UL;
+    length= ~(uint32) 0;
     break;
   case MYSQL_TYPE_YEAR:
   case MYSQL_TYPE_TINY:
@@ -71,7 +71,7 @@ field_length_from_packed(enum_field_types const field_type,
     break;
     break;
   case MYSQL_TYPE_BIT:
-    length= ~0UL;
+    length= ~(uint32) 0;
     break;
   default:
     /* This case should never be chosen */
|
||||
case MYSQL_TYPE_SET:
|
||||
case MYSQL_TYPE_VAR_STRING:
|
||||
case MYSQL_TYPE_VARCHAR:
|
||||
length= ~0UL; // NYI
|
||||
length= ~(uint32) 0; // NYI
|
||||
break;
|
||||
|
||||
case MYSQL_TYPE_TINY_BLOB:
|
||||
@ -93,7 +93,7 @@ field_length_from_packed(enum_field_types const field_type,
|
||||
case MYSQL_TYPE_LONG_BLOB:
|
||||
case MYSQL_TYPE_BLOB:
|
||||
case MYSQL_TYPE_GEOMETRY:
|
||||
length= ~0UL; // NYI
|
||||
length= ~(uint32) 0; // NYI
|
||||
break;
|
||||
}
|
||||
|
||||
@ -131,7 +131,8 @@ table_def::compatible_with(RELAY_LOG_INFO *rli, TABLE *table)
|
||||
slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF,
|
||||
"Table width mismatch - "
|
||||
"received %u columns, %s.%s has %u columns",
|
||||
size(), tsh->db.str, tsh->table_name.str, tsh->fields);
|
||||
(uint) size(), tsh->db.str, tsh->table_name.str,
|
||||
tsh->fields);
|
||||
}
|
||||
|
||||
for (uint col= 0 ; col < cols_to_check ; ++col)
|
||||
|
@@ -114,8 +114,8 @@ void mysql_client_binlog_statement(THD* thd)
     order to be able to read exactly what is necessary.
   */

-  DBUG_PRINT("info",("binlog base64 decoded_len=%d, bytes_decoded=%d",
-                     decoded_len, bytes_decoded));
+  DBUG_PRINT("info",("binlog base64 decoded_len: %lu bytes_decoded: %d",
+                     (ulong) decoded_len, bytes_decoded));

   /*
     Now we start to read events of the buffer, until there are no
|
||||
(long) (bufptr+EVENT_TYPE_OFFSET)));
|
||||
DBUG_PRINT("info", ("bytes_decoded: %d bufptr: 0x%lx buf[EVENT_LEN_OFFSET]: %lu",
|
||||
bytes_decoded, (long) bufptr,
|
||||
uint4korr(bufptr+EVENT_LEN_OFFSET)));
|
||||
(ulong) uint4korr(bufptr+EVENT_LEN_OFFSET)));
|
||||
#endif
|
||||
ev->thd= thd;
|
||||
if (int err= ev->exec_event(thd->rli_fake))
|
||||
|
@@ -1227,9 +1227,9 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
       if (engine_data != table->engine_data())
       {
         DBUG_PRINT("qcache",
-                   ("Handler require invalidation queries of %s.%s %lld-%lld",
-                    table_list.db, table_list.alias,
-                    engine_data, table->engine_data()));
+                   ("Handler require invalidation queries of %s.%s %lu-%lu",
+                    table_list.db, table_list.alias,
+                    (ulong) engine_data, (ulong) table->engine_data()));
         invalidate_table((byte *) table->db(), table->key_length());
       }
       else
|
||||
#ifndef EMBEDDED_LIBRARY
|
||||
do
|
||||
{
|
||||
DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %u)",
|
||||
DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %lu)",
|
||||
result_block->length, result_block->used,
|
||||
result_block->headers_len()+
|
||||
ALIGN_SIZE(sizeof(Query_cache_result))));
|
||||
(ulong) (result_block->headers_len()+
|
||||
ALIGN_SIZE(sizeof(Query_cache_result)))));
|
||||
|
||||
Query_cache_result *result = result_block->result();
|
||||
if (net_real_write(&thd->net, result->data(),
|
||||
@ -2469,11 +2469,11 @@ Query_cache::insert_table(uint key_len, char *key,
|
||||
table_block->table()->engine_data() != engine_data)
|
||||
{
|
||||
DBUG_PRINT("qcache",
|
||||
("Handler require invalidation queries of %s.%s %lld-%lld",
|
||||
("Handler require invalidation queries of %s.%s %lu-%lu",
|
||||
table_block->table()->db(),
|
||||
table_block->table()->table(),
|
||||
engine_data,
|
||||
table_block->table()->engine_data()));
|
||||
(ulong) engine_data,
|
||||
(ulong) table_block->table()->engine_data()));
|
||||
/*
|
||||
as far as we delete all queries with this table, table block will be
|
||||
deleted, too
|
||||
@ -3759,7 +3759,7 @@ my_bool Query_cache::check_integrity(bool locked)
|
||||
{
|
||||
DBUG_PRINT("error",
|
||||
("block 0x%lx do not aligned by %d", (ulong) block,
|
||||
ALIGN_SIZE(1)));
|
||||
(int) ALIGN_SIZE(1)));
|
||||
result = 1;
|
||||
}
|
||||
// Check memory allocation
|
||||
|
@@ -1743,8 +1743,8 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
     tz_leapcnt++;

     DBUG_PRINT("info",
-               ("time_zone_leap_second table: tz_leapcnt=%u tt_time=%lld offset=%ld",
-                tz_leapcnt, (longlong)tz_lsis[tz_leapcnt-1].ls_trans,
+               ("time_zone_leap_second table: tz_leapcnt:%u tt_time: %lu offset: %ld",
+                tz_leapcnt, (ulong) tz_lsis[tz_leapcnt-1].ls_trans,
                 tz_lsis[tz_leapcnt-1].ls_corr));

     res= table->file->index_next(table->record[0]);
@@ -2057,8 +2057,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
     tz_info->timecnt++;

     DBUG_PRINT("info",
-               ("time_zone_transition table: tz_id=%u tt_time=%lld tt_id=%u",
-                tzid, (longlong)ttime, ttid));
+               ("time_zone_transition table: tz_id: %u tt_time:%lu tt_id: %u",
+                tzid, (ulong) ttime, ttid));

     res= table->file->index_next_same(table->record[0],
                                       (byte*)table->field[0]->ptr, 4);
@@ -329,10 +329,12 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,

   DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
   DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", *rows));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", *forced_flushes));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lu", (ulong) *rows));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lu", (ulong) check_point));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %lu",
+                                            (ulong) *auto_increment));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %lu",
+                                            (ulong) *forced_flushes));
   DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path));
   DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));

@@ -385,12 +387,12 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
                                              (uint)ARCHIVE_CHECK_HEADER));
   DBUG_PRINT("ha_archive::write_meta_file", ("Version %d",
                                              (uint)ARCHIVE_VERSION));
-  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows));
-  DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
-  DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu",
-                                             auto_increment));
-  DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu",
-                                             forced_flushes));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %lu", (ulong) rows));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %lu", (ulong) check_point));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %lu",
+                                             (ulong) auto_increment));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %lu",
+                                             (ulong) forced_flushes));
   DBUG_PRINT("ha_archive::write_meta_file", ("Real path %s",
                                              real_path));
   DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
|
||||
if (scan)
|
||||
{
|
||||
scan_rows= share->rows_recorded;
|
||||
DBUG_PRINT("info", ("archive will retrieve %llu rows", scan_rows));
|
||||
DBUG_PRINT("info", ("archive will retrieve %lu rows", (ulong) scan_rows));
|
||||
stats.records= 0;
|
||||
|
||||
/*
|
||||
@ -1318,7 +1320,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
|
||||
share->rows_recorded++;
|
||||
}
|
||||
}
|
||||
DBUG_PRINT("info", ("recovered %llu archive rows", share->rows_recorded));
|
||||
DBUG_PRINT("info", ("recovered %lu archive rows", (ulong) share->rows_recorded));
|
||||
|
||||
my_free((char*)buf, MYF(0));
|
||||
if (rc && rc != HA_ERR_END_OF_FILE)
|
||||
|
@@ -68,7 +68,7 @@ int heap_write(HP_INFO *info, const byte *record)
   DBUG_RETURN(0);

 err:
-  DBUG_PRINT("info",("Duplicate key: %d", keydef - share->keydef));
+  DBUG_PRINT("info",("Duplicate key: %d", (int) (keydef - share->keydef)));
   info->errkey= keydef - share->keydef;
   if (keydef->algorithm == HA_KEY_ALG_BTREE)
   {
@@ -1733,7 +1733,7 @@ os_file_set_size(
   }

   /* Print about progress for each 100 MB written */
-  if ((current_size + n_bytes) / (ib_longlong)(100 * 1024 * 1024)
+  if ((ib_longlong) (current_size + n_bytes) / (ib_longlong)(100 * 1024 * 1024)
       != current_size / (ib_longlong)(100 * 1024 * 1024)) {

     fprintf(stderr, " %lu00",
@@ -1105,18 +1105,18 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts)
     my_off_t total_count;
     char llbuf[32];

-    DBUG_PRINT("info", ("column: %3u", count - huff_counts + 1));
+    DBUG_PRINT("info", ("column: %3u", (uint) (count - huff_counts) + 1));
     if (verbose >= 2)
-      VOID(printf("column: %3u\n", count - huff_counts + 1));
+      VOID(printf("column: %3u\n", (uint) (count - huff_counts) + 1));
     if (count->tree_buff)
     {
       DBUG_PRINT("info", ("number of distinct values: %u",
-                          (count->tree_pos - count->tree_buff) /
-                          count->field_length));
+                          (uint) ((count->tree_pos - count->tree_buff) /
+                                  count->field_length)));
       if (verbose >= 2)
         VOID(printf("number of distinct values: %u\n",
-                    (count->tree_pos - count->tree_buff) /
-                    count->field_length));
+                    (uint) ((count->tree_pos - count->tree_buff) /
+                            count->field_length)));
     }
     total_count= 0;
     for (idx= 0; idx < 256; idx++)
@@ -2279,8 +2279,8 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees)
       if (bits > 8 * sizeof(code))
       {
         VOID(fflush(stdout));
-        VOID(fprintf(stderr, "error: Huffman code too long: %u/%u\n",
-                     bits, 8 * sizeof(code)));
+        VOID(fprintf(stderr, "error: Huffman code too long: %u/%lu\n",
+                     bits, (ulong) (8 * sizeof(code))));
         errors++;
         break;
       }
@@ -87,8 +87,8 @@ int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key,

   mi=(info->current_table=(MYRG_TABLE *)queue_top(&(info->by_key)))->table;
   mi->once_flags|= RRND_PRESERVE_LASTINX;
-  DBUG_PRINT("info", ("using table no: %d",
-                      info->current_table - info->open_tables + 1));
+  DBUG_PRINT("info", ("using table no: %u",
+                      (uint) (info->current_table - info->open_tables) + 1));
   DBUG_DUMP("result key", (byte*) mi->lastkey, mi->lastkey_length);
   DBUG_RETURN(_myrg_mi_read_record(mi,buf));
 }
@@ -47,17 +47,17 @@ inline int my_decimal_get_binary_size(uint precision, uint scale)
 #endif

 #define DTIMAP(x, y, z) \
-  { DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 }
+  { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 }

 #define DTIMAP2(x, y, z, u, v) \
-  { DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 }
+  { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 }

 #define DTIMAPS(x, y, z, u, v) \
-  { DictTabInfo::y, offsetof(x, z), SimpleProperties::StringValue, u, v, 0 }
+  { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::StringValue, u, v, 0 }

 #define DTIMAPB(x, y, z, u, v, l) \
-  { DictTabInfo::y, offsetof(x, z), SimpleProperties::BinaryValue, u, v, \
-    offsetof(x, l) }
+  { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::BinaryValue, u, v, \
+    my_offsetof(x, l) }

 #define DTIBREAK(x) \
   { DictTabInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 }
|
||||
};
|
||||
|
||||
#define DFGIMAP(x, y, z) \
|
||||
{ DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 }
|
||||
{ DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 }
|
||||
|
||||
#define DFGIMAP2(x, y, z, u, v) \
|
||||
{ DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 }
|
||||
{ DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 }
|
||||
|
||||
#define DFGIMAPS(x, y, z, u, v) \
|
||||
{ DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::StringValue, u, v, 0 }
|
||||
{ DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::StringValue, u, v, 0 }
|
||||
|
||||
#define DFGIMAPB(x, y, z, u, v, l) \
|
||||
{ DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::BinaryValue, u, v, \
|
||||
offsetof(x, l) }
|
||||
{ DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::BinaryValue, u, v, \
|
||||
my_offsetof(x, l) }
|
||||
|
||||
#define DFGIBREAK(x) \
|
||||
{ DictFilegroupInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 }
|
||||
|
@@ -68,7 +68,7 @@ print_head(const char * filename, const SchemaFile * sf)
   if (! checkonly) {
     ndbout << "----- Schemafile: " << filename << " -----" << endl;
     ndbout_c("Magic: %.*s ByteOrder: %.8x NdbVersion: %s FileSize: %d",
-             sizeof(sf->Magic),
+             (int) sizeof(sf->Magic),
              sf->Magic,
              sf->ByteOrder,
              version(sf->NdbVersion),
@@ -1066,6 +1066,7 @@ Dbtup::updateVarSizeNotNULL(Uint32* in_buffer,
     terrorCode= ZAI_INCONSISTENCY_ERROR;
     return false;
   }
+  return false;
 }

 bool
@@ -1485,6 +1486,7 @@ Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer,
     terrorCode= ZAI_INCONSISTENCY_ERROR;
     return false;
   }
+  return false;
 }

 bool
@@ -30,8 +30,8 @@ ConstRope::copy(char* buf) const {
 int
 ConstRope::compare(const char * str, size_t len) const {
   if(DEBUG_ROPE)
-    ndbout_c("ConstRope[ %d 0x%x 0x%x ]::compare(%s, %d)",
-             head.used, head.firstItem, head.lastItem, str, len);
+    ndbout_c("ConstRope[ %d 0x%x 0x%x ]::compare(%s, %d)",
+             head.used, head.firstItem, head.lastItem, str, (int) len);
   Uint32 left = head.used > len ? len : head.used;
   Ptr<Segment> curr;
   curr.i = head.firstItem;
@@ -60,7 +60,7 @@ ConstRope::compare(const char * str, size_t len) const {
     }
   }
   if(DEBUG_ROPE)
-    ndbout_c("ConstRope::compare(%s, %d) -> %d", str, len, head.used > len);
+    ndbout_c("ConstRope::compare(%s, %d) -> %d", str, (int) len, head.used > len);
   return head.used > len;
 }

@@ -91,7 +91,7 @@ Rope::copy(char* buf) const {
 int
 Rope::compare(const char * str, size_t len) const {
   if(DEBUG_ROPE)
-    ndbout_c("Rope::compare(%s, %d)", str, len);
+    ndbout_c("Rope::compare(%s, %d)", str, (int) len);
   Uint32 left = head.used > len ? len : head.used;
   Ptr<Segment> curr;
   curr.i = head.firstItem;
@@ -100,7 +100,7 @@ Rope::compare(const char * str, size_t len) const {
     int res = memcmp(str, (const char*)curr.p->data, 4 * getSegmentSize());
     if(res != 0){
       if(DEBUG_ROPE)
-        ndbout_c("Rope::compare(%s, %d, %s) -> %d", str, len,
+        ndbout_c("Rope::compare(%s, %d, %s) -> %d", str, (int) len,
                  (const char*)curr.p->data, res);
       return res;
     }
@@ -120,14 +120,14 @@ Rope::compare(const char * str, size_t len) const {
     }
   }
   if(DEBUG_ROPE)
-    ndbout_c("Rope::compare(%s, %d) -> %d", str, len, head.used > len);
+    ndbout_c("Rope::compare(%s, %d) -> %d", str, (int) len, head.used > len);
   return head.used > len;
 }

 bool
 Rope::assign(const char * s, size_t len, Uint32 hash){
   if(DEBUG_ROPE)
-    ndbout_c("Rope::assign(%s, %d, 0x%x)", s, len, hash);
+    ndbout_c("Rope::assign(%s, %d, 0x%x)", s, (int) len, hash);
   m_hash = hash;
   head.used = (head.used + 3) / 4;
   release();
@@ -184,7 +184,7 @@ ndb_mgm_create_handle()
   h->mgmd_version_minor= -1;
   h->mgmd_version_build= -1;

-  DBUG_PRINT("info", ("handle=0x%x", (UintPtr)h));
+  DBUG_PRINT("info", ("handle: 0x%lx", (ulong) h));
   DBUG_RETURN(h);
 }

@@ -201,7 +201,7 @@ int
 ndb_mgm_set_connectstring(NdbMgmHandle handle, const char * mgmsrv)
 {
   DBUG_ENTER("ndb_mgm_set_connectstring");
-  DBUG_PRINT("info", ("handle=0x%x", (UintPtr)handle));
+  DBUG_PRINT("info", ("handle: 0x%lx", (ulong) handle));
   handle->cfg.~LocalConfig();
   new (&(handle->cfg)) LocalConfig;
   if (!handle->cfg.init(mgmsrv, 0) ||
@@ -243,7 +243,7 @@ ndb_mgm_destroy_handle(NdbMgmHandle * handle)
   DBUG_ENTER("ndb_mgm_destroy_handle");
   if(!handle)
     DBUG_VOID_RETURN;
-  DBUG_PRINT("info", ("handle=0x%x", (UintPtr)(* handle)));
+  DBUG_PRINT("info", ("handle: 0x%lx", (ulong) (* handle)));
   /**
    * important! only disconnect if connected
    * other code relies on this
@@ -768,7 +768,7 @@ Ndb::getAutoIncrementValue(const char* aTableName,
   TupleIdRange & range = info->m_tuple_id_range;
   if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1)
     DBUG_RETURN(-1);
-  DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId));
+  DBUG_PRINT("info", ("value %lu", (ulong) tupleId));
   DBUG_RETURN(0);
 }

@@ -791,7 +791,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,
   TupleIdRange & range = info->m_tuple_id_range;
   if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1)
     DBUG_RETURN(-1);
-  DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId));
+  DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
   DBUG_RETURN(0);
 }

@@ -806,7 +806,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,

   if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1)
     DBUG_RETURN(-1);
-  DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId));
+  DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
   DBUG_RETURN(0);
 }

|
||||
{
|
||||
assert(range.m_first_tuple_id < range.m_last_tuple_id);
|
||||
tupleId = ++range.m_first_tuple_id;
|
||||
DBUG_PRINT("info", ("next cached value %llu", (ulonglong)tupleId));
|
||||
DBUG_PRINT("info", ("next cached value %lu", (ulong)tupleId));
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -856,7 +856,7 @@ Ndb::readAutoIncrementValue(const char* aTableName,
|
||||
TupleIdRange & range = info->m_tuple_id_range;
|
||||
if (readTupleIdFromNdb(table, range, tupleId) == -1)
|
||||
DBUG_RETURN(-1);
|
||||
DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId));
|
||||
DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
@ -879,7 +879,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable,
|
||||
TupleIdRange & range = info->m_tuple_id_range;
|
||||
if (readTupleIdFromNdb(table, range, tupleId) == -1)
|
||||
DBUG_RETURN(-1);
|
||||
DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId));
|
||||
DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
@ -893,7 +893,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable,
|
||||
|
||||
if (readTupleIdFromNdb(table, range, tupleId) == -1)
|
||||
DBUG_RETURN(-1);
|
||||
DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId));
|
||||
DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
@ -994,8 +994,8 @@ Ndb::setTupleIdInNdb(const NdbTableImpl* table,
|
||||
{
|
||||
range.m_first_tuple_id = tupleId - 1;
|
||||
DBUG_PRINT("info",
|
||||
("Setting next auto increment cached value to %llu",
|
||||
(ulonglong)tupleId));
|
||||
("Setting next auto increment cached value to %lu",
|
||||
(ulong)tupleId));
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
}
|
||||
@@ -1049,7 +1049,8 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table,
 {
   DBUG_ENTER("Ndb::opTupleIdOnNdb");
   Uint32 aTableId = table->m_id;
-  DBUG_PRINT("enter", ("table=%u value=%llu op=%u", aTableId, opValue, op));
+  DBUG_PRINT("enter", ("table: %u value: %lu op: %u",
+                       aTableId, (ulong) opValue, op));

   NdbTransaction* tConnection = NULL;
   NdbOperation* tOperation = NULL;
@@ -1117,8 +1118,8 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table,
       else
       {
         DBUG_PRINT("info",
-                   ("Setting next auto increment value (db) to %llu",
-                    (ulonglong)opValue));
+                   ("Setting next auto increment value (db) to %lu",
+                    (ulong) opValue));
         range.m_first_tuple_id = range.m_last_tuple_id = opValue - 1;
       }
       break;
@@ -1244,9 +1245,9 @@ int Ndb::setDatabaseAndSchemaName(const NdbDictionary::Table* t)
     if (s2 && s2 != s1 + 1) {
       char buf[NAME_LEN + 1];
       if (s1 - s0 <= NAME_LEN && s2 - (s1 + 1) <= NAME_LEN) {
-        sprintf(buf, "%.*s", s1 - s0, s0);
+        sprintf(buf, "%.*s", (int) (s1 - s0), s0);
         setDatabaseName(buf);
-        sprintf(buf, "%.*s", s2 - (s1 + 1), s1 + 1);
+        sprintf(buf, "%.*s", (int) (s2 - (s1 + 1)), s1 + 1);
         setDatabaseSchemaName(buf);
         return 0;
       }
@@ -1508,9 +1508,9 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep)
   else
   {
     /** out of order something */
-    ndbout_c("out of order bucket: %d gci: %lld m_latestGCI: %lld",
-             bucket-(Gci_container*)m_active_gci.getBase(),
-             gci, m_latestGCI);
+    ndbout_c("out of order bucket: %d gci: %ld m_latestGCI: %ld",
+             (int) (bucket-(Gci_container*)m_active_gci.getBase()),
+             (long) gci, (long) m_latestGCI);
     bucket->m_state = Gci_container::GC_COMPLETE;
     bucket->m_gcp_complete_rep_count = 1; // Prevent from being reused
     m_latest_complete_GCI = gci;
@@ -669,9 +669,9 @@ NdbScanOperation::doSend(int ProcessorId)
 void NdbScanOperation::close(bool forceSend, bool releaseOp)
 {
   DBUG_ENTER("NdbScanOperation::close");
-  DBUG_PRINT("enter", ("this=%x tcon=%x con=%x force=%d release=%d",
-                       (UintPtr)this,
-                       (UintPtr)m_transConnection, (UintPtr)theNdbCon,
+  DBUG_PRINT("enter", ("this: 0x%lx tcon: 0x%lx con: 0x%lx force: %d release: %d",
+                       (long) this,
+                       (long) m_transConnection, (long) theNdbCon,
                        forceSend, releaseOp));

   if(m_transConnection){
@@ -1010,7 +1010,7 @@ void
 NdbTransaction::releaseExecutedScanOperation(NdbIndexScanOperation* cursorOp)
 {
   DBUG_ENTER("NdbTransaction::releaseExecutedScanOperation");
-  DBUG_PRINT("enter", ("this=0x%x op=0x%x", (UintPtr)this, (UintPtr)cursorOp));
+  DBUG_PRINT("enter", ("this: 0x%lx op: 0x%lx", (ulong) this, (ulong) cursorOp));

   releaseScanOperation(&m_firstExecutedScanOp, 0, cursorOp);

@@ -361,7 +361,7 @@ void
 Ndb::releaseScanOperation(NdbIndexScanOperation* aScanOperation)
 {
   DBUG_ENTER("Ndb::releaseScanOperation");
-  DBUG_PRINT("enter", ("op=%x", (UintPtr)aScanOperation));
+  DBUG_PRINT("enter", ("op: 0x%lx", (ulong) aScanOperation));
 #ifdef ndb_release_check_dup
   { NdbIndexScanOperation* tOp = theScanOpIdleList;
     while (tOp != NULL) {