Removed compiler warnings

Ensure that my_size_t is always unsigned (to get predictable results from system to system)
Removed some %lld, as these are not portable
This commit is contained in:
monty@mysql.com/nosik.monty.fi 2006-11-27 18:16:08 +02:00
parent 313f23a93a
commit 0e149b7170
35 changed files with 168 additions and 152 deletions

View File

@ -5,7 +5,7 @@ configure="./configure $base_configs $extra_configs"
commands="\ commands="\
$make -k distclean || true $make -k distclean || true
/bin/rm -rf */.deps/*.P config.cache storage/innobase/config.cache autom4te.cache innobase/autom4te.cache; /bin/rm -rf */.deps/*.P configure config.cache storage/*/configure storage/*/config.cache autom4te.cache storage/*/autom4te.cache;
path=`dirname $0` path=`dirname $0`
. \"$path/autorun.sh\"" . \"$path/autorun.sh\""

View File

@ -1560,8 +1560,8 @@ static uint dump_routines_for_db(char *db)
if the user has EXECUTE privilege he see routine names, but NOT the if the user has EXECUTE privilege he see routine names, but NOT the
routine body of other routines that are not the creator of! routine body of other routines that are not the creator of!
*/ */
DBUG_PRINT("info",("length of body for %s row[2] '%s' is %d", DBUG_PRINT("info",("length of body for %s row[2] '%s' is %ld",
routine_name, row[2], strlen(row[2]))); routine_name, row[2], (long) strlen(row[2])));
if (strlen(row[2])) if (strlen(row[2]))
{ {
char *query_str= NULL; char *query_str= NULL;

View File

@ -592,7 +592,7 @@ get_random_string(char *buf)
DBUG_ENTER("get_random_string"); DBUG_ENTER("get_random_string");
for (x= RAND_STRING_SIZE; x > 0; x--) for (x= RAND_STRING_SIZE; x > 0; x--)
*buf_ptr++= ALPHANUMERICS[random() % ALPHANUMERICS_SIZE]; *buf_ptr++= ALPHANUMERICS[random() % ALPHANUMERICS_SIZE];
DBUG_PRINT("info", ("random string: '%*s'", buf_ptr - buf, buf)); DBUG_PRINT("info", ("random string: '%*s'", (int) (buf_ptr - buf), buf));
DBUG_RETURN(buf_ptr - buf); DBUG_RETURN(buf_ptr - buf);
} }

View File

@ -893,8 +893,8 @@ int dyn_string_cmp(DYNAMIC_STRING* ds, const char *fname)
die(NullS); die(NullS);
if (!eval_result && (uint) stat_info.st_size != ds->length) if (!eval_result && (uint) stat_info.st_size != ds->length)
{ {
DBUG_PRINT("info",("Size differs: result size: %u file size: %llu", DBUG_PRINT("info",("Size differs: result size: %u file size: %lu",
ds->length, stat_info.st_size)); ds->length, (ulong) stat_info.st_size));
DBUG_PRINT("info",("result: '%s'", ds->str)); DBUG_PRINT("info",("result: '%s'", ds->str));
DBUG_RETURN(RESULT_LENGTH_MISMATCH); DBUG_RETURN(RESULT_LENGTH_MISMATCH);
} }
@ -3077,14 +3077,14 @@ void do_connect(struct st_command *command)
else if (!strncmp(con_options, "COMPRESS", 8)) else if (!strncmp(con_options, "COMPRESS", 8))
con_compress= 1; con_compress= 1;
else else
die("Illegal option to connect: %.*s", end - con_options, con_options); die("Illegal option to connect: %.*s", (int) (end - con_options), con_options);
/* Process next option */ /* Process next option */
con_options= end; con_options= end;
} }
if (next_con == connections_end) if (next_con == connections_end)
die("Connection limit exhausted, you can have max %d connections", die("Connection limit exhausted, you can have max %ld connections",
(sizeof(connections)/sizeof(struct st_connection))); (long) (sizeof(connections)/sizeof(struct st_connection)));
if (find_connection_by_name(ds_connection_name.str)) if (find_connection_by_name(ds_connection_name.str))
die("Connection %s already exists", ds_connection_name.str); die("Connection %s already exists", ds_connection_name.str);

View File

@ -735,7 +735,8 @@ _rl_read_file (filename, sizep)
file_size = (size_t)finfo.st_size; file_size = (size_t)finfo.st_size;
/* check for overflow on very large files */ /* check for overflow on very large files */
if (file_size != finfo.st_size || file_size + 1 < file_size) if ((long long) file_size != (long long) finfo.st_size ||
file_size + 1 < file_size)
{ {
if (file >= 0) if (file >= 0)
close (file); close (file);

View File

@ -184,7 +184,8 @@ read_history_range (filename, from, to)
file_size = (size_t)finfo.st_size; file_size = (size_t)finfo.st_size;
/* check for overflow on very large files */ /* check for overflow on very large files */
if (file_size != finfo.st_size || file_size + 1 < file_size) if ((long long) file_size != (long long) finfo.st_size ||
file_size + 1 < file_size)
{ {
errno = overflow_errno; errno = overflow_errno;
goto error_and_exit; goto error_and_exit;
@ -333,7 +334,8 @@ history_truncate_file (fname, lines)
file_size = (size_t)finfo.st_size; file_size = (size_t)finfo.st_size;
/* check for overflow on very large files */ /* check for overflow on very large files */
if (file_size != finfo.st_size || file_size + 1 < file_size) if ((long long) file_size != (long long) finfo.st_size ||
file_size + 1 < file_size)
{ {
close (file); close (file);
#if defined (EFBIG) #if defined (EFBIG)

View File

@ -869,9 +869,8 @@ typedef long my_ptrdiff_t;
typedef long long my_ptrdiff_t; typedef long long my_ptrdiff_t;
#endif #endif
#if HAVE_SIZE_T /* We can't set my_size_t to size_t as we want my_size_t to be unsigned */
typedef size_t my_size_t; #if SIZEOF_CHARP <= SIZEOF_LONG
#elif SIZEOF_CHARP <= SIZEOF_LONG
typedef unsigned long my_size_t; typedef unsigned long my_size_t;
#else #else
typedef unsigned long long my_size_t; typedef unsigned long long my_size_t;
@ -886,6 +885,22 @@ typedef unsigned long long my_size_t;
#define ADD_TO_PTR(ptr,size,type) (type) ((byte*) (ptr)+size) #define ADD_TO_PTR(ptr,size,type) (type) ((byte*) (ptr)+size)
#define PTR_BYTE_DIFF(A,B) (my_ptrdiff_t) ((byte*) (A) - (byte*) (B)) #define PTR_BYTE_DIFF(A,B) (my_ptrdiff_t) ((byte*) (A) - (byte*) (B))
/*
Custom version of standard offsetof() macro which can be used to get
offsets of members in class for non-POD types (according to the current
version of C++ standard offsetof() macro can't be used in such cases and
attempt to do so causes warnings to be emitted, OTOH in many cases it is
still OK to assume that all instances of the class has the same offsets
for the same members).
This is temporary solution which should be removed once File_parser class
and related routines are refactored.
*/
#define my_offsetof(TYPE, MEMBER) \
((size_t)((char *)&(((TYPE *)0x10)->MEMBER) - (char*)0x10))
#define NullS (char *) 0 #define NullS (char *) 0
/* Nowdays we do not support MessyDos */ /* Nowdays we do not support MessyDos */
#ifndef NEAR #ifndef NEAR

View File

@ -1218,7 +1218,8 @@ Event_queue_element::compute_next_execution_time()
my_tz_UTC->gmt_sec_to_TIME(&time_now, current_thd->query_start()); my_tz_UTC->gmt_sec_to_TIME(&time_now, current_thd->query_start());
DBUG_PRINT("info",("NOW=[%llu]", TIME_to_ulonglong_datetime(&time_now))); DBUG_PRINT("info",("NOW: [%lu]",
(ulong) TIME_to_ulonglong_datetime(&time_now)));
/* if time_now is after ends don't execute anymore */ /* if time_now is after ends don't execute anymore */
if (!ends_null && (tmp= my_time_compare(&ends, &time_now)) == -1) if (!ends_null && (tmp= my_time_compare(&ends, &time_now)) == -1)
@ -1300,7 +1301,8 @@ Event_queue_element::compute_next_execution_time()
} }
else else
{ {
DBUG_PRINT("info",("Next[%llu]",TIME_to_ulonglong_datetime(&next_exec))); DBUG_PRINT("info",("Next[%lu]",
(ulong) TIME_to_ulonglong_datetime(&next_exec)));
execute_at= next_exec; execute_at= next_exec;
execute_at_null= FALSE; execute_at_null= FALSE;
} }
@ -1322,7 +1324,8 @@ Event_queue_element::compute_next_execution_time()
expression, interval)) expression, interval))
goto err; goto err;
execute_at= next_exec; execute_at= next_exec;
DBUG_PRINT("info",("Next[%llu]",TIME_to_ulonglong_datetime(&next_exec))); DBUG_PRINT("info",("Next[%lu]",
(ulong) TIME_to_ulonglong_datetime(&next_exec)));
} }
else else
{ {
@ -1356,7 +1359,8 @@ Event_queue_element::compute_next_execution_time()
expression, interval)) expression, interval))
goto err; goto err;
execute_at= next_exec; execute_at= next_exec;
DBUG_PRINT("info",("Next[%llu]",TIME_to_ulonglong_datetime(&next_exec))); DBUG_PRINT("info",("Next[%lu]",
(ulong) TIME_to_ulonglong_datetime(&next_exec)));
} }
execute_at_null= FALSE; execute_at_null= FALSE;
} }
@ -1393,8 +1397,8 @@ Event_queue_element::compute_next_execution_time()
} }
else else
{ {
DBUG_PRINT("info", ("Next[%llu]", DBUG_PRINT("info", ("Next[%lu]",
TIME_to_ulonglong_datetime(&next_exec))); (ulong) TIME_to_ulonglong_datetime(&next_exec)));
execute_at= next_exec; execute_at= next_exec;
execute_at_null= FALSE; execute_at_null= FALSE;
} }

View File

@ -776,7 +776,7 @@ Event_scheduler::dump_internal_status()
mutex_last_unlocked_at_line); mutex_last_unlocked_at_line);
printf("WOC : %s\n", waiting_on_cond? "YES":"NO"); printf("WOC : %s\n", waiting_on_cond? "YES":"NO");
printf("Workers : %u\n", workers_count()); printf("Workers : %u\n", workers_count());
printf("Executed : %llu\n", started_events); printf("Executed : %lu\n", (ulong) started_events);
printf("Data locked: %s\n", mutex_scheduler_data_locked ? "YES":"NO"); printf("Data locked: %s\n", mutex_scheduler_data_locked ? "YES":"NO");
DBUG_VOID_RETURN; DBUG_VOID_RETURN;

View File

@ -239,7 +239,7 @@ public:
*/ */
my_size_t last_null_byte() const { my_size_t last_null_byte() const {
my_size_t bytes= do_last_null_byte(); my_size_t bytes= do_last_null_byte();
DBUG_PRINT("debug", ("last_null_byte() ==> %d", bytes)); DBUG_PRINT("debug", ("last_null_byte() ==> %ld", (long) bytes));
DBUG_ASSERT(bytes <= table->s->null_bytes); DBUG_ASSERT(bytes <= table->s->null_bytes);
return bytes; return bytes;
} }

View File

@ -3563,9 +3563,11 @@ restart:
if (do_ndbcluster_binlog_close_connection) if (do_ndbcluster_binlog_close_connection)
{ {
DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection: %d, " DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection: %d, "
"ndb_latest_handled_binlog_epoch: %llu, " "ndb_latest_handled_binlog_epoch: %lu, "
"*p_latest_trans_gci: %llu", do_ndbcluster_binlog_close_connection, "*p_latest_trans_gci: %lu",
ndb_latest_handled_binlog_epoch, *p_latest_trans_gci)); do_ndbcluster_binlog_close_connection,
(ulong) ndb_latest_handled_binlog_epoch,
(ulong) *p_latest_trans_gci));
} }
#endif #endif
#ifdef RUN_NDB_BINLOG_TIMER #ifdef RUN_NDB_BINLOG_TIMER
@ -3653,9 +3655,10 @@ restart:
do_ndbcluster_binlog_close_connection= BCCC_restart; do_ndbcluster_binlog_close_connection= BCCC_restart;
if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running) if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running)
{ {
sql_print_error("NDB Binlog: latest transaction in epoch %lld not in binlog " sql_print_error("NDB Binlog: latest transaction in epoch %lu not in binlog "
"as latest received epoch is %lld", "as latest received epoch is %lu",
*p_latest_trans_gci, ndb_latest_received_binlog_epoch); (ulong) *p_latest_trans_gci,
(ulong) ndb_latest_received_binlog_epoch);
} }
} }
} }
@ -3841,9 +3844,10 @@ restart:
do_ndbcluster_binlog_close_connection= BCCC_restart; do_ndbcluster_binlog_close_connection= BCCC_restart;
if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running) if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running)
{ {
sql_print_error("NDB Binlog: latest transaction in epoch %lld not in binlog " sql_print_error("NDB Binlog: latest transaction in epoch %lu not in binlog "
"as latest received epoch is %lld", "as latest received epoch is %lu",
*p_latest_trans_gci, ndb_latest_received_binlog_epoch); (ulong) *p_latest_trans_gci,
(ulong) ndb_latest_received_binlog_epoch);
} }
} }
} }
@ -3875,7 +3879,7 @@ restart:
row.master_log_file= start.file_name(); row.master_log_file= start.file_name();
row.master_log_pos= start.file_pos(); row.master_log_pos= start.file_pos();
DBUG_PRINT("info", ("COMMIT gci: %lld", gci)); DBUG_PRINT("info", ("COMMIT gci: %lu", (ulong) gci));
if (ndb_update_binlog_index) if (ndb_update_binlog_index)
ndb_add_binlog_index(thd, &row); ndb_add_binlog_index(thd, &row);
ndb_latest_applied_binlog_epoch= gci; ndb_latest_applied_binlog_epoch= gci;

View File

@ -2427,7 +2427,7 @@ repeat:
do do
{ {
DBUG_PRINT("info", ("external_lock(thd, %d) iteration %d", DBUG_PRINT("info", ("external_lock(thd, %d) iteration %d",
lock_type, (file - m_file))); lock_type, (int) (file - m_file)));
if ((error= (*file)->external_lock(thd, lock_type))) if ((error= (*file)->external_lock(thd, lock_type)))
{ {
if (F_UNLCK != lock_type) if (F_UNLCK != lock_type)
@ -2508,7 +2508,7 @@ THR_LOCK_DATA **ha_partition::store_lock(THD *thd,
file= m_file; file= m_file;
do do
{ {
DBUG_PRINT("info", ("store lock %d iteration", (file - m_file))); DBUG_PRINT("info", ("store lock %d iteration", (int) (file - m_file)));
to= (*file)->store_lock(thd, to, lock_type); to= (*file)->store_lock(thd, to, lock_type);
} while (*(++file)); } while (*(++file));
DBUG_RETURN(to); DBUG_RETURN(to);

View File

@ -978,8 +978,8 @@ String *Item_func_insert::val_str(String *str)
if (length > res->length() - start) if (length > res->length() - start)
length= res->length() - start; length= res->length() - start;
if (res->length() - length + res2->length() > if ((ulonglong) (res->length() - length + res2->length()) >
current_thd->variables.max_allowed_packet) (ulonglong) current_thd->variables.max_allowed_packet)
{ {
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER_WARN_ALLOWED_PACKET_OVERFLOWED,
@ -2426,7 +2426,7 @@ String *Item_func_lpad::val_str(String *str)
pad_char_length= pad->numchars(); pad_char_length= pad->numchars();
byte_count= count * collation.collation->mbmaxlen; byte_count= count * collation.collation->mbmaxlen;
if (byte_count > current_thd->variables.max_allowed_packet) if ((ulonglong) byte_count > current_thd->variables.max_allowed_packet)
{ {
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER_WARN_ALLOWED_PACKET_OVERFLOWED,

View File

@ -5376,7 +5376,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
const byte* const ptr_rows_data= var_start + byte_count + 1; const byte* const ptr_rows_data= var_start + byte_count + 1;
my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf); my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf);
DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %u", DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %lu",
m_table_id, m_flags, m_width, data_size)); m_table_id, m_flags, m_width, data_size));
m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME)); m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME));
@ -5416,8 +5416,8 @@ int Rows_log_event::do_add_row_data(byte *const row_data,
would save binlog space. TODO would save binlog space. TODO
*/ */
DBUG_ENTER("Rows_log_event::do_add_row_data"); DBUG_ENTER("Rows_log_event::do_add_row_data");
DBUG_PRINT("enter", ("row_data: 0x%lx length: %u", (ulong) row_data, DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data,
length)); (ulong) length));
/* /*
Don't print debug messages when running valgrind since they can Don't print debug messages when running valgrind since they can
trigger false warnings. trigger false warnings.
@ -5597,7 +5597,8 @@ unpack_row(RELAY_LOG_INFO *rli,
uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG; uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG;
Field *const f= *field_ptr; Field *const f= *field_ptr;
DBUG_PRINT("info", ("processing column '%s' @ 0x%lx", f->field_name, f->ptr)); DBUG_PRINT("info", ("processing column '%s' @ 0x%lx", f->field_name,
(long) f->ptr));
if (event_type == WRITE_ROWS_EVENT && (f->flags & mask) == mask) if (event_type == WRITE_ROWS_EVENT && (f->flags & mask) == mask)
{ {
slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD, slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD,
@ -6121,7 +6122,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len,
uchar *ptr_after_colcnt= (uchar*) ptr_colcnt; uchar *ptr_after_colcnt= (uchar*) ptr_colcnt;
m_colcnt= net_field_length(&ptr_after_colcnt); m_colcnt= net_field_length(&ptr_after_colcnt);
DBUG_PRINT("info",("m_dblen: %d off: %ld m_tbllen: %d off: %ld m_colcnt: %lu off: %ld", DBUG_PRINT("info",("m_dblen: %lu off: %ld m_tbllen: %lu off: %ld m_colcnt: %lu off: %ld",
m_dblen, (long) (ptr_dblen-(const byte*)vpart), m_dblen, (long) (ptr_dblen-(const byte*)vpart),
m_tbllen, (long) (ptr_tbllen-(const byte*)vpart), m_tbllen, (long) (ptr_tbllen-(const byte*)vpart),
m_colcnt, (long) (ptr_colcnt-(const byte*)vpart))); m_colcnt, (long) (ptr_colcnt-(const byte*)vpart)));
@ -6527,10 +6528,10 @@ copy_extra_record_fields(TABLE *table,
my_ptrdiff_t master_fields) my_ptrdiff_t master_fields)
{ {
DBUG_PRINT("info", ("Copying to 0x%lx " DBUG_PRINT("info", ("Copying to 0x%lx "
"from field %ld at offset %u " "from field %lu at offset %lu "
"to field %d at offset %lu", "to field %d at offset %lu",
(long) table->record[0], (long) table->record[0],
master_fields, master_reclength, (ulong) master_fields, (ulong) master_reclength,
table->s->fields, table->s->reclength)); table->s->fields, table->s->reclength));
/* /*
Copying the extra fields of the slave that does not exist on Copying the extra fields of the slave that does not exist on

View File

@ -1610,7 +1610,7 @@ static void network_init(void)
if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1)) if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1))
{ {
sql_print_error("The socket file path is too long (> %u): %s", sql_print_error("The socket file path is too long (> %u): %s",
sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port); (uint) sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
unireg_abort(1); unireg_abort(1);
} }
if ((unix_sock= socket(AF_UNIX, SOCK_STREAM, 0)) < 0) if ((unix_sock= socket(AF_UNIX, SOCK_STREAM, 0)) < 0)

View File

@ -106,21 +106,4 @@ public:
MEM_ROOT *mem_root, MEM_ROOT *mem_root,
bool bad_format_errors); bool bad_format_errors);
}; };
/*
Custom version of standard offsetof() macro which can be used to get
offsets of members in class for non-POD types (according to the current
version of C++ standard offsetof() macro can't be used in such cases and
attempt to do so causes warnings to be emitted, OTOH in many cases it is
still OK to assume that all instances of the class has the same offsets
for the same members).
This is temporary solution which should be removed once File_parser class
and related routines are refactored.
*/
#define my_offsetof(TYPE, MEMBER) \
((size_t)((char *)&(((TYPE *)0x10)->MEMBER) - (char*)0x10))
#endif /* _PARSE_FILE_H_ */ #endif /* _PARSE_FILE_H_ */

View File

@ -25,7 +25,7 @@ field_length_from_packed(enum_field_types const field_type,
switch (field_type) { switch (field_type) {
case MYSQL_TYPE_DECIMAL: case MYSQL_TYPE_DECIMAL:
case MYSQL_TYPE_NEWDECIMAL: case MYSQL_TYPE_NEWDECIMAL:
length= ~0UL; length= ~(uint32) 0;
break; break;
case MYSQL_TYPE_YEAR: case MYSQL_TYPE_YEAR:
case MYSQL_TYPE_TINY: case MYSQL_TYPE_TINY:
@ -71,7 +71,7 @@ field_length_from_packed(enum_field_types const field_type,
break; break;
break; break;
case MYSQL_TYPE_BIT: case MYSQL_TYPE_BIT:
length= ~0UL; length= ~(uint32) 0;
break; break;
default: default:
/* This case should never be chosen */ /* This case should never be chosen */
@ -85,7 +85,7 @@ field_length_from_packed(enum_field_types const field_type,
case MYSQL_TYPE_SET: case MYSQL_TYPE_SET:
case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_VAR_STRING:
case MYSQL_TYPE_VARCHAR: case MYSQL_TYPE_VARCHAR:
length= ~0UL; // NYI length= ~(uint32) 0; // NYI
break; break;
case MYSQL_TYPE_TINY_BLOB: case MYSQL_TYPE_TINY_BLOB:
@ -93,7 +93,7 @@ field_length_from_packed(enum_field_types const field_type,
case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_LONG_BLOB:
case MYSQL_TYPE_BLOB: case MYSQL_TYPE_BLOB:
case MYSQL_TYPE_GEOMETRY: case MYSQL_TYPE_GEOMETRY:
length= ~0UL; // NYI length= ~(uint32) 0; // NYI
break; break;
} }
@ -131,7 +131,8 @@ table_def::compatible_with(RELAY_LOG_INFO *rli, TABLE *table)
slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF, slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF,
"Table width mismatch - " "Table width mismatch - "
"received %u columns, %s.%s has %u columns", "received %u columns, %s.%s has %u columns",
size(), tsh->db.str, tsh->table_name.str, tsh->fields); (uint) size(), tsh->db.str, tsh->table_name.str,
tsh->fields);
} }
for (uint col= 0 ; col < cols_to_check ; ++col) for (uint col= 0 ; col < cols_to_check ; ++col)

View File

@ -114,8 +114,8 @@ void mysql_client_binlog_statement(THD* thd)
order to be able to read exactly what is necessary. order to be able to read exactly what is necessary.
*/ */
DBUG_PRINT("info",("binlog base64 decoded_len=%d, bytes_decoded=%d", DBUG_PRINT("info",("binlog base64 decoded_len: %lu bytes_decoded: %d",
decoded_len, bytes_decoded)); (ulong) decoded_len, bytes_decoded));
/* /*
Now we start to read events of the buffer, until there are no Now we start to read events of the buffer, until there are no
@ -161,7 +161,7 @@ void mysql_client_binlog_statement(THD* thd)
(long) (bufptr+EVENT_TYPE_OFFSET))); (long) (bufptr+EVENT_TYPE_OFFSET)));
DBUG_PRINT("info", ("bytes_decoded: %d bufptr: 0x%lx buf[EVENT_LEN_OFFSET]: %lu", DBUG_PRINT("info", ("bytes_decoded: %d bufptr: 0x%lx buf[EVENT_LEN_OFFSET]: %lu",
bytes_decoded, (long) bufptr, bytes_decoded, (long) bufptr,
uint4korr(bufptr+EVENT_LEN_OFFSET))); (ulong) uint4korr(bufptr+EVENT_LEN_OFFSET)));
#endif #endif
ev->thd= thd; ev->thd= thd;
if (int err= ev->exec_event(thd->rli_fake)) if (int err= ev->exec_event(thd->rli_fake))

View File

@ -1227,9 +1227,9 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
if (engine_data != table->engine_data()) if (engine_data != table->engine_data())
{ {
DBUG_PRINT("qcache", DBUG_PRINT("qcache",
("Handler require invalidation queries of %s.%s %lld-%lld", ("Handler require invalidation queries of %s.%s %lu-%lu",
table_list.db, table_list.alias, table_list.db, table_list.alias,
engine_data, table->engine_data())); (ulong) engine_data, (ulong) table->engine_data()));
invalidate_table((byte *) table->db(), table->key_length()); invalidate_table((byte *) table->db(), table->key_length());
} }
else else
@ -1250,10 +1250,10 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
#ifndef EMBEDDED_LIBRARY #ifndef EMBEDDED_LIBRARY
do do
{ {
DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %u)", DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %lu)",
result_block->length, result_block->used, result_block->length, result_block->used,
result_block->headers_len()+ (ulong) (result_block->headers_len()+
ALIGN_SIZE(sizeof(Query_cache_result)))); ALIGN_SIZE(sizeof(Query_cache_result)))));
Query_cache_result *result = result_block->result(); Query_cache_result *result = result_block->result();
if (net_real_write(&thd->net, result->data(), if (net_real_write(&thd->net, result->data(),
@ -2469,11 +2469,11 @@ Query_cache::insert_table(uint key_len, char *key,
table_block->table()->engine_data() != engine_data) table_block->table()->engine_data() != engine_data)
{ {
DBUG_PRINT("qcache", DBUG_PRINT("qcache",
("Handler require invalidation queries of %s.%s %lld-%lld", ("Handler require invalidation queries of %s.%s %lu-%lu",
table_block->table()->db(), table_block->table()->db(),
table_block->table()->table(), table_block->table()->table(),
engine_data, (ulong) engine_data,
table_block->table()->engine_data())); (ulong) table_block->table()->engine_data()));
/* /*
as far as we delete all queries with this table, table block will be as far as we delete all queries with this table, table block will be
deleted, too deleted, too
@ -3759,7 +3759,7 @@ my_bool Query_cache::check_integrity(bool locked)
{ {
DBUG_PRINT("error", DBUG_PRINT("error",
("block 0x%lx do not aligned by %d", (ulong) block, ("block 0x%lx do not aligned by %d", (ulong) block,
ALIGN_SIZE(1))); (int) ALIGN_SIZE(1)));
result = 1; result = 1;
} }
// Check memory allocation // Check memory allocation

View File

@ -1743,8 +1743,8 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
tz_leapcnt++; tz_leapcnt++;
DBUG_PRINT("info", DBUG_PRINT("info",
("time_zone_leap_second table: tz_leapcnt=%u tt_time=%lld offset=%ld", ("time_zone_leap_second table: tz_leapcnt:%u tt_time: %lu offset: %ld",
tz_leapcnt, (longlong)tz_lsis[tz_leapcnt-1].ls_trans, tz_leapcnt, (ulong) tz_lsis[tz_leapcnt-1].ls_trans,
tz_lsis[tz_leapcnt-1].ls_corr)); tz_lsis[tz_leapcnt-1].ls_corr));
res= table->file->index_next(table->record[0]); res= table->file->index_next(table->record[0]);
@ -2057,8 +2057,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
tz_info->timecnt++; tz_info->timecnt++;
DBUG_PRINT("info", DBUG_PRINT("info",
("time_zone_transition table: tz_id=%u tt_time=%lld tt_id=%u", ("time_zone_transition table: tz_id: %u tt_time:%lu tt_id: %u",
tzid, (longlong)ttime, ttid)); tzid, (ulong) ttime, ttid));
res= table->file->index_next_same(table->record[0], res= table->file->index_next_same(table->record[0],
(byte*)table->field[0]->ptr, 4); (byte*)table->field[0]->ptr, 4);

View File

@ -329,10 +329,12 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0])); DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1])); DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", *rows)); DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lu", (ulong) *rows));
DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point)); DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lu", (ulong) check_point));
DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment)); DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %lu",
DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", *forced_flushes)); (ulong) *auto_increment));
DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %lu",
(ulong) *forced_flushes));
DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path)); DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path));
DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr))); DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));
@ -385,12 +387,12 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
(uint)ARCHIVE_CHECK_HEADER)); (uint)ARCHIVE_CHECK_HEADER));
DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", DBUG_PRINT("ha_archive::write_meta_file", ("Version %d",
(uint)ARCHIVE_VERSION)); (uint)ARCHIVE_VERSION));
DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows)); DBUG_PRINT("ha_archive::write_meta_file", ("Rows %lu", (ulong) rows));
DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point)); DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %lu", (ulong) check_point));
DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu", DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %lu",
auto_increment)); (ulong) auto_increment));
DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu", DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %lu",
forced_flushes)); (ulong) forced_flushes));
DBUG_PRINT("ha_archive::write_meta_file", ("Real path %s", DBUG_PRINT("ha_archive::write_meta_file", ("Real path %s",
real_path)); real_path));
DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty)); DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
@ -1057,7 +1059,7 @@ int ha_archive::rnd_init(bool scan)
if (scan) if (scan)
{ {
scan_rows= share->rows_recorded; scan_rows= share->rows_recorded;
DBUG_PRINT("info", ("archive will retrieve %llu rows", scan_rows)); DBUG_PRINT("info", ("archive will retrieve %lu rows", (ulong) scan_rows));
stats.records= 0; stats.records= 0;
/* /*
@ -1318,7 +1320,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
share->rows_recorded++; share->rows_recorded++;
} }
} }
DBUG_PRINT("info", ("recovered %llu archive rows", share->rows_recorded)); DBUG_PRINT("info", ("recovered %lu archive rows", (ulong) share->rows_recorded));
my_free((char*)buf, MYF(0)); my_free((char*)buf, MYF(0));
if (rc && rc != HA_ERR_END_OF_FILE) if (rc && rc != HA_ERR_END_OF_FILE)

View File

@ -68,7 +68,7 @@ int heap_write(HP_INFO *info, const byte *record)
DBUG_RETURN(0); DBUG_RETURN(0);
err: err:
DBUG_PRINT("info",("Duplicate key: %d", keydef - share->keydef)); DBUG_PRINT("info",("Duplicate key: %d", (int) (keydef - share->keydef)));
info->errkey= keydef - share->keydef; info->errkey= keydef - share->keydef;
if (keydef->algorithm == HA_KEY_ALG_BTREE) if (keydef->algorithm == HA_KEY_ALG_BTREE)
{ {

View File

@ -1733,7 +1733,7 @@ os_file_set_size(
} }
/* Print about progress for each 100 MB written */ /* Print about progress for each 100 MB written */
if ((current_size + n_bytes) / (ib_longlong)(100 * 1024 * 1024) if ((ib_longlong) (current_size + n_bytes) / (ib_longlong)(100 * 1024 * 1024)
!= current_size / (ib_longlong)(100 * 1024 * 1024)) { != current_size / (ib_longlong)(100 * 1024 * 1024)) {
fprintf(stderr, " %lu00", fprintf(stderr, " %lu00",

View File

@ -1105,18 +1105,18 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts)
my_off_t total_count; my_off_t total_count;
char llbuf[32]; char llbuf[32];
DBUG_PRINT("info", ("column: %3u", count - huff_counts + 1)); DBUG_PRINT("info", ("column: %3u", (uint) (count - huff_counts) + 1));
if (verbose >= 2) if (verbose >= 2)
VOID(printf("column: %3u\n", count - huff_counts + 1)); VOID(printf("column: %3u\n", (uint) (count - huff_counts) + 1));
if (count->tree_buff) if (count->tree_buff)
{ {
DBUG_PRINT("info", ("number of distinct values: %u", DBUG_PRINT("info", ("number of distinct values: %u",
(count->tree_pos - count->tree_buff) / (uint) ((count->tree_pos - count->tree_buff) /
count->field_length)); count->field_length)));
if (verbose >= 2) if (verbose >= 2)
VOID(printf("number of distinct values: %u\n", VOID(printf("number of distinct values: %u\n",
(count->tree_pos - count->tree_buff) / (uint) ((count->tree_pos - count->tree_buff) /
count->field_length)); count->field_length)));
} }
total_count= 0; total_count= 0;
for (idx= 0; idx < 256; idx++) for (idx= 0; idx < 256; idx++)
@ -2279,8 +2279,8 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees)
if (bits > 8 * sizeof(code)) if (bits > 8 * sizeof(code))
{ {
VOID(fflush(stdout)); VOID(fflush(stdout));
VOID(fprintf(stderr, "error: Huffman code too long: %u/%u\n", VOID(fprintf(stderr, "error: Huffman code too long: %u/%lu\n",
bits, 8 * sizeof(code))); bits, (ulong) (8 * sizeof(code))));
errors++; errors++;
break; break;
} }

View File

@ -87,8 +87,8 @@ int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key,
mi=(info->current_table=(MYRG_TABLE *)queue_top(&(info->by_key)))->table; mi=(info->current_table=(MYRG_TABLE *)queue_top(&(info->by_key)))->table;
mi->once_flags|= RRND_PRESERVE_LASTINX; mi->once_flags|= RRND_PRESERVE_LASTINX;
DBUG_PRINT("info", ("using table no: %d", DBUG_PRINT("info", ("using table no: %u",
info->current_table - info->open_tables + 1)); (uint) (info->current_table - info->open_tables) + 1));
DBUG_DUMP("result key", (byte*) mi->lastkey, mi->lastkey_length); DBUG_DUMP("result key", (byte*) mi->lastkey, mi->lastkey_length);
DBUG_RETURN(_myrg_mi_read_record(mi,buf)); DBUG_RETURN(_myrg_mi_read_record(mi,buf));
} }

View File

@ -47,17 +47,17 @@ inline int my_decimal_get_binary_size(uint precision, uint scale)
#endif #endif
#define DTIMAP(x, y, z) \ #define DTIMAP(x, y, z) \
{ DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 } { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 }
#define DTIMAP2(x, y, z, u, v) \ #define DTIMAP2(x, y, z, u, v) \
{ DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 } { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 }
#define DTIMAPS(x, y, z, u, v) \ #define DTIMAPS(x, y, z, u, v) \
{ DictTabInfo::y, offsetof(x, z), SimpleProperties::StringValue, u, v, 0 } { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::StringValue, u, v, 0 }
#define DTIMAPB(x, y, z, u, v, l) \ #define DTIMAPB(x, y, z, u, v, l) \
{ DictTabInfo::y, offsetof(x, z), SimpleProperties::BinaryValue, u, v, \ { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::BinaryValue, u, v, \
offsetof(x, l) } my_offsetof(x, l) }
#define DTIBREAK(x) \ #define DTIBREAK(x) \
{ DictTabInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 } { DictTabInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 }
@ -602,17 +602,17 @@ public:
}; };
#define DFGIMAP(x, y, z) \ #define DFGIMAP(x, y, z) \
{ DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 } { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 }
#define DFGIMAP2(x, y, z, u, v) \ #define DFGIMAP2(x, y, z, u, v) \
{ DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 } { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 }
#define DFGIMAPS(x, y, z, u, v) \ #define DFGIMAPS(x, y, z, u, v) \
{ DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::StringValue, u, v, 0 } { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::StringValue, u, v, 0 }
#define DFGIMAPB(x, y, z, u, v, l) \ #define DFGIMAPB(x, y, z, u, v, l) \
{ DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::BinaryValue, u, v, \ { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::BinaryValue, u, v, \
offsetof(x, l) } my_offsetof(x, l) }
#define DFGIBREAK(x) \ #define DFGIBREAK(x) \
{ DictFilegroupInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 } { DictFilegroupInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 }

View File

@ -68,7 +68,7 @@ print_head(const char * filename, const SchemaFile * sf)
if (! checkonly) { if (! checkonly) {
ndbout << "----- Schemafile: " << filename << " -----" << endl; ndbout << "----- Schemafile: " << filename << " -----" << endl;
ndbout_c("Magic: %.*s ByteOrder: %.8x NdbVersion: %s FileSize: %d", ndbout_c("Magic: %.*s ByteOrder: %.8x NdbVersion: %s FileSize: %d",
sizeof(sf->Magic), (int) sizeof(sf->Magic),
sf->Magic, sf->Magic,
sf->ByteOrder, sf->ByteOrder,
version(sf->NdbVersion), version(sf->NdbVersion),

View File

@ -1066,6 +1066,7 @@ Dbtup::updateVarSizeNotNULL(Uint32* in_buffer,
terrorCode= ZAI_INCONSISTENCY_ERROR; terrorCode= ZAI_INCONSISTENCY_ERROR;
return false; return false;
} }
return false;
} }
bool bool
@ -1485,6 +1486,7 @@ Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer,
terrorCode= ZAI_INCONSISTENCY_ERROR; terrorCode= ZAI_INCONSISTENCY_ERROR;
return false; return false;
} }
return false;
} }
bool bool

View File

@ -30,8 +30,8 @@ ConstRope::copy(char* buf) const {
int int
ConstRope::compare(const char * str, size_t len) const { ConstRope::compare(const char * str, size_t len) const {
if(DEBUG_ROPE) if(DEBUG_ROPE)
ndbout_c("ConstRope[ %d 0x%x 0x%x ]::compare(%s, %d)", ndbout_c("ConstRope[ %d 0x%x 0x%x ]::compare(%s, %d)",
head.used, head.firstItem, head.lastItem, str, len); head.used, head.firstItem, head.lastItem, str, (int) len);
Uint32 left = head.used > len ? len : head.used; Uint32 left = head.used > len ? len : head.used;
Ptr<Segment> curr; Ptr<Segment> curr;
curr.i = head.firstItem; curr.i = head.firstItem;
@ -60,7 +60,7 @@ ConstRope::compare(const char * str, size_t len) const {
} }
} }
if(DEBUG_ROPE) if(DEBUG_ROPE)
ndbout_c("ConstRope::compare(%s, %d) -> %d", str, len, head.used > len); ndbout_c("ConstRope::compare(%s, %d) -> %d", str, (int) len, head.used > len);
return head.used > len; return head.used > len;
} }
@ -91,7 +91,7 @@ Rope::copy(char* buf) const {
int int
Rope::compare(const char * str, size_t len) const { Rope::compare(const char * str, size_t len) const {
if(DEBUG_ROPE) if(DEBUG_ROPE)
ndbout_c("Rope::compare(%s, %d)", str, len); ndbout_c("Rope::compare(%s, %d)", str, (int) len);
Uint32 left = head.used > len ? len : head.used; Uint32 left = head.used > len ? len : head.used;
Ptr<Segment> curr; Ptr<Segment> curr;
curr.i = head.firstItem; curr.i = head.firstItem;
@ -100,7 +100,7 @@ Rope::compare(const char * str, size_t len) const {
int res = memcmp(str, (const char*)curr.p->data, 4 * getSegmentSize()); int res = memcmp(str, (const char*)curr.p->data, 4 * getSegmentSize());
if(res != 0){ if(res != 0){
if(DEBUG_ROPE) if(DEBUG_ROPE)
ndbout_c("Rope::compare(%s, %d, %s) -> %d", str, len, ndbout_c("Rope::compare(%s, %d, %s) -> %d", str, (int) len,
(const char*)curr.p->data, res); (const char*)curr.p->data, res);
return res; return res;
} }
@ -120,14 +120,14 @@ Rope::compare(const char * str, size_t len) const {
} }
} }
if(DEBUG_ROPE) if(DEBUG_ROPE)
ndbout_c("Rope::compare(%s, %d) -> %d", str, len, head.used > len); ndbout_c("Rope::compare(%s, %d) -> %d", str, (int) len, head.used > len);
return head.used > len; return head.used > len;
} }
bool bool
Rope::assign(const char * s, size_t len, Uint32 hash){ Rope::assign(const char * s, size_t len, Uint32 hash){
if(DEBUG_ROPE) if(DEBUG_ROPE)
ndbout_c("Rope::assign(%s, %d, 0x%x)", s, len, hash); ndbout_c("Rope::assign(%s, %d, 0x%x)", s, (int) len, hash);
m_hash = hash; m_hash = hash;
head.used = (head.used + 3) / 4; head.used = (head.used + 3) / 4;
release(); release();

View File

@ -184,7 +184,7 @@ ndb_mgm_create_handle()
h->mgmd_version_minor= -1; h->mgmd_version_minor= -1;
h->mgmd_version_build= -1; h->mgmd_version_build= -1;
DBUG_PRINT("info", ("handle=0x%x", (UintPtr)h)); DBUG_PRINT("info", ("handle: 0x%lx", (ulong) h));
DBUG_RETURN(h); DBUG_RETURN(h);
} }
@ -201,7 +201,7 @@ int
ndb_mgm_set_connectstring(NdbMgmHandle handle, const char * mgmsrv) ndb_mgm_set_connectstring(NdbMgmHandle handle, const char * mgmsrv)
{ {
DBUG_ENTER("ndb_mgm_set_connectstring"); DBUG_ENTER("ndb_mgm_set_connectstring");
DBUG_PRINT("info", ("handle=0x%x", (UintPtr)handle)); DBUG_PRINT("info", ("handle: 0x%lx", (ulong) handle));
handle->cfg.~LocalConfig(); handle->cfg.~LocalConfig();
new (&(handle->cfg)) LocalConfig; new (&(handle->cfg)) LocalConfig;
if (!handle->cfg.init(mgmsrv, 0) || if (!handle->cfg.init(mgmsrv, 0) ||
@ -243,7 +243,7 @@ ndb_mgm_destroy_handle(NdbMgmHandle * handle)
DBUG_ENTER("ndb_mgm_destroy_handle"); DBUG_ENTER("ndb_mgm_destroy_handle");
if(!handle) if(!handle)
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
DBUG_PRINT("info", ("handle=0x%x", (UintPtr)(* handle))); DBUG_PRINT("info", ("handle: 0x%lx", (ulong) (* handle)));
/** /**
* important! only disconnect if connected * important! only disconnect if connected
* other code relies on this * other code relies on this

View File

@ -768,7 +768,7 @@ Ndb::getAutoIncrementValue(const char* aTableName,
TupleIdRange & range = info->m_tuple_id_range; TupleIdRange & range = info->m_tuple_id_range;
if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1) if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1)
DBUG_RETURN(-1); DBUG_RETURN(-1);
DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); DBUG_PRINT("info", ("value %lu", (ulong) tupleId));
DBUG_RETURN(0); DBUG_RETURN(0);
} }
@ -791,7 +791,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,
TupleIdRange & range = info->m_tuple_id_range; TupleIdRange & range = info->m_tuple_id_range;
if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1) if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1)
DBUG_RETURN(-1); DBUG_RETURN(-1);
DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_RETURN(0); DBUG_RETURN(0);
} }
@ -806,7 +806,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,
if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1) if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1)
DBUG_RETURN(-1); DBUG_RETURN(-1);
DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_RETURN(0); DBUG_RETURN(0);
} }
@ -819,7 +819,7 @@ Ndb::getTupleIdFromNdb(const NdbTableImpl* table,
{ {
assert(range.m_first_tuple_id < range.m_last_tuple_id); assert(range.m_first_tuple_id < range.m_last_tuple_id);
tupleId = ++range.m_first_tuple_id; tupleId = ++range.m_first_tuple_id;
DBUG_PRINT("info", ("next cached value %llu", (ulonglong)tupleId)); DBUG_PRINT("info", ("next cached value %lu", (ulong)tupleId));
} }
else else
{ {
@ -856,7 +856,7 @@ Ndb::readAutoIncrementValue(const char* aTableName,
TupleIdRange & range = info->m_tuple_id_range; TupleIdRange & range = info->m_tuple_id_range;
if (readTupleIdFromNdb(table, range, tupleId) == -1) if (readTupleIdFromNdb(table, range, tupleId) == -1)
DBUG_RETURN(-1); DBUG_RETURN(-1);
DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_RETURN(0); DBUG_RETURN(0);
} }
@ -879,7 +879,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable,
TupleIdRange & range = info->m_tuple_id_range; TupleIdRange & range = info->m_tuple_id_range;
if (readTupleIdFromNdb(table, range, tupleId) == -1) if (readTupleIdFromNdb(table, range, tupleId) == -1)
DBUG_RETURN(-1); DBUG_RETURN(-1);
DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_RETURN(0); DBUG_RETURN(0);
} }
@ -893,7 +893,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable,
if (readTupleIdFromNdb(table, range, tupleId) == -1) if (readTupleIdFromNdb(table, range, tupleId) == -1)
DBUG_RETURN(-1); DBUG_RETURN(-1);
DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_RETURN(0); DBUG_RETURN(0);
} }
@ -994,8 +994,8 @@ Ndb::setTupleIdInNdb(const NdbTableImpl* table,
{ {
range.m_first_tuple_id = tupleId - 1; range.m_first_tuple_id = tupleId - 1;
DBUG_PRINT("info", DBUG_PRINT("info",
("Setting next auto increment cached value to %llu", ("Setting next auto increment cached value to %lu",
(ulonglong)tupleId)); (ulong)tupleId));
DBUG_RETURN(0); DBUG_RETURN(0);
} }
} }
@ -1049,7 +1049,8 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table,
{ {
DBUG_ENTER("Ndb::opTupleIdOnNdb"); DBUG_ENTER("Ndb::opTupleIdOnNdb");
Uint32 aTableId = table->m_id; Uint32 aTableId = table->m_id;
DBUG_PRINT("enter", ("table=%u value=%llu op=%u", aTableId, opValue, op)); DBUG_PRINT("enter", ("table: %u value: %lu op: %u",
aTableId, (ulong) opValue, op));
NdbTransaction* tConnection = NULL; NdbTransaction* tConnection = NULL;
NdbOperation* tOperation = NULL; NdbOperation* tOperation = NULL;
@ -1117,8 +1118,8 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table,
else else
{ {
DBUG_PRINT("info", DBUG_PRINT("info",
("Setting next auto increment value (db) to %llu", ("Setting next auto increment value (db) to %lu",
(ulonglong)opValue)); (ulong) opValue));
range.m_first_tuple_id = range.m_last_tuple_id = opValue - 1; range.m_first_tuple_id = range.m_last_tuple_id = opValue - 1;
} }
break; break;
@ -1244,9 +1245,9 @@ int Ndb::setDatabaseAndSchemaName(const NdbDictionary::Table* t)
if (s2 && s2 != s1 + 1) { if (s2 && s2 != s1 + 1) {
char buf[NAME_LEN + 1]; char buf[NAME_LEN + 1];
if (s1 - s0 <= NAME_LEN && s2 - (s1 + 1) <= NAME_LEN) { if (s1 - s0 <= NAME_LEN && s2 - (s1 + 1) <= NAME_LEN) {
sprintf(buf, "%.*s", s1 - s0, s0); sprintf(buf, "%.*s", (int) (s1 - s0), s0);
setDatabaseName(buf); setDatabaseName(buf);
sprintf(buf, "%.*s", s2 - (s1 + 1), s1 + 1); sprintf(buf, "%.*s", (int) (s2 - (s1 + 1)), s1 + 1);
setDatabaseSchemaName(buf); setDatabaseSchemaName(buf);
return 0; return 0;
} }

View File

@ -1508,9 +1508,9 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep)
else else
{ {
/** out of order something */ /** out of order something */
ndbout_c("out of order bucket: %d gci: %lld m_latestGCI: %lld", ndbout_c("out of order bucket: %d gci: %ld m_latestGCI: %ld",
bucket-(Gci_container*)m_active_gci.getBase(), (int) (bucket-(Gci_container*)m_active_gci.getBase()),
gci, m_latestGCI); (long) gci, (long) m_latestGCI);
bucket->m_state = Gci_container::GC_COMPLETE; bucket->m_state = Gci_container::GC_COMPLETE;
bucket->m_gcp_complete_rep_count = 1; // Prevent from being reused bucket->m_gcp_complete_rep_count = 1; // Prevent from being reused
m_latest_complete_GCI = gci; m_latest_complete_GCI = gci;

View File

@ -669,9 +669,9 @@ NdbScanOperation::doSend(int ProcessorId)
void NdbScanOperation::close(bool forceSend, bool releaseOp) void NdbScanOperation::close(bool forceSend, bool releaseOp)
{ {
DBUG_ENTER("NdbScanOperation::close"); DBUG_ENTER("NdbScanOperation::close");
DBUG_PRINT("enter", ("this=%x tcon=%x con=%x force=%d release=%d", DBUG_PRINT("enter", ("this: 0x%lx tcon: 0x%lx con: 0x%lx force: %d release: %d",
(UintPtr)this, (long) this,
(UintPtr)m_transConnection, (UintPtr)theNdbCon, (long) m_transConnection, (long) theNdbCon,
forceSend, releaseOp)); forceSend, releaseOp));
if(m_transConnection){ if(m_transConnection){

View File

@ -1010,7 +1010,7 @@ void
NdbTransaction::releaseExecutedScanOperation(NdbIndexScanOperation* cursorOp) NdbTransaction::releaseExecutedScanOperation(NdbIndexScanOperation* cursorOp)
{ {
DBUG_ENTER("NdbTransaction::releaseExecutedScanOperation"); DBUG_ENTER("NdbTransaction::releaseExecutedScanOperation");
DBUG_PRINT("enter", ("this=0x%x op=0x%x", (UintPtr)this, (UintPtr)cursorOp)); DBUG_PRINT("enter", ("this: 0x%lx op: 0x%lx", (ulong) this, (ulong) cursorOp));
releaseScanOperation(&m_firstExecutedScanOp, 0, cursorOp); releaseScanOperation(&m_firstExecutedScanOp, 0, cursorOp);

View File

@ -361,7 +361,7 @@ void
Ndb::releaseScanOperation(NdbIndexScanOperation* aScanOperation) Ndb::releaseScanOperation(NdbIndexScanOperation* aScanOperation)
{ {
DBUG_ENTER("Ndb::releaseScanOperation"); DBUG_ENTER("Ndb::releaseScanOperation");
DBUG_PRINT("enter", ("op=%x", (UintPtr)aScanOperation)); DBUG_PRINT("enter", ("op: 0x%lx", (ulong) aScanOperation));
#ifdef ndb_release_check_dup #ifdef ndb_release_check_dup
{ NdbIndexScanOperation* tOp = theScanOpIdleList; { NdbIndexScanOperation* tOp = theScanOpIdleList;
while (tOp != NULL) { while (tOp != NULL) {