Fixed a LOT of compiler warnings

Added missing DBUG_RETURN statements (in mysqldump.c)
Added missing enums
Fixed a lot of wrong DBUG_PRINT() statements, some of which could cause crashes
Removed usage of %lld and %p in printf format strings, as these are not portable or produce different results on different systems.
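
The portable replacement pattern used throughout the diff below is to cast pointer and 64-bit values to long/ulong and print them with %lx / %lu / %ld (or to pre-format 64-bit counters with llstr()), instead of relying on %p or %lld. A standalone sketch of the idea in plain C, with illustrative variable names only (not code from this commit):

#include <stdio.h>

int main(void)
{
  int x= 42;
  void *ptr= &x;
  long long big= 1234567890123LL;

  /* %p and %lld output varies between the platforms/compilers targeted
     here, so the patch avoids them in debug/format strings. */
  printf("ptr: 0x%lx\n", (unsigned long) ptr);  /* pointer cast to a long type */
  printf("big: %ld\n", (long) big);             /* may truncate on 32-bit; acceptable for trace output */
  return 0;
}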
monty@mysql.com/nosik.monty.fi 2006-11-27 01:47:38 +02:00
parent 89570bf966
commit fa81a82e7f
97 changed files with 504 additions and 417 deletions

View File

@@ -30,14 +30,14 @@
 ** master/autocommit code by Brian Aker <brian@tangent.org>
 ** SSL by
 ** Andrei Errapart <andreie@no.spam.ee>
 ** Tõnu Samuel <tonu@please.do.not.remove.this.spam.ee>
 ** XML by Gary Huntress <ghuntress@mediaone.net> 10/10/01, cleaned up
 ** and adapted to mysqldump 05/11/01 by Jani Tolonen
 ** Added --single-transaction option 06/06/2002 by Peter Zaitsev
 ** 10 Jun 2003: SET NAMES and --no-set-names by Alexander Barkov
 */
-#define DUMP_VERSION "10.11"
+#define DUMP_VERSION "10.12"
 #include <my_global.h>
 #include <my_sys.h>
@@ -540,8 +540,10 @@ static void write_header(FILE *sql_file, char *db_name)
 if (opt_xml)
 {
 fputs("<?xml version=\"1.0\"?>\n", sql_file);
-/* Schema reference. Allows use of xsi:nil for NULL values and
-xsi:type to define an element's data type. */
+/*
+Schema reference. Allows use of xsi:nil for NULL values and
+xsi:type to define an element's data type.
+*/
 fputs("<mysqldump ", sql_file);
 fputs("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"",
 sql_file);
@@ -2349,7 +2351,7 @@ static void dump_table(char *table, char *db)
 The "table" could be a view. If so, we don't do anything here.
 */
 if (strcmp (table_type, "VIEW") == 0)
-return;
+DBUG_VOID_RETURN;
 /* Check --no-data flag */
 if (opt_no_data)
@@ -2657,16 +2659,16 @@ static void dump_table(char *table, char *db)
 {
 if (opt_hex_blob && is_blob && length)
 {
 /* Define xsi:type="xs:hexBinary" for hex encoded data */
 print_xml_tag(md_result_file, "\t\t", "", "field", "name=",
 field->name, "xsi:type=", "xs:hexBinary", NullS);
 print_blob_as_hex(md_result_file, row[i], length);
 }
 else
 {
 print_xml_tag(md_result_file, "\t\t", "", "field", "name=",
 field->name, NullS);
 print_quoted_xml(md_result_file, row[i], length);
 }
 fputs("</field>\n", md_result_file);
 }
@@ -3155,10 +3157,8 @@ static int dump_all_tables_in_db(char *database)
 afterdot= strmov(hash_key, database);
 *afterdot++= '.';
-if (!strcmp(database, NDB_REP_DB)) /* Skip cluster internal database */
-return 0;
 if (init_dumping(database, init_dumping_tables))
-return 1;
+DBUG_RETURN(1);
 if (opt_xml)
 print_xml_tag(md_result_file, "", "\n", "database", "name=", database, NullS);
 if (lock_tables)
@@ -3218,7 +3218,7 @@ static int dump_all_tables_in_db(char *database)
 fprintf(md_result_file,"\n--\n-- Flush Grant Tables \n--\n");
 fprintf(md_result_file,"\n/*! FLUSH PRIVILEGES */;\n");
 }
-return 0;
+DBUG_RETURN(0);
 } /* dump_all_tables_in_db */
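
The mysqldump.c hunks above also replace bare return statements with DBUG_RETURN / DBUG_VOID_RETURN. With MySQL's dbug framework, DBUG_ENTER pushes a frame on a per-thread trace stack, and that frame is only popped by DBUG_RETURN or DBUG_VOID_RETURN, so leaving such a function through a plain return unbalances the trace. A minimal sketch of the pairing, assuming a build inside the MySQL source tree (the function below is illustrative, not taken from mysqldump.c):

#include <my_global.h>
#include <my_sys.h>

static int dump_one_thing(const char *name)
{
  DBUG_ENTER("dump_one_thing");            /* pushes a frame on the dbug stack */
  DBUG_PRINT("enter", ("name: %s", name));

  if (name == NULL)
    DBUG_RETURN(1);                        /* not a bare "return 1;" */

  /* ... real work would go here ... */

  DBUG_RETURN(0);                          /* pops the frame pushed by DBUG_ENTER */
}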

View File

@@ -1031,7 +1031,7 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit)
 for (x= 0; x < concur; x++)
 {
 int pid;
-DBUG_PRINT("info", ("x %d concurrency %d", x, concurrency));
+DBUG_PRINT("info", ("x: %d concurrency: %u", x, *concurrency));
 pid= fork();
 switch(pid)
 {
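
The hunk above is one of the DBUG_PRINT() fixes aimed at a potential crash: the old call passed what the new code treats as a pointer (concurrency) straight to a %d conversion, while the fix dereferences it and prints it with %u. Passing a mismatched argument to a printf-style format is undefined behavior, as this small standalone C illustration (hypothetical names only) shows:

#include <stdio.h>

int main(void)
{
  unsigned int level= 4;
  unsigned int *concurrency= &level;   /* mirrors the pointer in the hunk */

  /* Old-style bug: handing the pointer itself to an integer conversion is
     undefined behavior and can print garbage or crash:
     printf("concurrency: %d\n", concurrency);   -- wrong */

  /* Fixed form: dereference and match the conversion to the type. */
  printf("concurrency: %u\n", *concurrency);
  return 0;
}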

View File

@@ -80,7 +80,7 @@ enum {
 OPT_SSL_CA, OPT_SSL_CAPATH, OPT_SSL_CIPHER, OPT_PS_PROTOCOL,
 OPT_SP_PROTOCOL, OPT_CURSOR_PROTOCOL, OPT_VIEW_PROTOCOL,
 OPT_SSL_VERIFY_SERVER_CERT, OPT_MAX_CONNECT_RETRIES,
-OPT_MARK_PROGRESS, OPT_CHARSETS_DIR, OPT_LOG_DIR, OPT_DEBUG_INFO};
+OPT_MARK_PROGRESS, OPT_CHARSETS_DIR, OPT_LOG_DIR, OPT_DEBUG_INFO
+};
 static int record= 0, opt_sleep= -1;

View File

@@ -75,7 +75,7 @@ public:
 typedef Integer Element;
 AbstractRing() : AbstractGroup() {m_mg.m_pRing = this;}
-AbstractRing(const AbstractRing &source) {m_mg.m_pRing = this;}
+AbstractRing(const AbstractRing &source) :AbstractGroup() {m_mg.m_pRing = this;}
 AbstractRing& operator=(const AbstractRing &source) {return *this;}
 virtual bool IsUnit(const Element &a) const =0;

View File

@@ -22,4 +22,5 @@ SHOW VARIABLES LIKE 'server_id';
 # Check that IM understands that mysqld1 is online, while mysqld2 is
 # offline.
+--replace_result starting XXXXX online XXXXX
 SHOW INSTANCES;

View File

@@ -1576,7 +1576,8 @@ sub environment_setup () {
 if ( $opt_source_dist )
 {
 push(@ld_library_paths, "$glob_basedir/libmysql/.libs/",
-"$glob_basedir/libmysql_r/.libs/");
+"$glob_basedir/libmysql_r/.libs/",
+"$glob_basedir/zlib.libs/");
 }
 else
 {
@@ -2992,10 +2993,6 @@ sub do_after_run_mysqltest($)
 # Save info from this testcase run to mysqltest.log
 mtr_appendfile_to_file($path_timefile, $path_mysqltest_log)
 if -f $path_timefile;
-# Remove the file that mysqltest writes info to
-unlink($path_timefile);
 }
@@ -3183,6 +3180,9 @@ sub run_testcase ($) {
 }
 }
+# Remove the file that mysqltest writes info to
+unlink($path_timefile);
 # ----------------------------------------------------------------------
 # Stop Instance Manager if we are processing an IM-test case.
 # ----------------------------------------------------------------------
@@ -4094,7 +4094,6 @@ sub run_testcase_start_servers($) {
 }
 if ( $clusters->[0]->{'pid'} and ! $master->[1]->{'pid'} )
-{
 {
 # Test needs cluster, start an extra mysqld connected to cluster
@@ -4848,4 +4847,3 @@ HERE
 mtr_exit(1);
 }

View File

@@ -1,3 +1,4 @@
+drop table if exists t1;
 DROP TABLE IF EXISTS t1;
 SHOW COLLATION LIKE 'cp1250_czech_cs';
 Collation Charset Id Default Compiled Sortlen

View File

@@ -3,7 +3,7 @@ Variable_name Value
 server_id 1
 SHOW INSTANCES;
 instance_name state
-mysqld1 starting
+mysqld1 XXXXX
 mysqld2 offline
 --> Listing users...
 im_admin

View File

@@ -3,7 +3,7 @@ Variable_name Value
 server_id 1
 SHOW INSTANCES;
 instance_name state
-mysqld1 online
+mysqld1 XXXXX
 mysqld2 offline
 Killing the process...
 Sleeping...

View File

@@ -3,7 +3,7 @@ Variable_name Value
 server_id 1
 SHOW INSTANCES;
 instance_name state
-mysqld1 online
+mysqld1 XXXXX
 mysqld2 offline
 --------------------------------------------------------------------
 server_id = 1

View File

@@ -3,7 +3,7 @@ Variable_name Value
 server_id 1
 SHOW INSTANCES;
 instance_name state
-mysqld1 online
+mysqld1 XXXXX
 mysqld2 offline
 --------------------------------------------------------------------

View File

@@ -3,7 +3,7 @@ Variable_name Value
 server_id 1
 SHOW INSTANCES;
 instance_name state
-mysqld1 online
+mysqld1 XXXXX
 mysqld2 offline
 SHOW INSTANCE OPTIONS mysqld1;
 option_name value

View File

@@ -280,6 +280,7 @@ create table general_log_new like general_log;
 create table slow_log_new like slow_log;
 show tables like "%log%";
 Tables_in_mysql (%log%)
+binlog_index
 general_log
 general_log_new
 slow_log

View File

@@ -16,6 +16,9 @@ concurrent_innodb : BUG#21579 2006-08-11 mleich innodb_concurrent random
 ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
 ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
 ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed
+ndb_restore_partition : Problem with cluster/def/schema table that is in std_data/ndb_backup51; Pekka will schdule this to someone
+rpl_ndb_sync : Problem with cluster/def/schema table that is in std_data/ndb_backup51; Pekka will schdule this to someone
 partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table
 ps_7ndb : BUG#18950 2006-02-16 jmiller create table like does not obtain LOCK_open
 rpl_ndb_2innodb : BUG#19227 2006-04-20 pekka pk delete apparently not replicated

View File

@@ -138,14 +138,14 @@ int packfrm(const void *data, uint len,
 uint blob_len;
 struct frm_blob_struct *blob;
 DBUG_ENTER("packfrm");
-DBUG_PRINT("enter", ("data: %x, len: %d", data, len));
+DBUG_PRINT("enter", ("data: 0x%lx, len: %d", (long) data, len));
 error= 1;
 org_len= len;
 if (my_compress((byte*)data, &org_len, &comp_len))
 goto err;
-DBUG_PRINT("info", ("org_len: %d, comp_len: %d", org_len, comp_len));
+DBUG_PRINT("info", ("org_len: %lu comp_len: %lu", org_len, comp_len));
 DBUG_DUMP("compressed", (char*)data, org_len);
 error= 2;
@@ -165,7 +165,8 @@ int packfrm(const void *data, uint len,
 *pack_len= blob_len;
 error= 0;
-DBUG_PRINT("exit", ("pack_data: %x, pack_len: %d", *pack_data, *pack_len));
+DBUG_PRINT("exit", ("pack_data: 0x%lx pack_len: %d",
+(long) *pack_data, *pack_len));
 err:
 DBUG_RETURN(error);
@@ -194,13 +195,13 @@ int unpackfrm(const void **unpack_data, uint *unpack_len,
 byte *data;
 ulong complen, orglen, ver;
 DBUG_ENTER("unpackfrm");
-DBUG_PRINT("enter", ("pack_data: %x", pack_data));
+DBUG_PRINT("enter", ("pack_data: 0x%lx", (long) pack_data));
 complen= uint4korr((char*)&blob->head.complen);
 orglen= uint4korr((char*)&blob->head.orglen);
 ver= uint4korr((char*)&blob->head.ver);
-DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d",
+DBUG_PRINT("blob",("ver: %lu complen: %lu orglen: %lu",
 ver,complen,orglen));
 DBUG_DUMP("blob->data", (char*) blob->data, complen);
@@ -220,7 +221,7 @@ int unpackfrm(const void **unpack_data, uint *unpack_len,
 *unpack_data= data;
 *unpack_len= complen;
-DBUG_PRINT("exit", ("frmdata: %x, len: %d", *unpack_data, *unpack_len));
+DBUG_PRINT("exit", ("frmdata: 0x%lx len: %d", (long) *unpack_data, *unpack_len));
 DBUG_RETURN(0);
 }
 #endif /* HAVE_COMPRESS */

View File

@@ -936,8 +936,8 @@ void my_print_variables(const struct my_option *options)
 (*getopt_get_addr)("", 0, optp) : optp->value);
 if (value)
 {
-printf("%s", optp->name);
-length= (uint) strlen(optp->name);
+printf("%s ", optp->name);
+length= (uint) strlen(optp->name)+1;
 for (; length < name_space; length++)
 putchar(' ');
 switch ((optp->var_type & GET_TYPE_MASK)) {

View File

@@ -62,7 +62,7 @@ static long number_of_calls= 0; /* for SHOW STATUS, see below */
 1 failure (cannot happen)
 */
-static int simple_parser_plugin_init(void)
+static int simple_parser_plugin_init(void *arg __attribute__((unused)))
 {
 return(0);
 }
@@ -81,7 +81,7 @@ static int simple_parser_plugin_init(void)
 */
-static int simple_parser_plugin_deinit(void)
+static int simple_parser_plugin_deinit(void *arg __attribute__((unused)))
 {
 return(0);
 }

View File

@@ -334,7 +334,6 @@ int Mysql_connection_thread::dispatch_command(enum enum_server_command command,
 case COM_QUERY:
 {
 log_info("query for connection %lu : ----\n%s\n-------------------------",
-log_info("query for connection %d : ----\n%s\n-------------------------",
 connection_id,packet);
 if (Command *command= parse_command(&instance_map, packet))
 {

View File

@@ -124,8 +124,8 @@ void
 Event_parse_data::init_body(THD *thd)
 {
 DBUG_ENTER("Event_parse_data::init_body");
-DBUG_PRINT("info", ("body=[%s] body_begin=0x%lx end=0x%lx", body_begin,
-body_begin, thd->lex->ptr));
+DBUG_PRINT("info", ("body: '%s' body_begin: 0x%lx end: 0x%lx", body_begin,
+(long) body_begin, (long) thd->lex->ptr));
 body.length= thd->lex->ptr - body_begin;
 const uchar *body_end= body_begin + body.length - 1;
@@ -399,8 +399,9 @@ Event_parse_data::init_starts(THD *thd)
 thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp,
 (my_time_t) thd->query_start());
-DBUG_PRINT("info",("now =%lld", TIME_to_ulonglong_datetime(&time_tmp)));
-DBUG_PRINT("info",("starts=%lld", TIME_to_ulonglong_datetime(&ltime)));
+DBUG_PRINT("info",("now: %ld starts: %ld",
+(long) TIME_to_ulonglong_datetime(&time_tmp),
+(long) TIME_to_ulonglong_datetime(&ltime)));
 if (TIME_to_ulonglong_datetime(&ltime) <
 TIME_to_ulonglong_datetime(&time_tmp))
 goto wrong_value;
@@ -536,8 +537,9 @@ Event_parse_data::check_parse_data(THD *thd)
 {
 bool ret;
 DBUG_ENTER("Event_parse_data::check_parse_data");
-DBUG_PRINT("info", ("execute_at=0x%lx expr=0x%lx starts=0x%lx ends=0x%lx",
-item_execute_at, item_expression, item_starts, item_ends));
+DBUG_PRINT("info", ("execute_at: 0x%lx expr=0x%lx starts=0x%lx ends=0x%lx",
+(long) item_execute_at, (long) item_expression,
+(long) item_starts, (long) item_ends));
 init_name(thd, identifier);
@@ -564,9 +566,9 @@ Event_parse_data::init_definer(THD *thd)
 int definer_host_len;
 DBUG_ENTER("Event_parse_data::init_definer");
-DBUG_PRINT("info",("init definer_user thd->mem_root=0x%lx "
-"thd->sec_ctx->priv_user=0x%lx", thd->mem_root,
-thd->security_ctx->priv_user));
+DBUG_PRINT("info",("init definer_user thd->mem_root: 0x%lx "
+"thd->sec_ctx->priv_user: 0x%lx", (long) thd->mem_root,
+(long) thd->security_ctx->priv_user));
 definer_user_len= strlen(thd->security_ctx->priv_user);
 definer_host_len= strlen(thd->security_ctx->priv_host);
@@ -1032,8 +1034,9 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
 TIME tmp;
 longlong months=0, seconds=0;
 DBUG_ENTER("get_next_time");
-DBUG_PRINT("enter", ("start=%llu now=%llu", TIME_to_ulonglong_datetime(start),
-TIME_to_ulonglong_datetime(time_now)));
+DBUG_PRINT("enter", ("start: %lu now: %lu",
+(long) TIME_to_ulonglong_datetime(start),
+(long) TIME_to_ulonglong_datetime(time_now)));
 bzero(&interval, sizeof(interval));
@@ -1081,7 +1084,7 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
 case INTERVAL_LAST:
 DBUG_ASSERT(0);
 }
-DBUG_PRINT("info", ("seconds=%ld months=%ld", seconds, months));
+DBUG_PRINT("info", ("seconds: %ld months: %ld", (long) seconds, (long) months));
 if (seconds)
 {
 longlong seconds_diff;
@@ -1099,14 +1102,14 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
 event two times for the same time
 get the next exec if the modulus is not
 */
-DBUG_PRINT("info", ("multiplier=%d", multiplier));
+DBUG_PRINT("info", ("multiplier: %d", multiplier));
 if (seconds_diff % seconds || (!seconds_diff && last_exec->year) ||
 TIME_to_ulonglong_datetime(time_now) ==
 TIME_to_ulonglong_datetime(last_exec))
 ++multiplier;
 interval.second= seconds * multiplier;
-DBUG_PRINT("info", ("multiplier=%u interval.second=%u", multiplier,
-interval.second));
+DBUG_PRINT("info", ("multiplier: %lu interval.second: %lu", (ulong) multiplier,
+(ulong) interval.second));
 tmp= *start;
 if (!(ret= date_add_interval(&tmp, INTERVAL_SECOND, interval)))
 *next= tmp;
@@ -1158,7 +1161,7 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
 }
 done:
-DBUG_PRINT("info", ("next=%llu", TIME_to_ulonglong_datetime(next)));
+DBUG_PRINT("info", ("next: %lu", (long) TIME_to_ulonglong_datetime(next)));
 DBUG_RETURN(ret);
 }
@@ -1183,17 +1186,17 @@ Event_queue_element::compute_next_execution_time()
 {
 TIME time_now;
 int tmp;
 DBUG_ENTER("Event_queue_element::compute_next_execution_time");
-DBUG_PRINT("enter", ("starts=%llu ends=%llu last_executed=%llu this=0x%lx",
-TIME_to_ulonglong_datetime(&starts),
-TIME_to_ulonglong_datetime(&ends),
-TIME_to_ulonglong_datetime(&last_executed), this));
+DBUG_PRINT("enter", ("starts: %lu ends: %lu last_executed: %lu this: 0x%lx",
+(long) TIME_to_ulonglong_datetime(&starts),
+(long) TIME_to_ulonglong_datetime(&ends),
+(long) TIME_to_ulonglong_datetime(&last_executed),
+(long) this));
 if (status == Event_queue_element::DISABLED)
 {
 DBUG_PRINT("compute_next_execution_time",
 ("Event %s is DISABLED", name.str));
 goto ret;
 }
 /* If one-time, no need to do computation */
@@ -1203,9 +1206,9 @@ Event_queue_element::compute_next_execution_time()
 if (last_executed.year)
 {
 DBUG_PRINT("info",("One-time event %s.%s of was already executed",
-dbname.str, name.str, definer.str));
+dbname.str, name.str));
 dropped= (on_completion == Event_queue_element::ON_COMPLETION_DROP);
-DBUG_PRINT("info",("One-time event will be dropped=%d.", dropped));
+DBUG_PRINT("info",("One-time event will be dropped: %d.", dropped));
 status= Event_queue_element::DISABLED;
 status_changed= TRUE;
@@ -1226,7 +1229,7 @@ Event_queue_element::compute_next_execution_time()
 execute_at_null= TRUE;
 if (on_completion == Event_queue_element::ON_COMPLETION_DROP)
 dropped= TRUE;
-DBUG_PRINT("info", ("Dropped=%d", dropped));
+DBUG_PRINT("info", ("Dropped: %d", dropped));
 status= Event_queue_element::DISABLED;
 status_changed= TRUE;
@@ -1400,8 +1403,8 @@ Event_queue_element::compute_next_execution_time()
 goto ret;
 }
 ret:
-DBUG_PRINT("info", ("ret=0 execute_at=%llu",
-TIME_to_ulonglong_datetime(&execute_at)));
+DBUG_PRINT("info", ("ret: 0 execute_at: %lu",
+(long) TIME_to_ulonglong_datetime(&execute_at)));
 DBUG_RETURN(FALSE);
 err:
 DBUG_PRINT("info", ("ret=1"));
@@ -1688,7 +1691,7 @@ done:
 thd->end_statement();
 thd->cleanup_after_query();
-DBUG_PRINT("info", ("EXECUTED %s.%s ret=%d", dbname.str, name.str, ret));
+DBUG_PRINT("info", ("EXECUTED %s.%s ret: %d", dbname.str, name.str, ret));
 DBUG_RETURN(ret);
 }
@@ -1752,7 +1755,7 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root)
 thd->update_charset();
-DBUG_PRINT("info",("old_sql_mode=%d new_sql_mode=%d",old_sql_mode, sql_mode));
+DBUG_PRINT("info",("old_sql_mode: %lu new_sql_mode: %lu",old_sql_mode, sql_mode));
 thd->variables.sql_mode= this->sql_mode;
 /* Change the memory root for the execution time */
 if (mem_root)
@@ -1769,7 +1772,7 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root)
 thd->query= show_create.c_ptr_safe();
 thd->query_length= show_create.length();
-DBUG_PRINT("info", ("query:%s",thd->query));
+DBUG_PRINT("info", ("query: %s",thd->query));
 event_change_security_context(thd, definer_user, definer_host, dbname,
 &save_ctx);
@@ -1777,14 +1780,14 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root)
 mysql_init_query(thd, (uchar*) thd->query, thd->query_length);
 if (MYSQLparse((void *)thd) || thd->is_fatal_error)
 {
-DBUG_PRINT("error", ("error during compile or thd->is_fatal_error=%d",
+DBUG_PRINT("error", ("error during compile or thd->is_fatal_error: %d",
 thd->is_fatal_error));
 /*
 Free lex associated resources
 QQ: Do we really need all this stuff here?
 */
 sql_print_error("SCHEDULER: Error during compilation of %s.%s or "
-"thd->is_fatal_error=%d",
+"thd->is_fatal_error: %d",
 dbname.str, name.str, thd->is_fatal_error);
 lex.unit.cleanup();

View File

@@ -111,14 +111,14 @@ public:
 void *p;
 DBUG_ENTER("Event_queue_element::new(size)");
 p= my_malloc(size, MYF(0));
-DBUG_PRINT("info", ("alloc_ptr=0x%lx", p));
+DBUG_PRINT("info", ("alloc_ptr: 0x%lx", (long) p));
 DBUG_RETURN(p);
 }
 static void operator delete(void *ptr, size_t size)
 {
 DBUG_ENTER("Event_queue_element::delete(ptr,size)");
-DBUG_PRINT("enter", ("free_ptr=0x%lx", ptr));
+DBUG_PRINT("enter", ("free_ptr: 0x%lx", (long) ptr));
 TRASH(ptr, size);
 my_free((gptr) ptr, MYF(0));
 DBUG_VOID_RETURN;

View File

@@ -958,7 +958,7 @@ Event_db_repository::load_named_event(THD *thd, LEX_STRING dbname,
 Open_tables_state backup;
 DBUG_ENTER("Event_db_repository::load_named_event");
-DBUG_PRINT("enter",("thd=0x%lx name:%*s",thd, name.length, name.str));
+DBUG_PRINT("enter",("thd: 0x%lx name: %*s", (long) thd, name.length, name.str));
 thd->reset_n_backup_open_tables_state(&backup);

View File

@@ -143,7 +143,7 @@ Event_queue::init_queue(THD *thd, Event_db_repository *db_repo)
 struct event_queue_param *event_queue_param_value= NULL;
 DBUG_ENTER("Event_queue::init_queue");
-DBUG_PRINT("enter", ("this=0x%lx", this));
+DBUG_PRINT("enter", ("this: 0x%lx", (long) this));
 LOCK_QUEUE_DATA();
 db_repository= db_repo;
@@ -218,7 +218,7 @@ Event_queue::create_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
 int res;
 Event_queue_element *new_element;
 DBUG_ENTER("Event_queue::create_event");
-DBUG_PRINT("enter", ("thd=0x%lx et=%s.%s",thd, dbname.str, name.str));
+DBUG_PRINT("enter", ("thd: 0x%lx et=%s.%s", (long) thd, dbname.str, name.str));
 new_element= new Event_queue_element();
 res= db_repository->load_named_event(thd, dbname, name, new_element);
@@ -229,7 +229,7 @@ Event_queue::create_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
 new_element->compute_next_execution_time();
 LOCK_QUEUE_DATA();
-DBUG_PRINT("info", ("new event in the queue 0x%lx", new_element));
+DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element));
 queue_insert_safe(&queue, (byte *) new_element);
 dbug_dump_queue(thd->query_start());
 pthread_cond_broadcast(&COND_queue_state);
@@ -264,7 +264,7 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name,
 Event_queue_element *new_element;
 DBUG_ENTER("Event_queue::update_event");
-DBUG_PRINT("enter", ("thd=0x%lx et=[%s.%s]", thd, dbname.str, name.str));
+DBUG_PRINT("enter", ("thd: 0x%lx et=[%s.%s]", (long) thd, dbname.str, name.str));
 new_element= new Event_queue_element();
@@ -294,7 +294,7 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name,
 /* If not disabled event */
 if (new_element)
 {
-DBUG_PRINT("info", ("new event in the Q 0x%lx", new_element));
+DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element));
 queue_insert_safe(&queue, (byte *) new_element);
 pthread_cond_broadcast(&COND_queue_state);
 }
@@ -322,7 +322,8 @@ void
 Event_queue::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
 {
 DBUG_ENTER("Event_queue::drop_event");
-DBUG_PRINT("enter", ("thd=0x%lx db=%s name=%s", thd, dbname.str, name.str));
+DBUG_PRINT("enter", ("thd: 0x%lx db :%s name: %s", (long) thd,
+dbname.str, name.str));
 LOCK_QUEUE_DATA();
 find_n_remove_event(dbname, name);
@@ -484,7 +485,7 @@ Event_queue::load_events_from_db(THD *thd)
 bool clean_the_queue= TRUE;
 DBUG_ENTER("Event_queue::load_events_from_db");
-DBUG_PRINT("enter", ("thd=0x%lx", thd));
+DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
 if ((ret= db_repository->open_event_table(thd, TL_READ, &table)))
 {
@@ -555,7 +556,6 @@ Event_queue::load_events_from_db(THD *thd)
 goto end;
 }
-DBUG_PRINT("load_events_from_db", ("Adding 0x%lx to the exec list."));
 queue_insert_safe(&queue, (byte *) et);
 count++;
 }
@@ -663,16 +663,20 @@ Event_queue::dbug_dump_queue(time_t now)
 for (i = 0; i < queue.elements; i++)
 {
 et= ((Event_queue_element*)queue_element(&queue, i));
-DBUG_PRINT("info",("et=0x%lx db=%s name=%s",et, et->dbname.str, et->name.str));
-DBUG_PRINT("info", ("exec_at=%llu starts=%llu ends=%llu execs_so_far=%u"
-" expr=%lld et.exec_at=%d now=%d (et.exec_at - now)=%d if=%d",
-TIME_to_ulonglong_datetime(&et->execute_at),
-TIME_to_ulonglong_datetime(&et->starts),
-TIME_to_ulonglong_datetime(&et->ends),
-et->execution_count,
-et->expression, sec_since_epoch_TIME(&et->execute_at), now,
-(int)(sec_since_epoch_TIME(&et->execute_at) - now),
-sec_since_epoch_TIME(&et->execute_at) <= now));
+DBUG_PRINT("info", ("et: 0x%lx name: %s.%s", (long) et,
+et->dbname.str, et->name.str));
+DBUG_PRINT("info", ("exec_at: %lu starts: %lu ends: %lu execs_so_far: %u "
+"expr: %ld et.exec_at: %ld now: %ld "
+"(et.exec_at - now): %d if: %d",
+(long) TIME_to_ulonglong_datetime(&et->execute_at),
+(long) TIME_to_ulonglong_datetime(&et->starts),
+(long) TIME_to_ulonglong_datetime(&et->ends),
+et->execution_count,
+(long) et->expression,
+(long) (sec_since_epoch_TIME(&et->execute_at)),
+(long) now,
+(int) (sec_since_epoch_TIME(&et->execute_at) - now),
+sec_since_epoch_TIME(&et->execute_at) <= now));
 }
 DBUG_VOID_RETURN;
 #endif
@@ -812,11 +816,11 @@ end:
 if (to_free)
 delete top;
-DBUG_PRINT("info", ("returning %d. et_new=0x%lx abstime.tv_sec=%d ",
-ret, *job_data, abstime? abstime->tv_sec:0));
+DBUG_PRINT("info", ("returning %d et_new: 0x%lx abstime.tv_sec: %ld ",
+ret, (long) *job_data, abstime ? abstime->tv_sec : 0));
 if (*job_data)
-DBUG_PRINT("info", ("db=%s name=%s definer=%s", (*job_data)->dbname.str,
+DBUG_PRINT("info", ("db: %s name: %s definer=%s", (*job_data)->dbname.str,
 (*job_data)->name.str, (*job_data)->definer.str));
 DBUG_RETURN(ret);

View File

@@ -264,8 +264,9 @@ event_worker_thread(void *arg)
 if (!post_init_event_thread(thd))
 {
-DBUG_PRINT("info", ("Baikonur, time is %d, BURAN reporting and operational."
-"THD=0x%lx", time(NULL), thd));
+DBUG_PRINT("info", ("Baikonur, time is %ld, BURAN reporting and operational."
+"THD: 0x%lx",
+(long) time(NULL), (long) thd));
 sql_print_information("SCHEDULER: [%s.%s of %s] executing in thread %lu. "
 "Execution %u",
@@ -378,7 +379,7 @@ Event_scheduler::start()
 DBUG_ENTER("Event_scheduler::start");
 LOCK_DATA();
-DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state]));
+DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str));
 if (state > INITIALIZED)
 goto end;
@@ -400,7 +401,7 @@ Event_scheduler::start()
 scheduler_thd= new_thd;
 DBUG_PRINT("info", ("Setting state go RUNNING"));
 state= RUNNING;
-DBUG_PRINT("info", ("Forking new thread for scheduduler. THD=0x%lx", new_thd));
+DBUG_PRINT("info", ("Forking new thread for scheduduler. THD: 0x%lx", (long) new_thd));
 if (pthread_create(&th, &connection_attrib, event_scheduler_thread,
 (void*)scheduler_param_value))
 {
@@ -463,7 +464,7 @@ Event_scheduler::run(THD *thd)
 break;
 }
-DBUG_PRINT("info", ("get_top returned job_data=0x%lx", job_data));
+DBUG_PRINT("info", ("get_top returned job_data: 0x%lx", (long) job_data));
 if (job_data)
 {
 if ((res= execute_top(thd, job_data)))
@@ -522,11 +523,11 @@ Event_scheduler::execute_top(THD *thd, Event_job_data *job_data)
 ++started_events;
-DBUG_PRINT("info", ("Launch succeeded. BURAN is in THD=0x%lx", new_thd));
+DBUG_PRINT("info", ("Launch succeeded. BURAN is in THD: 0x%lx", (long) new_thd));
 DBUG_RETURN(FALSE);
 error:
-DBUG_PRINT("error", ("Baikonur, we have a problem! res=%d", res));
+DBUG_PRINT("error", ("Baikonur, we have a problem! res: %d", res));
 if (new_thd)
 {
 new_thd->proc_info= "Clearing";
@@ -581,10 +582,10 @@ Event_scheduler::stop()
 {
 THD *thd= current_thd;
 DBUG_ENTER("Event_scheduler::stop");
-DBUG_PRINT("enter", ("thd=0x%lx", current_thd));
+DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
 LOCK_DATA();
-DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state]));
+DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str));
 if (state != RUNNING)
 goto end;
@@ -605,7 +606,7 @@ Event_scheduler::stop()
 */
 state= STOPPING;
-DBUG_PRINT("info", ("Manager thread has id %d", scheduler_thd->thread_id));
+DBUG_PRINT("info", ("Manager thread has id %lu", scheduler_thd->thread_id));
 /* Lock from delete */
 pthread_mutex_lock(&scheduler_thd->LOCK_delete);
 /* This will wake up the thread if it waits on Queue's conditional */

View File

@@ -858,7 +858,7 @@ Events::check_system_tables(THD *thd)
 bool ret= FALSE;
 DBUG_ENTER("Events::check_system_tables");
-DBUG_PRINT("enter", ("thd=0x%lx", thd));
+DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
 thd->reset_n_backup_open_tables_state(&backup);

View File

@@ -8180,8 +8180,8 @@ Field_bit::do_last_null_byte() const
 bits. On systems with CHAR_BIT > 8 (not very common), the storage
 will lose the extra bits.
 */
-DBUG_PRINT("debug", ("bit_ofs=%d, bit_len=%d, bit_ptr=%p",
-bit_ofs, bit_len, bit_ptr));
+DBUG_PRINT("test", ("bit_ofs: %d, bit_len: %d bit_ptr: 0x%lx",
+bit_ofs, bit_len, (long) bit_ptr));
 uchar *result;
 if (bit_len == 0)
 result= null_ptr;

View File

@@ -413,7 +413,8 @@ Thd_ndb::get_open_table(THD *thd, const void *key)
 thd_ndb_share->stat.no_uncommitted_rows_count= 0;
 thd_ndb_share->stat.records= ~(ha_rows)0;
 }
-DBUG_PRINT("exit", ("thd_ndb_share: 0x%x key: 0x%x", thd_ndb_share, key));
+DBUG_PRINT("exit", ("thd_ndb_share: 0x%lx key: 0x%lx",
+(long) thd_ndb_share, (long) key));
 DBUG_RETURN(thd_ndb_share);
 }
@@ -761,8 +762,8 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
 blob_ptr= (char*)"";
 }
-DBUG_PRINT("value", ("set blob ptr=%p len=%u",
-blob_ptr, blob_len));
+DBUG_PRINT("value", ("set blob ptr: 0x%lx len: %u",
+(long) blob_ptr, blob_len));
 DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26));
 if (set_blob_value)
@@ -847,8 +848,8 @@ int get_ndb_blobs_value(TABLE* table, NdbValue* value_array,
 uint32 len= 0xffffffff; // Max uint32
 if (ndb_blob->readData(buf, len) != 0)
 ERR_RETURN(ndb_blob->getNdbError());
-DBUG_PRINT("info", ("[%u] offset=%u buf=%p len=%u [ptrdiff=%d]",
-i, offset, buf, len, (int)ptrdiff));
+DBUG_PRINT("info", ("[%u] offset: %u buf: 0x%lx len=%u [ptrdiff=%d]",
+i, offset, (long) buf, len, (int)ptrdiff));
 DBUG_ASSERT(len == len64);
 // Ugly hack assumes only ptr needs to be changed
 field_blob->ptr+= ptrdiff;
@@ -1171,8 +1172,8 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
 index= dict->getIndexGlobal(index_name, *m_table);
 if (!index)
 ERR_RETURN(dict->getNdbError());
-DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d",
-index,
+DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d",
+(long) index,
 index->getObjectId(),
 index->getObjectVersion() & 0xFFFFFF,
 index->getObjectVersion() >> 24,
@@ -1215,8 +1216,8 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
 index= dict->getIndexGlobal(unique_index_name, *m_table);
 if (!index)
 ERR_RETURN(dict->getNdbError());
-DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d",
-index,
+DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d",
+(long) index,
 index->getObjectId(),
 index->getObjectVersion() & 0xFFFFFF,
 index->getObjectVersion() >> 24,
@@ -2305,7 +2306,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
 // Set bound if not done with this key
 if (p.key != NULL)
 {
-DBUG_PRINT("info", ("key %d:%d offset=%d length=%d last=%d bound=%d",
+DBUG_PRINT("info", ("key %d:%d offset: %d length: %d last: %d bound: %d",
 j, i, tot_len, part_len, p.part_last, p.bound_type));
 DBUG_DUMP("info", (const char*)p.part_ptr, part_store_len);
@@ -2462,7 +2463,7 @@ int ha_ndbcluster::full_table_scan(byte *buf)
 part_spec.start_part= 0;
 part_spec.end_part= m_part_info->get_tot_partitions() - 1;
 prune_partition_set(table, &part_spec);
-DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
 part_spec.start_part, part_spec.end_part));
 /*
 If partition pruning has found no partition in set
@@ -2658,7 +2659,7 @@ int ha_ndbcluster::write_row(byte *record)
 {
 // Send rows to NDB
 DBUG_PRINT("info", ("Sending inserts to NDB, "\
-"rows_inserted:%d, bulk_insert_rows: %d",
+"rows_inserted: %d bulk_insert_rows: %d",
 (int)m_rows_inserted, (int)m_bulk_insert_rows));
 m_bulk_insert_not_flushed= FALSE;
@@ -3108,7 +3109,8 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
 char* ptr;
 field_blob->get_ptr(&ptr, row_offset);
 uint32 len= field_blob->get_length(row_offset);
-DBUG_PRINT("info",("[%u] SET ptr=%p len=%u", col_no, ptr, len));
+DBUG_PRINT("info",("[%u] SET ptr: 0x%lx len: %u",
+col_no, (long) ptr, len));
 #endif
 }
 }
@@ -3350,7 +3352,7 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
 if (m_use_partition_function)
 {
 get_partition_set(table, buf, active_index, start_key, &part_spec);
-DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
 part_spec.start_part, part_spec.end_part));
 /*
 If partition pruning has found no partition in set
@@ -3876,7 +3878,7 @@ int ha_ndbcluster::end_bulk_insert()
 NdbTransaction *trans= m_active_trans;
 // Send rows to NDB
 DBUG_PRINT("info", ("Sending inserts to NDB, "\
-"rows_inserted:%d, bulk_insert_rows: %d",
+"rows_inserted: %d bulk_insert_rows: %d",
 (int) m_rows_inserted, (int) m_bulk_insert_rows));
 m_bulk_insert_not_flushed= FALSE;
 if (m_transaction_on)
@@ -5101,13 +5103,12 @@ void ha_ndbcluster::prepare_for_alter()
 int ha_ndbcluster::add_index(TABLE *table_arg,
 KEY *key_info, uint num_of_keys)
 {
-DBUG_ENTER("ha_ndbcluster::add_index");
-DBUG_PRINT("info", ("ha_ndbcluster::add_index to table %s",
-table_arg->s->table_name));
 int error= 0;
 uint idx;
+DBUG_ENTER("ha_ndbcluster::add_index");
+DBUG_PRINT("enter", ("table %s", table_arg->s->table_name.str));
 DBUG_ASSERT(m_share->state == NSS_ALTERED);
 for (idx= 0; idx < num_of_keys; idx++)
 {
 KEY *key= key_info + idx;
@@ -6662,7 +6663,7 @@ static int ndbcluster_end(handlerton *hton, ha_panic_function type)
 void ha_ndbcluster::print_error(int error, myf errflag)
 {
 DBUG_ENTER("ha_ndbcluster::print_error");
-DBUG_PRINT("enter", ("error = %d", error));
+DBUG_PRINT("enter", ("error: %d", error));
 if (error == HA_ERR_NO_PARTITION_FOUND)
 m_part_info->print_no_partition_found(table);
@@ -7168,16 +7169,16 @@ static void dbug_print_open_tables()
 for (uint i= 0; i < ndbcluster_open_tables.records; i++)
 {
 NDB_SHARE *share= (NDB_SHARE*) hash_element(&ndbcluster_open_tables, i);
-DBUG_PRINT("share",
+DBUG_PRINT("loop",
 ("[%d] 0x%lx key: %s key_length: %d",
-i, share, share->key, share->key_length));
-DBUG_PRINT("share",
-("db.tablename: %s.%s use_count: %d commit_count: %d",
+i, (long) share, share->key, share->key_length));
+DBUG_PRINT("loop",
+("db.tablename: %s.%s use_count: %d commit_count: %lu",
 share->db, share->table_name,
-share->use_count, share->commit_count));
+share->use_count, (ulong) share->commit_count));
 #ifdef HAVE_NDB_BINLOG
 if (share->table)
-DBUG_PRINT("share",
+DBUG_PRINT("loop",
 ("table->s->db.table_name: %s.%s",
 share->table->s->db.str, share->table->s->table_name.str));
 #endif
@@ -7330,13 +7331,13 @@ static int rename_share(NDB_SHARE *share, const char *new_key)
 share->table_name= share->db + strlen(share->db) + 1;
 ha_ndbcluster::set_tabname(new_key, share->table_name);
-DBUG_PRINT("rename_share",
-("0x%lx key: %s key_length: %d",
-share, share->key, share->key_length));
-DBUG_PRINT("rename_share",
-("db.tablename: %s.%s use_count: %d commit_count: %d",
+DBUG_PRINT("info",
+("share: 0x%lx key: %s key_length: %d",
+(long) share, share->key, share->key_length));
+DBUG_PRINT("info",
+("db.tablename: %s.%s use_count: %d commit_count: %lu",
 share->db, share->table_name,
-share->use_count, share->commit_count));
+share->use_count, (ulong) share->commit_count));
 if (share->table)
 {
 DBUG_PRINT("rename_share",
@@ -7371,13 +7372,13 @@ NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share)
 dbug_print_open_tables();
-DBUG_PRINT("get_share",
-("0x%lx key: %s key_length: %d",
-share, share->key, share->key_length));
-DBUG_PRINT("get_share",
-("db.tablename: %s.%s use_count: %d commit_count: %d",
+DBUG_PRINT("info",
+("share: 0x%lx key: %s key_length: %d",
+(long) share, share->key, share->key_length));
+DBUG_PRINT("info",
+("db.tablename: %s.%s use_count: %d commit_count: %lu",
 share->db, share->table_name,
-share->use_count, share->commit_count));
+share->use_count, (ulong) share->commit_count));
 pthread_mutex_unlock(&ndbcluster_mutex);
 return share;
 }
@@ -7485,13 +7486,12 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table,
 void ndbcluster_real_free_share(NDB_SHARE **share)
 {
 DBUG_ENTER("ndbcluster_real_free_share");
-DBUG_PRINT("real_free_share",
-("0x%lx key: %s key_length: %d",
-(*share), (*share)->key, (*share)->key_length));
-DBUG_PRINT("real_free_share",
-("db.tablename: %s.%s use_count: %d commit_count: %d",
-(*share)->db, (*share)->table_name,
-(*share)->use_count, (*share)->commit_count));
+DBUG_PRINT("enter",
+("share: 0x%lx key: %s key_length: %d "
+"db.tablename: %s.%s use_count: %d commit_count: %lu",
+(long) (*share), (*share)->key, (*share)->key_length,
+(*share)->db, (*share)->table_name,
+(*share)->use_count, (ulong) (*share)->commit_count));
 hash_delete(&ndbcluster_open_tables, (byte*) *share);
 thr_lock_delete(&(*share)->lock);
@@ -7539,13 +7539,13 @@ void ndbcluster_free_share(NDB_SHARE **share, bool have_lock)
 else
 {
 dbug_print_open_tables();
-DBUG_PRINT("free_share",
-("0x%lx key: %s key_length: %d",
-*share, (*share)->key, (*share)->key_length));
-DBUG_PRINT("free_share",
-("db.tablename: %s.%s use_count: %d commit_count: %d",
+DBUG_PRINT("info",
+("share: 0x%lx key: %s key_length: %d",
+(long) *share, (*share)->key, (*share)->key_length));
+DBUG_PRINT("info",
+("db.tablename: %s.%s use_count: %d commit_count: %lu",
 (*share)->db, (*share)->table_name,
-(*share)->use_count, (*share)->commit_count));
+(*share)->use_count, (ulong) (*share)->commit_count));
 }
 if (!have_lock)
 pthread_mutex_unlock(&ndbcluster_mutex);
@@ -7815,7 +7815,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
 get_partition_set(table, curr, active_index,
 &multi_range_curr->start_key,
 &part_spec);
-DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
 part_spec.start_part, part_spec.end_part));
 /*
 If partition pruning has found no partition in set
@@ -8347,8 +8347,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
 ndb_get_table_statistics(NULL, false, ndb, ndbtab_g.get_table(), &stat) == 0)
 {
 char buff[22], buff2[22];
-DBUG_PRINT("ndb_util_thread",
-("Table: %s, commit_count: %llu, rows: %llu",
+DBUG_PRINT("info",
+("Table: %s commit_count: %s rows: %s",
 share->key,
 llstr(stat.commit_count, buff),
 llstr(stat.row_count, buff2)));

View File

@@ -161,16 +161,16 @@ static void dbug_print_table(const char *info, TABLE *table)
 }
 DBUG_PRINT("info",
 ("%s: %s.%s s->fields: %d "
-"reclength: %d rec_buff_length: %d record[0]: %lx "
-"record[1]: %lx",
+"reclength: %lu rec_buff_length: %u record[0]: 0x%lx "
+"record[1]: 0x%lx",
 info,
 table->s->db.str,
 table->s->table_name.str,
 table->s->fields,
 table->s->reclength,
 table->s->rec_buff_length,
-table->record[0],
-table->record[1]));
+(long) table->record[0],
+(long) table->record[1]));
 for (unsigned int i= 0; i < table->s->fields; i++)
 {
@@ -180,7 +180,7 @@ static void dbug_print_table(const char *info, TABLE *table)
 "ptr: 0x%lx[+%d] null_bit: %u null_ptr: 0x%lx[+%d]",
 i,
 f->field_name,
-f->flags,
+(long) f->flags,
 (f->flags & PRI_KEY_FLAG) ? "pri" : "attr",
 (f->flags & NOT_NULL_FLAG) ? "" : ",nullable",
 (f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed",
@@ -189,16 +189,18 @@ static void dbug_print_table(const char *info, TABLE *table)
 (f->flags & BINARY_FLAG) ? ",binary" : "",
 f->real_type(),
 f->pack_length(),
-f->ptr, f->ptr - table->record[0],
+(long) f->ptr, (int) (f->ptr - table->record[0]),
 f->null_bit,
-f->null_ptr, (byte*) f->null_ptr - table->record[0]));
+(long) f->null_ptr,
+(int) ((byte*) f->null_ptr - table->record[0])));
 if (f->type() == MYSQL_TYPE_BIT)
 {
 Field_bit *g= (Field_bit*) f;
 DBUG_PRINT("MYSQL_TYPE_BIT",("field_length: %d bit_ptr: 0x%lx[+%d] "
-"bit_ofs: %u bit_len: %u",
-g->field_length, g->bit_ptr,
-(byte*) g->bit_ptr-table->record[0],
+"bit_ofs: %d bit_len: %u",
+g->field_length, (long) g->bit_ptr,
+(int) ((byte*) g->bit_ptr -
+table->record[0]),
 g->bit_ofs, g->bit_len));
 }
 }
@@ -605,11 +607,11 @@ static int ndbcluster_binlog_end(THD *thd)
 {
 DBUG_PRINT("share",
 ("[%d] 0x%lx key: %s key_length: %d",
-i, share, share->key, share->key_length));
+i, (long) share, share->key, share->key_length));
 DBUG_PRINT("share",
-("db.tablename: %s.%s use_count: %d commit_count: %d",
+("db.tablename: %s.%s use_count: %d commit_count: %lu",
 share->db, share->table_name,
-share->use_count, share->commit_count));
+share->use_count, (long) share->commit_count));
 }
 }
 pthread_mutex_unlock(&ndbcluster_mutex);
@@ -685,8 +687,8 @@ static NDB_SHARE *ndbcluster_check_apply_status_share()
 void *share= hash_search(&ndbcluster_open_tables,
 NDB_APPLY_TABLE_FILE,
 sizeof(NDB_APPLY_TABLE_FILE) - 1);
-DBUG_PRINT("info",("ndbcluster_check_apply_status_share %s %p",
-NDB_APPLY_TABLE_FILE, share));
+DBUG_PRINT("info",("ndbcluster_check_apply_status_share %s 0x%lx",
+NDB_APPLY_TABLE_FILE, (long) share));
 pthread_mutex_unlock(&ndbcluster_mutex);
 return (NDB_SHARE*) share;
 }
@@ -703,8 +705,8 @@ static NDB_SHARE *ndbcluster_check_schema_share()
 void *share= hash_search(&ndbcluster_open_tables,
 NDB_SCHEMA_TABLE_FILE,
 sizeof(NDB_SCHEMA_TABLE_FILE) - 1);
-DBUG_PRINT("info",("ndbcluster_check_schema_share %s %p",
-NDB_SCHEMA_TABLE_FILE, share));
+DBUG_PRINT("info",("ndbcluster_check_schema_share %s 0x%lx",
+NDB_SCHEMA_TABLE_FILE, (long) share));
 pthread_mutex_unlock(&ndbcluster_mutex);
 return (NDB_SHARE*) share;
 }
@@ -2721,10 +2723,9 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
 if (share->flags & NSF_BLOB_FLAG)
 op->mergeEvents(TRUE); // currently not inherited from event
-DBUG_PRINT("info", ("share->ndb_value[0]: 0x%x",
-share->ndb_value[0]));
-DBUG_PRINT("info", ("share->ndb_value[1]: 0x%x",
-share->ndb_value[1]));
+DBUG_PRINT("info", ("share->ndb_value[0]: 0x%lx share->ndb_value[1]: 0x%lx",
+(long) share->ndb_value[0],
+(long) share->ndb_value[1]));
 int n_columns= ndbtab->getNoOfColumns();
 int n_fields= table ? table->s->fields : 0; // XXX ???
 for (int j= 0; j < n_columns; j++)
@@ -2778,12 +2779,14 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
 }
 share->ndb_value[0][j].ptr= attr0.ptr;
 share->ndb_value[1][j].ptr= attr1.ptr;
-DBUG_PRINT("info", ("&share->ndb_value[0][%d]: 0x%x "
-"share->ndb_value[0][%d]: 0x%x",
-j, &share->ndb_value[0][j], j, attr0.ptr));
-DBUG_PRINT("info", ("&share->ndb_value[1][%d]: 0x%x "
-"share->ndb_value[1][%d]: 0x%x",
-j, &share->ndb_value[0][j], j, attr1.ptr));
+DBUG_PRINT("info", ("&share->ndb_value[0][%d]: 0x%lx "
+"share->ndb_value[0][%d]: 0x%lx",
+j, (long) &share->ndb_value[0][j],
+j, (long) attr0.ptr));
+DBUG_PRINT("info", ("&share->ndb_value[1][%d]: 0x%lx "
+"share->ndb_value[1][%d]: 0x%lx",
+j, (long) &share->ndb_value[0][j],
+j, (long) attr1.ptr));
 }
 op->setCustomData((void *) share); // set before execute
 share->op= op; // assign op in NDB_SHARE
@@ -2826,8 +2829,8 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
 (void) pthread_cond_signal(&injector_cond);
 }
-DBUG_PRINT("info",("%s share->op: 0x%lx, share->use_count: %u",
-share->key, share->op, share->use_count));
+DBUG_PRINT("info",("%s share->op: 0x%lx share->use_count: %u",
+share->key, (long) share->op, share->use_count));
 if (ndb_extra_logging)
 sql_print_information("NDB Binlog: logging %s", share->key);
@ -3012,10 +3015,11 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
free_share(&apply_status_share); free_share(&apply_status_share);
apply_status_share= 0; apply_status_share= 0;
} }
DBUG_PRINT("info", ("CLUSTER FAILURE EVENT: " DBUG_PRINT("error", ("CLUSTER FAILURE EVENT: "
"%s received share: 0x%lx op: %lx share op: %lx " "%s received share: 0x%lx op: 0x%lx share op: 0x%lx "
"op_old: %lx", "op_old: 0x%lx",
share->key, share, pOp, share->op, share->op_old)); share->key, (long) share, (long) pOp,
(long) share->op, (long) share->op_old));
break; break;
case NDBEVENT::TE_DROP: case NDBEVENT::TE_DROP:
if (apply_status_share == share) if (apply_status_share == share)
@ -3033,10 +3037,11 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
// fall through // fall through
case NDBEVENT::TE_ALTER: case NDBEVENT::TE_ALTER:
row.n_schemaops++; row.n_schemaops++;
DBUG_PRINT("info", ("TABLE %s EVENT: %s received share: 0x%lx op: %lx " DBUG_PRINT("info", ("TABLE %s EVENT: %s received share: 0x%lx op: 0x%lx "
"share op: %lx op_old: %lx", "share op: 0x%lx op_old: 0x%lx",
type == NDBEVENT::TE_DROP ? "DROP" : "ALTER", type == NDBEVENT::TE_DROP ? "DROP" : "ALTER",
share->key, share, pOp, share->op, share->op_old)); share->key, (long) share, (long) pOp,
(long) share->op, (long) share->op_old));
break; break;
case NDBEVENT::TE_NODE_FAILURE: case NDBEVENT::TE_NODE_FAILURE:
/* fall through */ /* fall through */
@ -3513,7 +3518,8 @@ restart:
} }
} }
// now check that we have epochs consistant with what we had before the restart // now check that we have epochs consistant with what we had before the restart
DBUG_PRINT("info", ("schema_res: %d schema_gci: %d", schema_res, schema_gci)); DBUG_PRINT("info", ("schema_res: %d schema_gci: %lu", schema_res,
(long) schema_gci));
{ {
i_ndb->flushIncompleteEvents(schema_gci); i_ndb->flushIncompleteEvents(schema_gci);
s_ndb->flushIncompleteEvents(schema_gci); s_ndb->flushIncompleteEvents(schema_gci);
@ -3697,8 +3703,8 @@ restart:
!= NULL) != NULL)
{ {
NDB_SHARE *share= (NDB_SHARE*)gci_op->getCustomData(); NDB_SHARE *share= (NDB_SHARE*)gci_op->getCustomData();
DBUG_PRINT("info", ("per gci_op: %p share: %p event_types: 0x%x", DBUG_PRINT("info", ("per gci_op: 0x%lx share: 0x%lx event_types: 0x%x",
gci_op, share, event_types)); (long) gci_op, (long) share, event_types));
// workaround for interface returning TE_STOP events // workaround for interface returning TE_STOP events
// which are normally filtered out below in the nextEvent loop // which are normally filtered out below in the nextEvent loop
if ((event_types & ~NdbDictionary::Event::TE_STOP) == 0) if ((event_types & ~NdbDictionary::Event::TE_STOP) == 0)
@ -3784,11 +3790,13 @@ restart:
{ {
NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData(); NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData();
DBUG_PRINT("info", DBUG_PRINT("info",
("EVENT TYPE: %d GCI: %lld last applied: %lld " ("EVENT TYPE: %d GCI: %ld last applied: %ld "
"share: 0x%lx (%s.%s)", pOp->getEventType(), gci, "share: 0x%lx (%s.%s)", pOp->getEventType(),
ndb_latest_applied_binlog_epoch, share, (long) gci,
share ? share->db : "share == NULL", (long) ndb_latest_applied_binlog_epoch,
share ? share->table_name : "")); (long) share,
share ? share->db : "'NULL'",
share ? share->table_name : "'NULL'"));
DBUG_ASSERT(share != 0); DBUG_ASSERT(share != 0);
} }
// assert that there is consistancy between gci op list // assert that there is consistancy between gci op list
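The recurring change in the hunks above is to stop printing pointers with %p (or with %lx and no cast) and instead cast to long and print 0x%lx: %p output is implementation-defined, so the same DBUG trace comes out differently on different platforms and C libraries. A minimal stand-alone sketch of the difference, using plain printf and made-up variable names rather than MySQL code:

#include <cstdio>

int main()
{
  int value= 42;
  int *ptr= &value;
  /* "%p" formatting is implementation-defined: with or without a 0x prefix,
     upper or lower case, zero-padded or not, depending on the C library.   */
  printf("via %%p:    %p\n", (void*) ptr);
  /* Casting to long and printing "0x%lx" pins down one format everywhere a
     pointer fits in a long, which is the assumption this commit relies on. */
  printf("via 0x%%lx: 0x%lx\n", (long) ptr);
  return 0;
}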
View File
@ -2027,7 +2027,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root)
if (!(m_file[i]= get_new_handler(table_share, mem_root, if (!(m_file[i]= get_new_handler(table_share, mem_root,
m_engine_array[i]))) m_engine_array[i])))
DBUG_RETURN(TRUE); DBUG_RETURN(TRUE);
DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i])); DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i]->db_type));
} }
/* For the moment we only support partition over the same table engine */ /* For the moment we only support partition over the same table engine */
if (m_engine_array[0] == myisam_hton) if (m_engine_array[0] == myisam_hton)
@ -2939,8 +2939,8 @@ int ha_partition::rnd_init(bool scan)
include_partition_fields_in_used_fields(); include_partition_fields_in_used_fields();
/* Now we see what the index of our first important partition is */ /* Now we see what the index of our first important partition is */
DBUG_PRINT("info", ("m_part_info->used_partitions 0x%x", DBUG_PRINT("info", ("m_part_info->used_partitions: 0x%lx",
m_part_info->used_partitions.bitmap)); (long) m_part_info->used_partitions.bitmap));
part_id= bitmap_get_first_set(&(m_part_info->used_partitions)); part_id= bitmap_get_first_set(&(m_part_info->used_partitions));
DBUG_PRINT("info", ("m_part_spec.start_part %d", part_id)); DBUG_PRINT("info", ("m_part_spec.start_part %d", part_id));
View File
@ -1513,7 +1513,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
DBUG_ENTER("handler::ha_open"); DBUG_ENTER("handler::ha_open");
DBUG_PRINT("enter", DBUG_PRINT("enter",
("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d", ("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d",
name, table_share->db_type, table_arg->db_stat, mode, name, ht->db_type, table_arg->db_stat, mode,
test_if_locked)); test_if_locked));
table= table_arg; table= table_arg;
@ -1927,8 +1927,8 @@ int handler::update_auto_increment()
void handler::column_bitmaps_signal() void handler::column_bitmaps_signal()
{ {
DBUG_ENTER("column_bitmaps_signal"); DBUG_ENTER("column_bitmaps_signal");
DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx", table->read_set, DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx", (long) table->read_set,
table->write_set)); (long) table->write_set));
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
} }
@ -3507,8 +3507,10 @@ namespace
int write_locked_table_maps(THD *thd) int write_locked_table_maps(THD *thd)
{ {
DBUG_ENTER("write_locked_table_maps"); DBUG_ENTER("write_locked_table_maps");
DBUG_PRINT("enter", ("thd=%p, thd->lock=%p, thd->locked_tables=%p, thd->extra_lock", DBUG_PRINT("enter", ("thd: 0x%lx thd->lock: 0x%lx thd->locked_tables: 0x%lx "
thd, thd->lock, thd->locked_tables, thd->extra_lock)); "thd->extra_lock: 0x%lx",
(long) thd, (long) thd->lock,
(long) thd->locked_tables, (long) thd->extra_lock));
if (thd->get_binlog_table_maps() == 0) if (thd->get_binlog_table_maps() == 0)
{ {
@ -3528,7 +3530,7 @@ namespace
++table_ptr) ++table_ptr)
{ {
TABLE *const table= *table_ptr; TABLE *const table= *table_ptr;
DBUG_PRINT("info", ("Checking table %s", table->s->table_name)); DBUG_PRINT("info", ("Checking table %s", table->s->table_name.str));
if (table->current_lock == F_WRLCK && if (table->current_lock == F_WRLCK &&
check_table_binlog_row_based(thd, table)) check_table_binlog_row_based(thd, table))
{ {
View File
@ -3061,7 +3061,7 @@ longlong Item_is_not_null_test::val_int()
if (!used_tables_cache) if (!used_tables_cache)
{ {
owner->was_null|= (!cached_value); owner->was_null|= (!cached_value);
DBUG_PRINT("info", ("cached :%ld", (long) cached_value)); DBUG_PRINT("info", ("cached: %ld", (long) cached_value));
DBUG_RETURN(cached_value); DBUG_RETURN(cached_value);
} }
if (args[0]->is_null()) if (args[0]->is_null())
View File
@ -5044,7 +5044,7 @@ Item_func_sp::result_type() const
{ {
Field *field; Field *field;
DBUG_ENTER("Item_func_sp::result_type"); DBUG_ENTER("Item_func_sp::result_type");
DBUG_PRINT("info", ("m_sp = %p", m_sp)); DBUG_PRINT("info", ("m_sp: 0x%lx", (long) m_sp));
if (result_field) if (result_field)
DBUG_RETURN(result_field->result_type()); DBUG_RETURN(result_field->result_type());
View File
@ -1344,7 +1344,7 @@ binlog_trans_log_savepos(THD *thd, my_off_t *pos)
(binlog_trx_data*) thd->ha_data[binlog_hton->slot]; (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
DBUG_ASSERT(mysql_bin_log.is_open()); DBUG_ASSERT(mysql_bin_log.is_open());
*pos= trx_data->position(); *pos= trx_data->position();
DBUG_PRINT("return", ("*pos=%u", *pos)); DBUG_PRINT("return", ("*pos: %lu", (ulong) *pos));
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
} }
@ -1368,7 +1368,7 @@ static void
binlog_trans_log_truncate(THD *thd, my_off_t pos) binlog_trans_log_truncate(THD *thd, my_off_t pos)
{ {
DBUG_ENTER("binlog_trans_log_truncate"); DBUG_ENTER("binlog_trans_log_truncate");
DBUG_PRINT("enter", ("pos=%u", pos)); DBUG_PRINT("enter", ("pos: %lu", (ulong) pos));
DBUG_ASSERT(thd->ha_data[binlog_hton->slot] != NULL); DBUG_ASSERT(thd->ha_data[binlog_hton->slot] != NULL);
/* Only true if binlog_trans_log_savepos() wasn't called before */ /* Only true if binlog_trans_log_savepos() wasn't called before */
@ -1444,8 +1444,8 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data,
DBUG_ENTER("binlog_end_trans"); DBUG_ENTER("binlog_end_trans");
int error=0; int error=0;
IO_CACHE *trans_log= &trx_data->trans_log; IO_CACHE *trans_log= &trx_data->trans_log;
DBUG_PRINT("enter", ("transaction: %s, end_ev=%p", DBUG_PRINT("enter", ("transaction: %s end_ev: 0x%lx",
all ? "all" : "stmt", end_ev)); all ? "all" : "stmt", (long) end_ev));
DBUG_PRINT("info", ("thd->options={ %s%s}", DBUG_PRINT("info", ("thd->options={ %s%s}",
FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT), FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT),
FLAGSTR(thd->options, OPTION_BEGIN))); FLAGSTR(thd->options, OPTION_BEGIN)));
@ -3417,12 +3417,13 @@ int THD::binlog_setup_trx_data()
void void
THD::binlog_start_trans_and_stmt() THD::binlog_start_trans_and_stmt()
{ {
DBUG_ENTER("binlog_start_trans_and_stmt");
binlog_trx_data *trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot]; binlog_trx_data *trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot];
DBUG_PRINT("enter", ("trx_data=0x%lu", trx_data)); DBUG_ENTER("binlog_start_trans_and_stmt");
if (trx_data) DBUG_PRINT("enter", ("trx_data: 0x%lx trx_data->before_stmt_pos: %lu",
DBUG_PRINT("enter", ("trx_data->before_stmt_pos=%u", (long) trx_data,
trx_data->before_stmt_pos)); (trx_data ? (ulong) trx_data->before_stmt_pos :
(ulong) 0)));
if (trx_data == NULL || if (trx_data == NULL ||
trx_data->before_stmt_pos == MY_OFF_T_UNDEF) trx_data->before_stmt_pos == MY_OFF_T_UNDEF)
{ {
@ -3453,8 +3454,8 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans)
{ {
int error; int error;
DBUG_ENTER("THD::binlog_write_table_map"); DBUG_ENTER("THD::binlog_write_table_map");
DBUG_PRINT("enter", ("table: %0xlx (%s: #%u)", DBUG_PRINT("enter", ("table: 0x%lx (%s: #%lu)",
(long) table, table->s->table_name, (long) table, table->s->table_name.str,
table->s->table_map_id)); table->s->table_map_id));
/* Pre-conditions */ /* Pre-conditions */
@ -3517,7 +3518,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
{ {
DBUG_ENTER("MYSQL_BIN_LOG::flush_and_set_pending_rows_event(event)"); DBUG_ENTER("MYSQL_BIN_LOG::flush_and_set_pending_rows_event(event)");
DBUG_ASSERT(mysql_bin_log.is_open()); DBUG_ASSERT(mysql_bin_log.is_open());
DBUG_PRINT("enter", ("event=%p", event)); DBUG_PRINT("enter", ("event: 0x%lx", (long) event));
int error= 0; int error= 0;
@ -3526,7 +3527,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
DBUG_ASSERT(trx_data); DBUG_ASSERT(trx_data);
DBUG_PRINT("info", ("trx_data->pending()=%p", trx_data->pending())); DBUG_PRINT("info", ("trx_data->pending(): 0x%lx", (long) trx_data->pending()));
if (Rows_log_event* pending= trx_data->pending()) if (Rows_log_event* pending= trx_data->pending())
{ {
@ -3681,9 +3682,9 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
my_off_t trans_log_pos= my_b_tell(trans_log); my_off_t trans_log_pos= my_b_tell(trans_log);
if (event_info->get_cache_stmt() || trans_log_pos != 0) if (event_info->get_cache_stmt() || trans_log_pos != 0)
{ {
DBUG_PRINT("info", ("Using trans_log: cache=%d, trans_log_pos=%u", DBUG_PRINT("info", ("Using trans_log: cache: %d, trans_log_pos: %lu",
event_info->get_cache_stmt(), event_info->get_cache_stmt(),
trans_log_pos)); (ulong) trans_log_pos));
if (trans_log_pos == 0) if (trans_log_pos == 0)
thd->binlog_start_trans_and_stmt(); thd->binlog_start_trans_and_stmt();
file= trans_log; file= trans_log;
@ -3725,15 +3726,17 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
} }
if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0) if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0)
{ {
DBUG_PRINT("info",("number of auto_inc intervals: %lu", DBUG_PRINT("info",("number of auto_inc intervals: %u",
thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements())); thd->auto_inc_intervals_in_cur_stmt_for_binlog.
nb_elements()));
/* /*
If the auto_increment was second in a table's index (possible with If the auto_increment was second in a table's index (possible with
MyISAM or BDB) (table->next_number_key_offset != 0), such event is MyISAM or BDB) (table->next_number_key_offset != 0), such event is
in fact not necessary. We could avoid logging it. in fact not necessary. We could avoid logging it.
*/ */
Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT, Intvar_log_event e(thd, (uchar) INSERT_ID_EVENT,
thd->auto_inc_intervals_in_cur_stmt_for_binlog.minimum()); thd->auto_inc_intervals_in_cur_stmt_for_binlog.
minimum());
if (e.write(file)) if (e.write(file))
goto err; goto err;
} }
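The log.cc hunks above apply the same idea to offsets: my_off_t positions are narrowed to ulong and printed with %lu instead of %lld, because %lld was not portable across the printf implementations the server was built against. A stand-alone sketch of the pattern, with uint64_t standing in for my_off_t (an assumption, not the real typedef):

#include <cstdio>
#include <cstdint>

int main()
{
  uint64_t pos= 123456789;   /* stand-in for a my_off_t binlog position */
  /* Narrow to unsigned long for printing; positions above ULONG_MAX would
     be truncated in the trace, which is acceptable for debug output.     */
  printf("pos: %lu\n", (unsigned long) pos);
  return 0;
}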
View File
@ -5345,8 +5345,8 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
uint8 const common_header_len= description_event->common_header_len; uint8 const common_header_len= description_event->common_header_len;
uint8 const post_header_len= description_event->post_header_len[event_type-1]; uint8 const post_header_len= description_event->post_header_len[event_type-1];
DBUG_PRINT("enter",("event_len=%ld, common_header_len=%d, " DBUG_PRINT("enter",("event_len: %u common_header_len: %d "
"post_header_len=%d", "post_header_len: %d",
event_len, common_header_len, event_len, common_header_len,
post_header_len)); post_header_len));
@ -5376,7 +5376,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
const byte* const ptr_rows_data= var_start + byte_count + 1; const byte* const ptr_rows_data= var_start + byte_count + 1;
my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf); my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf);
DBUG_PRINT("info",("m_table_id=%lu, m_flags=%d, m_width=%u, data_size=%lu", DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %u",
m_table_id, m_flags, m_width, data_size)); m_table_id, m_flags, m_width, data_size));
m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME)); m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME));
@ -5416,7 +5416,7 @@ int Rows_log_event::do_add_row_data(byte *const row_data,
would save binlog space. TODO would save binlog space. TODO
*/ */
DBUG_ENTER("Rows_log_event::do_add_row_data"); DBUG_ENTER("Rows_log_event::do_add_row_data");
DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data, DBUG_PRINT("enter", ("row_data: 0x%lx length: %u", (ulong) row_data,
length)); length));
/* /*
Don't print debug messages when running valgrind since they can Don't print debug messages when running valgrind since they can
@ -5513,7 +5513,7 @@ unpack_row(RELAY_LOG_INFO *rli,
{ {
DBUG_ENTER("unpack_row"); DBUG_ENTER("unpack_row");
DBUG_ASSERT(record && row); DBUG_ASSERT(record && row);
DBUG_PRINT("enter", ("row=0x%lx; record=0x%lx", row, record)); DBUG_PRINT("enter", ("row: 0x%lx record: 0x%lx", (long) row, (long) record));
my_ptrdiff_t const offset= record - (byte*) table->record[0]; my_ptrdiff_t const offset= record - (byte*) table->record[0];
my_size_t master_null_bytes= table->s->null_bytes; my_size_t master_null_bytes= table->s->null_bytes;
@ -5555,10 +5555,12 @@ unpack_row(RELAY_LOG_INFO *rli,
if (bitmap_is_set(cols, field_ptr - begin_ptr)) if (bitmap_is_set(cols, field_ptr - begin_ptr))
{ {
DBUG_ASSERT(table->record[0] <= f->ptr); DBUG_ASSERT(table->record[0] <= f->ptr);
DBUG_ASSERT(f->ptr < table->record[0] + table->s->reclength + (f->pack_length_in_rec() == 0)); DBUG_ASSERT(f->ptr < (table->record[0] + table->s->reclength +
(f->pack_length_in_rec() == 0)));
f->move_field_offset(offset); f->move_field_offset(offset);
DBUG_PRINT("info", ("unpacking column '%s' to 0x%lx", f->field_name, f->ptr)); DBUG_PRINT("info", ("unpacking column '%s' to 0x%lx", f->field_name,
(long) f->ptr));
ptr= f->unpack(f->ptr, ptr); ptr= f->unpack(f->ptr, ptr);
f->move_field_offset(-offset); f->move_field_offset(-offset);
/* Field...::unpack() cannot return 0 */ /* Field...::unpack() cannot return 0 */
@ -6068,7 +6070,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len,
uint8 common_header_len= description_event->common_header_len; uint8 common_header_len= description_event->common_header_len;
uint8 post_header_len= description_event->post_header_len[TABLE_MAP_EVENT-1]; uint8 post_header_len= description_event->post_header_len[TABLE_MAP_EVENT-1];
DBUG_PRINT("info",("event_len=%ld, common_header_len=%d, post_header_len=%d", DBUG_PRINT("info",("event_len: %u common_header_len: %d post_header_len: %d",
event_len, common_header_len, post_header_len)); event_len, common_header_len, post_header_len));
/* /*
@ -6116,10 +6118,10 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len,
uchar *ptr_after_colcnt= (uchar*) ptr_colcnt; uchar *ptr_after_colcnt= (uchar*) ptr_colcnt;
m_colcnt= net_field_length(&ptr_after_colcnt); m_colcnt= net_field_length(&ptr_after_colcnt);
DBUG_PRINT("info",("m_dblen=%d off=%d m_tbllen=%d off=%d m_colcnt=%d off=%d", DBUG_PRINT("info",("m_dblen: %d off: %ld m_tbllen: %d off: %ld m_colcnt: %lu off: %ld",
m_dblen, ptr_dblen-(const byte*)vpart, m_dblen, (long) (ptr_dblen-(const byte*)vpart),
m_tbllen, ptr_tbllen-(const byte*)vpart, m_tbllen, (long) (ptr_tbllen-(const byte*)vpart),
m_colcnt, ptr_colcnt-(const byte*)vpart)); m_colcnt, (long) (ptr_colcnt-(const byte*)vpart)));
/* Allocate mem for all fields in one go. If fails, catched in is_valid() */ /* Allocate mem for all fields in one go. If fails, catched in is_valid() */
m_memory= my_multi_malloc(MYF(MY_WME), m_memory= my_multi_malloc(MYF(MY_WME),
@ -6523,10 +6525,10 @@ copy_extra_record_fields(TABLE *table,
my_size_t master_reclength, my_size_t master_reclength,
my_ptrdiff_t master_fields) my_ptrdiff_t master_fields)
{ {
DBUG_PRINT("info", ("Copying to %p " DBUG_PRINT("info", ("Copying to 0x%lx "
"from field %ld at offset %u " "from field %ld at offset %u "
"to field %d at offset %u", "to field %d at offset %lu",
table->record[0], (long) table->record[0],
master_fields, master_reclength, master_fields, master_reclength,
table->s->fields, table->s->reclength)); table->s->fields, table->s->reclength));
/* /*
View File
@ -2121,7 +2121,7 @@ the thread stack. Please read http://www.mysql.com/doc/en/Linux.html\n\n",
#ifdef HAVE_STACKTRACE #ifdef HAVE_STACKTRACE
if (!(test_flags & TEST_NO_STACKTRACE)) if (!(test_flags & TEST_NO_STACKTRACE))
{ {
fprintf(stderr,"thd=%p\n",thd); fprintf(stderr,"thd: 0x%lx\n",(long) thd);
print_stacktrace(thd ? (gptr) thd->thread_stack : (gptr) 0, print_stacktrace(thd ? (gptr) thd->thread_stack : (gptr) 0,
thread_stack); thread_stack);
} }
View File
@ -10814,7 +10814,7 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
if (!tmp.length()) if (!tmp.length())
tmp.append(STRING_WITH_LEN("(empty)")); tmp.append(STRING_WITH_LEN("(empty)"));
DBUG_PRINT("info", ("SEL_TREE %p (%s) scans:%s", tree, msg, tmp.ptr())); DBUG_PRINT("info", ("SEL_TREE: 0x%lx (%s) scans: %s", (long) tree, msg, tmp.ptr()));
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
} }
View File
@ -564,8 +564,8 @@ err:
mysql_free_result(res); mysql_free_result(res);
if (error) if (error)
{ {
sql_print_error("While trying to obtain the list of slaves from the master \ sql_print_error("While trying to obtain the list of slaves from the master "
'%s:%d', user '%s' got the following error: '%s'", "'%s:%d', user '%s' got the following error: '%s'",
mi->host, mi->port, mi->user, error); mi->host, mi->port, mi->user, error);
DBUG_RETURN(1); DBUG_RETURN(1);
} }
View File
@ -402,7 +402,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
bool look_for_description_event) bool look_for_description_event)
{ {
DBUG_ENTER("init_relay_log_pos"); DBUG_ENTER("init_relay_log_pos");
DBUG_PRINT("info", ("pos=%lu", pos)); DBUG_PRINT("info", ("pos: %lu", (ulong) pos));
*errmsg=0; *errmsg=0;
pthread_mutex_t *log_lock=rli->relay_log.get_log_lock(); pthread_mutex_t *log_lock=rli->relay_log.get_log_lock();
@ -855,7 +855,7 @@ void st_relay_log_info::close_temporary_tables()
Don't ask for disk deletion. For now, anyway they will be deleted when Don't ask for disk deletion. For now, anyway they will be deleted when
slave restarts, but it is a better intention to not delete them. slave restarts, but it is a better intention to not delete them.
*/ */
DBUG_PRINT("info", ("table: %p", table)); DBUG_PRINT("info", ("table: 0x%lx", (long) table));
close_temporary(table, 1, 0); close_temporary(table, 1, 0);
} }
save_temporary_tables= 0; save_temporary_tables= 0;
View File
@ -50,17 +50,17 @@ table_mapping::~table_mapping()
st_table* table_mapping::get_table(ulong table_id) st_table* table_mapping::get_table(ulong table_id)
{ {
DBUG_ENTER("table_mapping::get_table(ulong)"); DBUG_ENTER("table_mapping::get_table(ulong)");
DBUG_PRINT("enter", ("table_id=%d", table_id)); DBUG_PRINT("enter", ("table_id: %lu", table_id));
entry *e= find_entry(table_id); entry *e= find_entry(table_id);
if (e) if (e)
{ {
DBUG_PRINT("info", ("tid %d -> table %p (%s)", DBUG_PRINT("info", ("tid %lu -> table 0x%lx (%s)",
table_id, e->table, table_id, (long) e->table,
MAYBE_TABLE_NAME(e->table))); MAYBE_TABLE_NAME(e->table)));
DBUG_RETURN(e->table); DBUG_RETURN(e->table);
} }
DBUG_PRINT("info", ("tid %d is not mapped!", table_id)); DBUG_PRINT("info", ("tid %lu is not mapped!", table_id));
DBUG_RETURN(NULL); DBUG_RETURN(NULL);
} }
@ -93,9 +93,9 @@ int table_mapping::expand()
int table_mapping::set_table(ulong table_id, TABLE* table) int table_mapping::set_table(ulong table_id, TABLE* table)
{ {
DBUG_ENTER("table_mapping::set_table(ulong,TABLE*)"); DBUG_ENTER("table_mapping::set_table(ulong,TABLE*)");
DBUG_PRINT("enter", ("table_id=%d, table=%p (%s)", DBUG_PRINT("enter", ("table_id: %lu table: 0x%lx (%s)",
table_id, table_id,
table, MAYBE_TABLE_NAME(table))); (long) table, MAYBE_TABLE_NAME(table)));
entry *e= find_entry(table_id); entry *e= find_entry(table_id);
if (e == 0) if (e == 0)
{ {
@ -111,8 +111,8 @@ int table_mapping::set_table(ulong table_id, TABLE* table)
e->table= table; e->table= table;
my_hash_insert(&m_table_ids,(byte *)e); my_hash_insert(&m_table_ids,(byte *)e);
DBUG_PRINT("info", ("tid %d -> table %p (%s)", DBUG_PRINT("info", ("tid %lu -> table 0x%lx (%s)",
table_id, e->table, table_id, (long) e->table,
MAYBE_TABLE_NAME(e->table))); MAYBE_TABLE_NAME(e->table)));
DBUG_RETURN(0); // All OK DBUG_RETURN(0); // All OK
} }
View File
@ -3943,7 +3943,7 @@ sys_var_event_scheduler::update(THD *thd, set_var *var)
DBUG_RETURN(TRUE); DBUG_RETURN(TRUE);
} }
DBUG_PRINT("new_value", ("%lu", (bool)var->save_result.ulong_value)); DBUG_PRINT("info", ("new_value: %d", (int) var->save_result.ulong_value));
Item_result var_type= var->value->result_type(); Item_result var_type= var->value->result_type();
View File
@ -1609,7 +1609,7 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings)
DBUG_RETURN(packet_error); DBUG_RETURN(packet_error);
} }
DBUG_PRINT("info",( "len=%u, net->read_pos[4] = %d\n", DBUG_PRINT("exit", ("len: %lu net->read_pos[4]: %d",
len, mysql->net.read_pos[4])); len, mysql->net.read_pos[4]));
DBUG_RETURN(len - 1); DBUG_RETURN(len - 1);
} }
@ -1800,7 +1800,7 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
ev->when = time(NULL); ev->when = time(NULL);
ev->thd = thd; // because up to this point, ev->thd == 0 ev->thd = thd; // because up to this point, ev->thd == 0
exec_res = ev->exec_event(rli); exec_res = ev->exec_event(rli);
DBUG_PRINT("info", ("exec_event result = %d", exec_res)); DBUG_PRINT("info", ("exec_event result: %d", exec_res));
DBUG_ASSERT(rli->sql_thd==thd); DBUG_ASSERT(rli->sql_thd==thd);
/* /*
Format_description_log_event should not be deleted because it will be Format_description_log_event should not be deleted because it will be
@ -1951,9 +1951,9 @@ pthread_handler_t handle_slave_io(void *arg)
// we can get killed during safe_connect // we can get killed during safe_connect
if (!safe_connect(thd, mysql, mi)) if (!safe_connect(thd, mysql, mi))
{ {
sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',\ sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',"
replication started in log '%s' at position %s", mi->user, "replication started in log '%s' at position %s",
mi->host, mi->port, mi->user, mi->host, mi->port,
IO_RPL_LOG_NAME, IO_RPL_LOG_NAME,
llstr(mi->master_log_pos,llbuff)); llstr(mi->master_log_pos,llbuff));
/* /*
@ -3107,8 +3107,8 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
{ {
last_errno=mysql_errno(mysql); last_errno=mysql_errno(mysql);
suppress_warnings= 0; suppress_warnings= 0;
sql_print_error("Slave I/O thread: error %s to master \ sql_print_error("Slave I/O thread: error %s to master "
'%s@%s:%d': \ "'%s@%s:%d': \
Error: '%s' errno: %d retry-time: %d retries: %lu", Error: '%s' errno: %d retry-time: %d retries: %lu",
(reconnect ? "reconnecting" : "connecting"), (reconnect ? "reconnecting" : "connecting"),
mi->user, mi->host, mi->port, mi->user, mi->host, mi->port,
View File
@ -899,7 +899,7 @@ subst_spvars(THD *thd, sp_instr *instr, LEX_STRING *query_str)
break; break;
val= (*splocal)->this_item(); val= (*splocal)->this_item();
DBUG_PRINT("info", ("print %p", val)); DBUG_PRINT("info", ("print 0x%lx", (long) val));
str_value= sp_get_item_value(val, &str_value_holder); str_value= sp_get_item_value(val, &str_value_holder);
if (str_value) if (str_value)
res|= qbuf.append(*str_value); res|= qbuf.append(*str_value);
View File
@ -1087,7 +1087,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
if (!lock_in_use) if (!lock_in_use)
VOID(pthread_mutex_lock(&LOCK_open)); VOID(pthread_mutex_lock(&LOCK_open));
DBUG_PRINT("info", ("thd->open_tables: %p", thd->open_tables)); DBUG_PRINT("info", ("thd->open_tables: 0x%lx", (long) thd->open_tables));
found_old_table= 0; found_old_table= 0;
while (thd->open_tables) while (thd->open_tables)
@ -1177,6 +1177,16 @@ static inline uint tmpkeyval(THD *thd, TABLE *table)
void close_temporary_tables(THD *thd) void close_temporary_tables(THD *thd)
{ {
TABLE *table; TABLE *table;
TABLE *next;
/*
TODO: 5.1 maintains prev link in temporary_tables
double-linked list so we could fix it. But it is not necessary
at this time when the list is being destroyed
*/
TABLE *prev_table;
/* Assume thd->options has OPTION_QUOTE_SHOW_CREATE */
bool was_quote_show= TRUE;
if (!thd->temporary_tables) if (!thd->temporary_tables)
return; return;
@ -1192,12 +1202,7 @@ void close_temporary_tables(THD *thd)
return; return;
} }
TABLE *next, /* Better add "if exists", in case a RESET MASTER has been done */
*prev_table /* TODO: 5.1 maintaines prev link in temporary_tables
double-linked list so we could fix it. But it is not necessary
at this time when the list is being destroyed */;
bool was_quote_show= true; /* to assume thd->options has OPTION_QUOTE_SHOW_CREATE */
// Better add "if exists", in case a RESET MASTER has been done
const char stub[]= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS "; const char stub[]= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ";
uint stub_len= sizeof(stub) - 1; uint stub_len= sizeof(stub) - 1;
char buf[256]; char buf[256];
@ -1303,7 +1308,7 @@ void close_temporary_tables(THD *thd)
} }
} }
if (!was_quote_show) if (!was_quote_show)
thd->options &= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */ thd->options&= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */
thd->temporary_tables=0; thd->temporary_tables=0;
} }
@ -2069,7 +2074,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
VOID(pthread_mutex_unlock(&LOCK_open)); VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_RETURN(0); // VIEW DBUG_RETURN(0); // VIEW
} }
DBUG_PRINT("info", ("inserting table %p into the cache", table)); DBUG_PRINT("info", ("inserting table 0x%lx into the cache", (long) table));
VOID(my_hash_insert(&open_cache,(byte*) table)); VOID(my_hash_insert(&open_cache,(byte*) table));
} }
@ -2399,7 +2404,7 @@ bool table_is_used(TABLE *table, bool wait_for_name_lock)
{ {
DBUG_PRINT("info", ("share: 0x%lx locked_by_logger: %d " DBUG_PRINT("info", ("share: 0x%lx locked_by_logger: %d "
"locked_by_flush: %d locked_by_name: %d " "locked_by_flush: %d locked_by_name: %d "
"db_stat: %u version: %u", "db_stat: %u version: %lu",
(ulong) search->s, search->locked_by_logger, (ulong) search->s, search->locked_by_logger,
search->locked_by_flush, search->locked_by_name, search->locked_by_flush, search->locked_by_name,
search->db_stat, search->db_stat,
View File
@ -80,8 +80,9 @@ void mysql_client_binlog_statement(THD* thd)
int bytes_decoded= base64_decode(strptr, coded_len, buf, &endptr); int bytes_decoded= base64_decode(strptr, coded_len, buf, &endptr);
DBUG_PRINT("info", DBUG_PRINT("info",
("bytes_decoded=%d; strptr=0x%lu; endptr=0x%lu ('%c':%d)", ("bytes_decoded: %d strptr: 0x%lx endptr: 0x%lx ('%c':%d)",
bytes_decoded, strptr, endptr, *endptr, *endptr)); bytes_decoded, (long) strptr, (long) endptr, *endptr,
*endptr));
if (bytes_decoded < 0) if (bytes_decoded < 0)
{ {
@ -145,14 +146,15 @@ void mysql_client_binlog_statement(THD* thd)
bufptr += event_len; bufptr += event_len;
DBUG_PRINT("info",("ev->get_type_code()=%d", ev->get_type_code())); DBUG_PRINT("info",("ev->get_type_code()=%d", ev->get_type_code()));
DBUG_PRINT("info",("bufptr+EVENT_TYPE_OFFSET=0x%lx", DBUG_PRINT("info",("bufptr+EVENT_TYPE_OFFSET: 0x%lx",
bufptr+EVENT_TYPE_OFFSET)); (long) (bufptr+EVENT_TYPE_OFFSET)));
DBUG_PRINT("info", ("bytes_decoded=%d; bufptr=0x%lx; buf[EVENT_LEN_OFFSET]=%u", DBUG_PRINT("info", ("bytes_decoded: %d bufptr: 0x%lx buf[EVENT_LEN_OFFSET]: %lu",
bytes_decoded, bufptr, uint4korr(bufptr+EVENT_LEN_OFFSET))); bytes_decoded, (long) bufptr,
uint4korr(bufptr+EVENT_LEN_OFFSET)));
ev->thd= thd; ev->thd= thd;
if (int err= ev->exec_event(thd->rli_fake)) if (int err= ev->exec_event(thd->rli_fake))
{ {
DBUG_PRINT("info", ("exec_event() - error=%d", error)); DBUG_PRINT("error", ("exec_event() returned: %d", err));
/* /*
TODO: Maybe a better error message since the BINLOG statement TODO: Maybe a better error message since the BINLOG statement
now contains several events. now contains several events.
View File
@ -2981,7 +2981,7 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used,
DBUG_PRINT("qcache", ("table: %s db: %s type: %u", DBUG_PRINT("qcache", ("table: %s db: %s type: %u",
tables_used->table->s->table_name.str, tables_used->table->s->table_name.str,
tables_used->table->s->db.str, tables_used->table->s->db.str,
tables_used->table->s->db_type)); tables_used->table->s->db_type->db_type));
if (tables_used->derived) if (tables_used->derived)
{ {
table_count--; table_count--;
@ -3037,7 +3037,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
lex->safe_to_cache_query) lex->safe_to_cache_query)
{ {
DBUG_PRINT("qcache", ("options: %lx %lx type: %u", DBUG_PRINT("qcache", ("options: %lx %lx type: %u",
OPTION_TO_QUERY_CACHE, (long) OPTION_TO_QUERY_CACHE,
(long) lex->select_lex.options, (long) lex->select_lex.options,
(int) thd->variables.query_cache_type)); (int) thd->variables.query_cache_type));
@ -3057,7 +3057,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
DBUG_PRINT("qcache", DBUG_PRINT("qcache",
("not interesting query: %d or not cacheable, options %lx %lx type: %u", ("not interesting query: %d or not cacheable, options %lx %lx type: %u",
(int) lex->sql_command, (int) lex->sql_command,
OPTION_TO_QUERY_CACHE, (long) OPTION_TO_QUERY_CACHE,
(long) lex->select_lex.options, (long) lex->select_lex.options,
(int) thd->variables.query_cache_type)); (int) thd->variables.query_cache_type));
DBUG_RETURN(0); DBUG_RETURN(0);
View File
@ -551,7 +551,7 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
void THD::awake(THD::killed_state state_to_set) void THD::awake(THD::killed_state state_to_set)
{ {
DBUG_ENTER("THD::awake"); DBUG_ENTER("THD::awake");
DBUG_PRINT("enter", ("this=0x%lx", this)); DBUG_PRINT("enter", ("this: 0x%lx", (long) this));
THD_CHECK_SENTRY(this); THD_CHECK_SENTRY(this);
safe_mutex_assert_owner(&LOCK_delete); safe_mutex_assert_owner(&LOCK_delete);
@ -2623,9 +2623,9 @@ namespace {
return m_memory != 0; return m_memory != 0;
} }
byte *slot(int const s) byte *slot(uint s)
{ {
DBUG_ASSERT(0 <= s && s < sizeof(m_ptr)/sizeof(*m_ptr)); DBUG_ASSERT(s < sizeof(m_ptr)/sizeof(*m_ptr));
DBUG_ASSERT(m_ptr[s] != 0); DBUG_ASSERT(m_ptr[s] != 0);
DBUG_ASSERT(m_alloc_checked == true); DBUG_ASSERT(m_alloc_checked == true);
return m_ptr[s]; return m_ptr[s];
View File
@ -367,9 +367,9 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
strlen(tables->alias) + 1))) strlen(tables->alias) + 1)))
{ {
table= hash_tables->table; table= hash_tables->table;
DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' tab %p", DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' table: 0x%lx",
hash_tables->db, hash_tables->table_name, hash_tables->db, hash_tables->table_name,
hash_tables->alias, table)); hash_tables->alias, (long) table));
if (!table) if (!table)
{ {
/* /*
@ -633,7 +633,8 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags,
TABLE **table_ptr; TABLE **table_ptr;
bool did_lock= FALSE; bool did_lock= FALSE;
DBUG_ENTER("mysql_ha_flush"); DBUG_ENTER("mysql_ha_flush");
DBUG_PRINT("enter", ("tables: %p mode_flags: 0x%02x", tables, mode_flags)); DBUG_PRINT("enter", ("tables: 0x%lx mode_flags: 0x%02x",
(long) tables, mode_flags));
if (tables) if (tables)
{ {
View File
@ -1443,7 +1443,7 @@ bool st_select_lex::add_order_to_list(THD *thd, Item *item, bool asc)
bool st_select_lex::add_item_to_list(THD *thd, Item *item) bool st_select_lex::add_item_to_list(THD *thd, Item *item)
{ {
DBUG_ENTER("st_select_lex::add_item_to_list"); DBUG_ENTER("st_select_lex::add_item_to_list");
DBUG_PRINT("info", ("Item: %p", item)); DBUG_PRINT("info", ("Item: 0x%lx", (long) item));
DBUG_RETURN(item_list.push_back(item)); DBUG_RETURN(item_list.push_back(item));
} }
View File
@ -1604,7 +1604,7 @@ bool do_command(THD *thd)
command= COM_END; // Wrong command command= COM_END; // Wrong command
DBUG_PRINT("info",("Command on %s = %d (%s)", DBUG_PRINT("info",("Command on %s = %d (%s)",
vio_description(net->vio), command, vio_description(net->vio), command,
command_name[command])); command_name[command].str));
} }
net->read_timeout=old_timeout; // restore it net->read_timeout=old_timeout; // restore it
/* /*
@ -1828,7 +1828,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
char *packet_end= thd->query + thd->query_length; char *packet_end= thd->query + thd->query_length;
/* 'b' stands for 'buffer' parameter', special for 'my_snprintf' */ /* 'b' stands for 'buffer' parameter', special for 'my_snprintf' */
const char *format= "%.*b"; const char *format= "%.*b";
general_log.write(thd, command, format, thd->query_length, thd->query); general_log_print(thd, command, format, thd->query_length, thd->query);
DBUG_PRINT("query",("%-.4096s",thd->query)); DBUG_PRINT("query",("%-.4096s",thd->query));
if (!(specialflag & SPECIAL_NO_PRIOR)) if (!(specialflag & SPECIAL_NO_PRIOR))
View File
@ -4480,7 +4480,7 @@ that are reorganised.
{ {
if (!alt_part_info->use_default_partitions) if (!alt_part_info->use_default_partitions)
{ {
DBUG_PRINT("info", ("part_info= %x", tab_part_info)); DBUG_PRINT("info", ("part_info: 0x%lx", (long) tab_part_info));
tab_part_info->use_default_partitions= FALSE; tab_part_info->use_default_partitions= FALSE;
} }
tab_part_info->use_default_no_partitions= FALSE; tab_part_info->use_default_no_partitions= FALSE;
View File
@ -1918,7 +1918,7 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length)
else else
{ {
const char *format= "[%lu] %.*b"; const char *format= "[%lu] %.*b";
general_log.write(thd, COM_STMT_PREPARE, format, stmt->id, general_log_print(thd, COM_STMT_PREPARE, format, stmt->id,
stmt->query_length, stmt->query); stmt->query_length, stmt->query);
} }
@ -2265,7 +2265,7 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
DBUG_PRINT("exec_query", ("%s", stmt->query)); DBUG_PRINT("exec_query", ("%s", stmt->query));
DBUG_PRINT("info",("stmt: %p", stmt)); DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt));
sp_cache_flush_obsolete(&thd->sp_proc_cache); sp_cache_flush_obsolete(&thd->sp_proc_cache);
sp_cache_flush_obsolete(&thd->sp_func_cache); sp_cache_flush_obsolete(&thd->sp_func_cache);
@ -2305,9 +2305,9 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
if (error == 0) if (error == 0)
{ {
const char *format= "[%lu] %.*b"; const char *format= "[%lu] %.*b";
general_log.write(thd, COM_STMT_EXECUTE, format, stmt->id, general_log_print(thd, COM_STMT_EXECUTE, format, stmt->id,
thd->query_length, thd->query); thd->query_length, thd->query);
}
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
set_params_data_err: set_params_data_err:
@ -2360,7 +2360,7 @@ void mysql_sql_stmt_execute(THD *thd)
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
} }
DBUG_PRINT("info",("stmt: %p", stmt)); DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt));
/* /*
If the free_list is not empty, we'll wrongly free some externally If the free_list is not empty, we'll wrongly free some externally
@ -2724,7 +2724,8 @@ void Prepared_statement::setup_set_params()
Prepared_statement::~Prepared_statement() Prepared_statement::~Prepared_statement()
{ {
DBUG_ENTER("Prepared_statement::~Prepared_statement"); DBUG_ENTER("Prepared_statement::~Prepared_statement");
DBUG_PRINT("enter",("stmt: %p cursor: %p", this, cursor)); DBUG_PRINT("enter",("stmt: 0x%lx cursor: 0x%lx",
(long) this, (long) cursor));
delete cursor; delete cursor;
/* /*
We have to call free on the items even if cleanup is called as some items, We have to call free on the items even if cleanup is called as some items,
@ -2745,7 +2746,7 @@ Query_arena::Type Prepared_statement::type() const
void Prepared_statement::cleanup_stmt() void Prepared_statement::cleanup_stmt()
{ {
DBUG_ENTER("Prepared_statement::cleanup_stmt"); DBUG_ENTER("Prepared_statement::cleanup_stmt");
DBUG_PRINT("enter",("stmt: %p", this)); DBUG_PRINT("enter",("stmt: 0x%lx", (long) this));
/* The order is important */ /* The order is important */
lex->unit.cleanup(); lex->unit.cleanup();
View File
@ -3743,7 +3743,7 @@ static void wait_while_table_is_used(THD *thd,TABLE *table,
enum ha_extra_function function) enum ha_extra_function function)
{ {
DBUG_ENTER("wait_while_table_is_used"); DBUG_ENTER("wait_while_table_is_used");
DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %u", DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %lu",
table->s->table_name.str, (ulong) table->s, table->s->table_name.str, (ulong) table->s,
table->db_stat, table->s->version)); table->db_stat, table->s->version));
View File
@ -248,14 +248,15 @@ print_plan(JOIN* join, uint idx, double record_count, double read_time,
if (join->best_read == DBL_MAX) if (join->best_read == DBL_MAX)
{ {
fprintf(DBUG_FILE, fprintf(DBUG_FILE,
"%s; idx:%u, best: DBL_MAX, atime: %g, itime: %g, count: %g\n", "%s; idx: %u best: DBL_MAX atime: %g itime: %g count: %g\n",
info, idx, current_read_time, read_time, record_count); info, idx, current_read_time, read_time, record_count);
} }
else else
{ {
fprintf(DBUG_FILE, fprintf(DBUG_FILE,
"%s; idx:%u, best: %g, accumulated: %g, increment: %g, count: %g\n", "%s; idx :%u best: %g accumulated: %g increment: %g count: %g\n",
info, idx, join->best_read, current_read_time, read_time, record_count); info, idx, join->best_read, current_read_time, read_time,
record_count);
} }
/* Print the tables in JOIN->positions */ /* Print the tables in JOIN->positions */
View File
@ -1612,7 +1612,7 @@ Handle_old_incorrect_sql_modes_hook::process_unknown_string(char *&unknown_key,
char *end) char *end)
{ {
DBUG_ENTER("Handle_old_incorrect_sql_modes_hook::process_unknown_string"); DBUG_ENTER("Handle_old_incorrect_sql_modes_hook::process_unknown_string");
DBUG_PRINT("info", ("unknown key:%60s", unknown_key)); DBUG_PRINT("info", ("unknown key: %60s", unknown_key));
if (unknown_key + INVALID_SQL_MODES_LENGTH + 1 < end && if (unknown_key + INVALID_SQL_MODES_LENGTH + 1 < end &&
unknown_key[INVALID_SQL_MODES_LENGTH] == '=' && unknown_key[INVALID_SQL_MODES_LENGTH] == '=' &&
@ -1654,7 +1654,7 @@ process_unknown_string(char *&unknown_key, gptr base, MEM_ROOT *mem_root,
char *end) char *end)
{ {
DBUG_ENTER("Handle_old_incorrect_trigger_table_hook::process_unknown_string"); DBUG_ENTER("Handle_old_incorrect_trigger_table_hook::process_unknown_string");
DBUG_PRINT("info", ("unknown key:%60s", unknown_key)); DBUG_PRINT("info", ("unknown key: %60s", unknown_key));
if (unknown_key + INVALID_TRIGGER_TABLE_LENGTH + 1 < end && if (unknown_key + INVALID_TRIGGER_TABLE_LENGTH + 1 < end &&
unknown_key[INVALID_TRIGGER_TABLE_LENGTH] == '=' && unknown_key[INVALID_TRIGGER_TABLE_LENGTH] == '=' &&
View File
@ -1339,7 +1339,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
Field **field_ptr; Field **field_ptr;
DBUG_ENTER("open_table_from_share"); DBUG_ENTER("open_table_from_share");
DBUG_PRINT("enter",("name: '%s.%s' form: 0x%lx", share->db.str, DBUG_PRINT("enter",("name: '%s.%s' form: 0x%lx", share->db.str,
share->table_name.str, outparam)); share->table_name.str, (long) outparam));
error= 1; error= 1;
bzero((char*) outparam, sizeof(*outparam)); bzero((char*) outparam, sizeof(*outparam));
@ -2401,8 +2401,8 @@ table_check_intact(TABLE *table, const uint table_f_count,
my_bool error= FALSE; my_bool error= FALSE;
my_bool fields_diff_count; my_bool fields_diff_count;
DBUG_ENTER("table_check_intact"); DBUG_ENTER("table_check_intact");
DBUG_PRINT("info",("table=%s expected_count=%d",table->alias, table_f_count)); DBUG_PRINT("info",("table: %s expected_count: %d last_create_time: %ld",
DBUG_PRINT("info",("last_create_time=%d", *last_create_time)); table->alias, table_f_count, *last_create_time));
if ((fields_diff_count= (table->s->fields != table_f_count)) || if ((fields_diff_count= (table->s->fields != table_f_count)) ||
(*last_create_time != table->file->stats.create_time)) (*last_create_time != table->file->stats.create_time))
View File
@ -2157,7 +2157,7 @@ error:
int ha_federated::index_init(uint keynr, bool sorted) int ha_federated::index_init(uint keynr, bool sorted)
{ {
DBUG_ENTER("ha_federated::index_init"); DBUG_ENTER("ha_federated::index_init");
DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name, keynr)); DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name.str, keynr));
active_index= keynr; active_index= keynr;
DBUG_RETURN(0); DBUG_RETURN(0);
} }
View File
@ -33,7 +33,8 @@ int mi_rsame_with_pos(MI_INFO *info, byte *record, int inx, my_off_t filepos)
DBUG_ENTER("mi_rsame_with_pos"); DBUG_ENTER("mi_rsame_with_pos");
DBUG_PRINT("enter",("index: %d filepos: %ld", inx, (long) filepos)); DBUG_PRINT("enter",("index: %d filepos: %ld", inx, (long) filepos));
if (inx < -1 || inx >= 0 && ! mi_is_key_active(info->s->state.key_map, inx)) if (inx < -1 ||
(inx >= 0 && ! mi_is_key_active(info->s->state.key_map, inx)))
{ {
DBUG_RETURN(my_errno=HA_ERR_WRONG_INDEX); DBUG_RETURN(my_errno=HA_ERR_WRONG_INDEX);
} }
View File
@ -137,6 +137,7 @@ extern "C" {
#define LINT_SET_PTR = {0,0} #define LINT_SET_PTR = {0,0}
#else #else
#define LINT_SET_PTR #define LINT_SET_PTR
#endif
#ifndef MIN #ifndef MIN
#define MIN(x,y) (((x)<(y))?(x):(y)) #define MIN(x,y) (((x)<(y))?(x):(y))
View File
@ -106,7 +106,7 @@ inline NdbOut& dec(NdbOut& _NdbOut) {
return _NdbOut.setHexFormat(0); return _NdbOut.setHexFormat(0);
} }
extern "C" extern "C"
void ndbout_c(const char * fmt, ...); void ndbout_c(const char * fmt, ...) ATTRIBUTE_FORMAT(printf, 1, 2);
class FilteredNdbOut : public NdbOut { class FilteredNdbOut : public NdbOut {
public: public:
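Adding ATTRIBUTE_FORMAT(printf, 1, 2) to ndbout_c lets gcc check the variadic arguments against the format string, which is what flushes out the %d/%u/%lu mismatches fixed throughout this commit. A self-contained sketch of the mechanism; the macro name below is a stand-in, the real one comes from the MySQL/NDB headers:

#include <cstdio>
#include <cstdarg>

#ifdef __GNUC__
#define EXAMPLE_PRINTF_FORMAT(fmt, args) __attribute__((format(printf, fmt, args)))
#else
#define EXAMPLE_PRINTF_FORMAT(fmt, args)
#endif

/* Variadic logger declared with the format attribute, like ndbout_c. */
void trace_c(const char *fmt, ...) EXAMPLE_PRINTF_FORMAT(1, 2);

void trace_c(const char *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  vfprintf(stderr, fmt, ap);
  va_end(ap);
  fputc('\n', stderr);
}

int main()
{
  trace_c("section %d of %d", 1, 3);   /* argument types match the format    */
  /* trace_c("section %d", "one"); */  /* would now draw a -Wformat warning  */
  return 0;
}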
View File
@ -153,7 +153,6 @@ public:
ValueType m_type; ValueType m_type;
protected: protected:
Reader(); Reader();
virtual ~Reader() {}
virtual void reset() = 0; virtual void reset() = 0;
virtual bool step(Uint32 len) = 0; virtual bool step(Uint32 len) = 0;
@ -168,7 +167,6 @@ public:
class Writer { class Writer {
public: public:
Writer() {} Writer() {}
virtual ~Writer() {}
bool first(); bool first();
bool add(Uint16 key, Uint32 value); bool add(Uint16 key, Uint32 value);
@ -192,7 +190,6 @@ public:
SimplePropertiesLinearReader(const Uint32 * src, Uint32 len); SimplePropertiesLinearReader(const Uint32 * src, Uint32 len);
virtual ~SimplePropertiesLinearReader() {} virtual ~SimplePropertiesLinearReader() {}
virtual ~SimplePropertiesLinearReader() {}
virtual void reset(); virtual void reset();
virtual bool step(Uint32 len); virtual bool step(Uint32 len);
virtual bool getWord(Uint32 * dst); virtual bool getWord(Uint32 * dst);
@ -230,7 +227,6 @@ public:
UtilBufferWriter(class UtilBuffer & buf); UtilBufferWriter(class UtilBuffer & buf);
virtual ~UtilBufferWriter() {} virtual ~UtilBufferWriter() {}
virtual ~UtilBufferWriter() {}
virtual bool reset(); virtual bool reset();
virtual bool putWord(Uint32 val); virtual bool putWord(Uint32 val);
virtual bool putWords(const Uint32 * src, Uint32 len); virtual bool putWords(const Uint32 * src, Uint32 len);
@ -284,7 +280,6 @@ public:
SimplePropertiesSectionWriter(class SectionSegmentPool &); SimplePropertiesSectionWriter(class SectionSegmentPool &);
virtual ~SimplePropertiesSectionWriter() {} virtual ~SimplePropertiesSectionWriter() {}
virtual ~SimplePropertiesSectionWriter() {}
virtual bool reset(); virtual bool reset();
virtual bool putWord(Uint32 val); virtual bool putWord(Uint32 val);
virtual bool putWords(const Uint32 * src, Uint32 len); virtual bool putWords(const Uint32 * src, Uint32 len);
View File
@ -2118,7 +2118,7 @@ Backup::execDROP_TRIG_REF(Signal* signal)
BackupRecordPtr ptr LINT_SET_PTR; BackupRecordPtr ptr LINT_SET_PTR;
c_backupPool.getPtr(ptr, ptrI); c_backupPool.getPtr(ptr, ptrI);
if(ref->getConf()->getTriggerId() != -1) if(ref->getConf()->getTriggerId() != ~(Uint32) 0)
{ {
ndbout << "ERROR DROPPING TRIGGER: " << ref->getConf()->getTriggerId(); ndbout << "ERROR DROPPING TRIGGER: " << ref->getConf()->getTriggerId();
ndbout << " Err: " << (Uint32)ref->getErrorCode() << endl << endl; ndbout << " Err: " << (Uint32)ref->getErrorCode() << endl << endl;
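getTriggerId() here is an unsigned 32-bit value, so comparing it against -1 raises a signed/unsigned comparison warning; the hunk spells the "no trigger" marker as ~(Uint32) 0 so both sides of the comparison stay unsigned. A small stand-alone sketch of the idiom, assuming Uint32 is a plain unsigned int (the get_trigger_id() helper is only for illustration):

#include <cstdio>

typedef unsigned int Uint32;   /* assumption: matches the NDB typedef */

static Uint32 get_trigger_id(bool have_trigger)
{
  /* all-bits-set is used as the "no trigger" marker */
  return have_trigger ? 7 : ~(Uint32) 0;
}

int main()
{
  Uint32 id= get_trigger_id(false);
  /* "id != -1" would compare unsigned against signed (-Wsign-compare);
     writing the marker as ~(Uint32) 0 keeps the comparison unsigned.  */
  if (id != ~(Uint32) 0)
    printf("trigger id: %u\n", id);
  else
    printf("no trigger\n");
  return 0;
}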
View File
@ -1287,7 +1287,7 @@ Cmvmi::execTESTSIG(Signal* signal){
fprintf(stdout, "\n"); fprintf(stdout, "\n");
for(i = 0; i<signal->header.m_noOfSections; i++){ for(i = 0; i<signal->header.m_noOfSections; i++){
SegmentedSectionPtr ptr = {0,0,0}; SegmentedSectionPtr ptr;
ndbout_c("-- Section %d --", i); ndbout_c("-- Section %d --", i);
signal->getSection(ptr, i); signal->getSection(ptr, i);
ndbrequire(ptr.p != 0); ndbrequire(ptr.p != 0);
@ -1345,7 +1345,7 @@ Cmvmi::execTESTSIG(Signal* signal){
LinearSectionPtr ptr[3]; LinearSectionPtr ptr[3];
const Uint32 secs = signal->getNoOfSections(); const Uint32 secs = signal->getNoOfSections();
for(i = 0; i<secs; i++){ for(i = 0; i<secs; i++){
SegmentedSectionPtr sptr = {0,0,0}; SegmentedSectionPtr sptr;
signal->getSection(sptr, i); signal->getSection(sptr, i);
ptr[i].sz = sptr.sz; ptr[i].sz = sptr.sz;
ptr[i].p = new Uint32[sptr.sz]; ptr[i].p = new Uint32[sptr.sz];
@ -1394,7 +1394,7 @@ Cmvmi::execTESTSIG(Signal* signal){
LinearSectionPtr ptr[3]; LinearSectionPtr ptr[3];
const Uint32 secs = signal->getNoOfSections(); const Uint32 secs = signal->getNoOfSections();
for(i = 0; i<secs; i++){ for(i = 0; i<secs; i++){
SegmentedSectionPtr sptr = {0,0,0}; SegmentedSectionPtr sptr;
signal->getSection(sptr, i); signal->getSection(sptr, i);
ptr[i].sz = sptr.sz; ptr[i].sz = sptr.sz;
ptr[i].p = new Uint32[sptr.sz]; ptr[i].p = new Uint32[sptr.sz];
@ -1460,7 +1460,7 @@ Cmvmi::execTESTSIG(Signal* signal){
const Uint32 secs = signal->getNoOfSections(); const Uint32 secs = signal->getNoOfSections();
memset(g_test, 0, sizeof(g_test)); memset(g_test, 0, sizeof(g_test));
for(i = 0; i<secs; i++){ for(i = 0; i<secs; i++){
SegmentedSectionPtr sptr = {0,0,0}; SegmentedSectionPtr sptr;
signal->getSection(sptr, i); signal->getSection(sptr, i);
g_test[i].sz = sptr.sz; g_test[i].sz = sptr.sz;
g_test[i].p = new Uint32[sptr.sz]; g_test[i].p = new Uint32[sptr.sz];
View File
@ -971,10 +971,10 @@ void Dbacc::initOpRec(Signal* signal)
Uint32 opbits = 0; Uint32 opbits = 0;
opbits |= Treqinfo & 0x7; opbits |= Treqinfo & 0x7;
opbits |= ((Treqinfo >> 4) & 0x3) ? Operationrec::OP_LOCK_MODE : 0; opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_LOCK_MODE : 0;
opbits |= ((Treqinfo >> 4) & 0x3) ? Operationrec::OP_ACC_LOCK_MODE : 0; opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0;
opbits |= (dirtyReadFlag) ? Operationrec::OP_DIRTY_READ : 0; opbits |= (dirtyReadFlag) ? (Uint32) Operationrec::OP_DIRTY_READ : 0;
opbits |= ((Treqinfo >> 31) & 0x1) ? Operationrec::OP_LOCK_REQ : 0; opbits |= ((Treqinfo >> 31) & 0x1) ? (Uint32) Operationrec::OP_LOCK_REQ : 0;
//operationRecPtr.p->nodeType = (Treqinfo >> 7) & 0x3; //operationRecPtr.p->nodeType = (Treqinfo >> 7) & 0x3;
operationRecPtr.p->fid = fragrecptr.p->myfid; operationRecPtr.p->fid = fragrecptr.p->myfid;
@ -6947,10 +6947,10 @@ void Dbacc::initScanOpRec(Signal* signal)
Uint32 opbits = 0; Uint32 opbits = 0;
opbits |= ZSCAN_OP; opbits |= ZSCAN_OP;
opbits |= scanPtr.p->scanLockMode ? Operationrec::OP_LOCK_MODE : 0; opbits |= scanPtr.p->scanLockMode ? (Uint32) Operationrec::OP_LOCK_MODE : 0;
opbits |= scanPtr.p->scanLockMode ? Operationrec::OP_ACC_LOCK_MODE : 0; opbits |= scanPtr.p->scanLockMode ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0;
opbits |= scanPtr.p->scanReadCommittedFlag ? opbits |= (scanPtr.p->scanReadCommittedFlag ?
Operationrec::OP_EXECUTED_DIRTY_READ : 0; (Uint32) Operationrec::OP_EXECUTED_DIRTY_READ : 0);
opbits |= Operationrec::OP_COMMIT_DELETE_CHECK; opbits |= Operationrec::OP_COMMIT_DELETE_CHECK;
operationRecPtr.p->userptr = RNIL; operationRecPtr.p->userptr = RNIL;
operationRecPtr.p->scanRecPtr = scanPtr.i; operationRecPtr.p->scanRecPtr = scanPtr.i;
@ -7700,6 +7700,7 @@ void Dbacc::putOverflowRecInFrag(Signal* signal)
OverflowRecordPtr tpifPrevOverrecPtr; OverflowRecordPtr tpifPrevOverrecPtr;
tpifNextOverrecPtr.i = fragrecptr.p->firstOverflowRec; tpifNextOverrecPtr.i = fragrecptr.p->firstOverflowRec;
LINT_INIT(tpifPrevOverrecPtr.p);
tpifPrevOverrecPtr.i = RNIL; tpifPrevOverrecPtr.i = RNIL;
while (tpifNextOverrecPtr.i != RNIL) { while (tpifNextOverrecPtr.i != RNIL) {
ptrCheckGuard(tpifNextOverrecPtr, coverflowrecsize, overflowRecord); ptrCheckGuard(tpifNextOverrecPtr, coverflowrecsize, overflowRecord);
@ -7749,6 +7750,7 @@ void Dbacc::putRecInFreeOverdir(Signal* signal)
OverflowRecordPtr tpfoPrevOverrecPtr; OverflowRecordPtr tpfoPrevOverrecPtr;
tpfoNextOverrecPtr.i = fragrecptr.p->firstFreeDirindexRec; tpfoNextOverrecPtr.i = fragrecptr.p->firstFreeDirindexRec;
LINT_INIT(tpfoPrevOverrecPtr.p);
tpfoPrevOverrecPtr.i = RNIL; tpfoPrevOverrecPtr.i = RNIL;
while (tpfoNextOverrecPtr.i != RNIL) { while (tpfoNextOverrecPtr.i != RNIL) {
ptrCheckGuard(tpfoNextOverrecPtr, coverflowrecsize, overflowRecord); ptrCheckGuard(tpfoNextOverrecPtr, coverflowrecsize, overflowRecord);
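The LINT_INIT() calls added above give the overflow-record pointers a dummy initial value on paths where the compiler cannot see that they are always assigned before use, silencing "may be used uninitialized" warnings. The sketch below shows the idea with a local stand-in macro; the real LINT_INIT definition lives in the MySQL headers and may expand to nothing in ordinary builds:

#include <cstdio>

/* Hypothetical stand-in for LINT_INIT(); only the idea is illustrated. */
#define EXAMPLE_LINT_INIT(var) var= 0

static int find_slot(int key)
{
  int found;                    /* gcc: 'found' may be used uninitialized   */
  EXAMPLE_LINT_INIT(found);     /* dummy value; every real path assigns it  */
  for (int i= 0; i < 8; i++)
  {
    if (i == (key & 7))         /* key & 7 is always 0..7, so found is set  */
      found= i;
  }
  return found;
}

int main()
{
  printf("slot: %d\n", find_slot(42));
  return 0;
}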
View File
@ -189,7 +189,7 @@ struct {
&Dbdict::drop_undofile_prepare_start, 0, &Dbdict::drop_undofile_prepare_start, 0,
0, 0,
0, 0, 0, 0,
0, 0 0, 0, 0
} }
}; };
View File
@ -2909,7 +2909,7 @@ Dbdih::nr_start_fragment(Signal* signal,
} }
} }
if (maxLcpIndex == ~0) if (maxLcpIndex == ~ (Uint32) 0)
{ {
ndbout_c("Didnt find any LCP for node: %d tab: %d frag: %d", ndbout_c("Didnt find any LCP for node: %d tab: %d frag: %d",
takeOverPtr.p->toStartingNode, takeOverPtr.p->toStartingNode,
@ -5968,6 +5968,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
break; break;
default: default:
ndbrequire(false); ndbrequire(false);
lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning
}//switch }//switch
Uint32 failedNodeId = c_lcpState.m_MASTER_LCPREQ_FailedNodeId; Uint32 failedNodeId = c_lcpState.m_MASTER_LCPREQ_FailedNodeId;
@ -6892,6 +6893,8 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
Uint32 align; Uint32 align;
}; };
SegmentedSectionPtr fragDataPtr; SegmentedSectionPtr fragDataPtr;
LINT_INIT(fragDataPtr.i);
LINT_INIT(fragDataPtr.sz);
signal->getSection(fragDataPtr, DiAddTabReq::FRAGMENTATION); signal->getSection(fragDataPtr, DiAddTabReq::FRAGMENTATION);
copy((Uint32*)fragments, fragDataPtr); copy((Uint32*)fragments, fragDataPtr);
releaseSections(signal); releaseSections(signal);
@ -6981,7 +6984,9 @@ Dbdih::sendAddFragreq(Signal* signal, ConnectRecordPtr connectPtr,
TabRecordPtr tabPtr, Uint32 fragId){ TabRecordPtr tabPtr, Uint32 fragId){
jam(); jam();
const Uint32 fragCount = tabPtr.p->totalfragments; const Uint32 fragCount = tabPtr.p->totalfragments;
ReplicaRecordPtr replicaPtr; replicaPtr.i = RNIL; ReplicaRecordPtr replicaPtr;
LINT_INIT(replicaPtr.p);
replicaPtr.i = RNIL;
FragmentstorePtr fragPtr; FragmentstorePtr fragPtr;
for(; fragId<fragCount; fragId++){ for(; fragId<fragCount; fragId++){
jam(); jam();
@ -7541,7 +7546,11 @@ void Dbdih::execDI_FCOUNTREQ(Signal* signal)
if(connectPtr.i == RNIL) if(connectPtr.i == RNIL)
ref->m_connectionData = RNIL; ref->m_connectionData = RNIL;
else else
{
jam();
ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
ref->m_connectionData = connectPtr.p->userpointer; ref->m_connectionData = connectPtr.p->userpointer;
}
ref->m_tableRef = tabPtr.i; ref->m_tableRef = tabPtr.i;
ref->m_senderData = senderData; ref->m_senderData = senderData;
ref->m_error = DihFragCountRef::ErroneousTableState; ref->m_error = DihFragCountRef::ErroneousTableState;
@ -11443,6 +11452,7 @@ Dbdih::findBestLogNode(CreateReplicaRecord* createReplica,
{ {
ConstPtr<ReplicaRecord> fblFoundReplicaPtr; ConstPtr<ReplicaRecord> fblFoundReplicaPtr;
ConstPtr<ReplicaRecord> fblReplicaPtr; ConstPtr<ReplicaRecord> fblReplicaPtr;
LINT_INIT(fblFoundReplicaPtr.p);
/* --------------------------------------------------------------------- */ /* --------------------------------------------------------------------- */
/* WE START WITH ZERO AS FOUND TO ENSURE THAT FIRST HIT WILL BE */ /* WE START WITH ZERO AS FOUND TO ENSURE THAT FIRST HIT WILL BE */
View File
@ -3417,9 +3417,9 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
} }
else else
{ {
regTcPtr->operation = op == ZREAD_EX ? ZREAD : op; regTcPtr->operation = (Operation_t) op == ZREAD_EX ? ZREAD : (Operation_t) op;
regTcPtr->lockType = regTcPtr->lockType =
op == ZREAD_EX ? ZUPDATE : op == ZWRITE ? ZINSERT : op; op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? ZINSERT : (Operation_t) op;
} }
CRASH_INSERTION2(5041, regTcPtr->simpleRead && CRASH_INSERTION2(5041, regTcPtr->simpleRead &&
@ -18520,7 +18520,7 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
do do
{ {
ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
ndbout_c(" file %d(%d) FileChangeState: %d logFileStatus: %d currentMbyte: %d currentFilepage", ndbout_c(" file %d(%d) FileChangeState: %d logFileStatus: %d currentMbyte: %d currentFilepage %d",
logFilePtr.p->fileNo, logFilePtr.p->fileNo,
logFilePtr.i, logFilePtr.i,
logFilePtr.p->fileChangeState, logFilePtr.p->fileChangeState,
View File
@ -3194,7 +3194,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
if (unlikely(version < NDBD_ROWID_VERSION)) if (unlikely(version < NDBD_ROWID_VERSION))
{ {
Uint32 op = regTcPtr->operation; Uint32 op = regTcPtr->operation;
Uint32 lock = op == ZREAD_EX ? ZUPDATE : op == ZWRITE ? ZINSERT : op; Uint32 lock = (Operation_t) op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? ZINSERT : (Operation_t) op;
LqhKeyReq::setLockType(Tdata10, lock); LqhKeyReq::setLockType(Tdata10, lock);
} }
/* ---------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */
View File
@ -43,7 +43,7 @@ void Dbtup::execTUP_DEALLOCREQ(Signal* signal)
getFragmentrec(regFragPtr, frag_id, regTabPtr.p); getFragmentrec(regFragPtr, frag_id, regTabPtr.p);
ndbassert(regFragPtr.p != NULL); ndbassert(regFragPtr.p != NULL);
if (! (((frag_page_id << MAX_TUPLES_BITS) + page_index) == ~0)) if (! (((frag_page_id << MAX_TUPLES_BITS) + page_index) == ~ (Uint32) 0))
{ {
Local_key tmp; Local_key tmp;
tmp.m_page_no= getRealpid(regFragPtr.p, frag_page_id); tmp.m_page_no= getRealpid(regFragPtr.p, frag_page_id);
View File
@ -82,7 +82,7 @@ Dbtup::dump_disk_alloc(Dbtup::Disk_alloc_info & alloc)
{ {
ndbout << ptr << " "; ndbout << ptr << " ";
} }
ndbout_c(""); ndbout_c(" ");
} }
ndbout_c("page requests"); ndbout_c("page requests");
for(Uint32 i = 0; i<MAX_FREE_LIST; i++) for(Uint32 i = 0; i<MAX_FREE_LIST; i++)
@ -95,7 +95,7 @@ Dbtup::dump_disk_alloc(Dbtup::Disk_alloc_info & alloc)
{ {
ndbout << ptr << " "; ndbout << ptr << " ";
} }
ndbout_c(""); ndbout_c(" ");
} }
ndbout_c("Extent matrix"); ndbout_c("Extent matrix");
@ -108,7 +108,7 @@ Dbtup::dump_disk_alloc(Dbtup::Disk_alloc_info & alloc)
{ {
ndbout << ptr << " "; ndbout << ptr << " ";
} }
ndbout_c(""); ndbout_c(" ");
} }
if (alloc.m_curr_extent_info_ptr_i != RNIL) if (alloc.m_curr_extent_info_ptr_i != RNIL)
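
The three ndbout_c("") to ndbout_c(" ") edits above dodge the zero-length-format-string warning that gcc's printf format checking can raise. A stand-in with plain printf, assuming ndbout_c is printf-like:

#include <cstdio>

int main()
{
  /*
    std::printf("");   gcc (with format checking) may warn:
    "zero-length printf format string".
  */
  std::printf(" ");    /* the workaround used above: one-space format */
  std::printf("\n");
  return 0;
}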

View File

@ -684,7 +684,7 @@ void Dbtup::execTUPKEYREQ(Signal* signal)
copyAttrinfo(regOperPtr, &cinBuffer[0]); copyAttrinfo(regOperPtr, &cinBuffer[0]);
Uint32 localkey = (pageid << MAX_TUPLES_BITS) + pageidx; Uint32 localkey = (pageid << MAX_TUPLES_BITS) + pageidx;
if(Roptype == ZINSERT && localkey == ~0) if (Roptype == ZINSERT && localkey == ~ (Uint32) 0)
{ {
// No tuple allocatated yet // No tuple allocatated yet
goto do_insert; goto do_insert;

View File

@ -284,4 +284,5 @@ Dbtup::alloc_fix_rowid(Fragrecord* regFragPtr,
case ZEMPTY_MM: case ZEMPTY_MM:
ndbrequire(false); ndbrequire(false);
} }
return 0; /* purify: deadcode */
} }
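
The return added above, like the identical ones in RWPool::getPtr and WOPool::getPtr further down, quiets "control reaches end of non-void function": ndbrequire(false) aborts at run time, but the compiler cannot assume that. A self-contained sketch in which the enum and the require() helper are invented stand-ins:

#include <cstdio>
#include <cstdlib>

enum PageState { ZFREE_MM, ZUSED_MM, ZEMPTY_MM };

static void require(bool ok)          /* stand-in for ndbrequire() */
{
  if (!ok)
    std::abort();
}

static int handle(PageState s)
{
  switch (s)
  {
  case ZFREE_MM:
    return 1;
  case ZUSED_MM:
    return 2;
  case ZEMPTY_MM:
    require(false);                   /* aborts, but gcc cannot prove it */
  }
  return 0; /* purify: deadcode */    /* silences the fall-through warning */
}

int main()
{
  std::printf("%d\n", handle(ZUSED_MM));   /* prints 2 */
  return 0;
}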

View File

@ -1809,11 +1809,11 @@ Lgman::execLCP_FRAG_ORD(Signal* signal)
if(0) if(0)
ndbout_c ndbout_c
("execLCP_FRAG_ORD (%d %d) (%d %d) (%d %d) free pages: %d", ("execLCP_FRAG_ORD (%d %d) (%d %d) (%d %d) free pages: %ld",
ptr.p->m_tail_pos[0].m_ptr_i, ptr.p->m_tail_pos[0].m_idx, ptr.p->m_tail_pos[0].m_ptr_i, ptr.p->m_tail_pos[0].m_idx,
ptr.p->m_tail_pos[1].m_ptr_i, ptr.p->m_tail_pos[1].m_idx, ptr.p->m_tail_pos[1].m_ptr_i, ptr.p->m_tail_pos[1].m_idx,
ptr.p->m_tail_pos[2].m_ptr_i, ptr.p->m_tail_pos[2].m_idx, ptr.p->m_tail_pos[2].m_ptr_i, ptr.p->m_tail_pos[2].m_idx,
(ptr.p->m_free_file_words / File_formats::UNDO_PAGE_WORDS)); (long) (ptr.p->m_free_file_words / File_formats::UNDO_PAGE_WORDS));
} }
m_logfile_group_list.next(ptr); m_logfile_group_list.next(ptr);
} }
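
The %lld to %ld change above is one instance of a pattern repeated in this diff for 64-bit values (see also complete_outof_order_gcis in NdbEventBuffer further down): cast to long and use the plain %ld conversion. A sketch; Uint64 is assumed to be a 64-bit unsigned typedef and the numbers are invented, chosen so the result clearly fits in a long.

#include <cstdio>

typedef unsigned long long Uint64;

int main()
{
  Uint64 free_file_words= 131072;
  Uint64 undo_page_words= 8192;

  /*
    std::printf("%lld", free_file_words / undo_page_words);
    %lld is a C99 addition that some older printf implementations do
    not accept, so the value is narrowed explicitly instead.
  */
  std::printf("free pages: %ld\n",
              (long) (free_file_words / undo_page_words));   /* 16 */
  return 0;
}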

View File

@ -655,7 +655,7 @@ Ndbfs::createAsyncFile(){
// Print info about all open files // Print info about all open files
for (unsigned i = 0; i < theFiles.size(); i++){ for (unsigned i = 0; i < theFiles.size(); i++){
AsyncFile* file = theFiles[i]; AsyncFile* file = theFiles[i];
ndbout_c("%2d (0x%x): %s", i, file, file->isOpen()?"OPEN":"CLOSED"); ndbout_c("%2d (0x%lx): %s", i, (long) file, file->isOpen()?"OPEN":"CLOSED");
} }
ERROR_SET(fatal, NDBD_EXIT_AFS_MAXOPEN,""," Ndbfs::createAsyncFile"); ERROR_SET(fatal, NDBD_EXIT_AFS_MAXOPEN,""," Ndbfs::createAsyncFile");
} }
@ -1130,7 +1130,7 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal)
ndbout << "All files: " << endl; ndbout << "All files: " << endl;
for (unsigned i = 0; i < theFiles.size(); i++){ for (unsigned i = 0; i < theFiles.size(); i++){
AsyncFile* file = theFiles[i]; AsyncFile* file = theFiles[i];
ndbout_c("%2d (0x%x): %s", i,file, file->isOpen()?"OPEN":"CLOSED"); ndbout_c("%2d (0x%lx): %s", i, (long) file, file->isOpen()?"OPEN":"CLOSED");
} }
} }
}//Ndbfs::execDUMP_STATE_ORD() }//Ndbfs::execDUMP_STATE_ORD()
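
Printing the AsyncFile pointer as 0x%lx with a (long) cast, as done in both hunks above, avoids two problems at once: %x expects an int-sized argument and so truncates pointers on 64-bit platforms, and %p output is formatted differently by different C libraries. A sketch that assumes sizeof(void*) <= sizeof(long), which holds on the ILP32 and LP64 targets involved; the object and the OPEN string are invented.

#include <cstdio>

int main()
{
  int object= 42;
  void *file= &object;

  /*
    std::printf("%2d (0x%x): %s\n", 0, file, "OPEN");
    would pass a pointer where %x expects an unsigned int.
  */
  std::printf("%2d (0x%lx): %s\n", 0, (long) file, "OPEN");
  return 0;
}

(On a current code base uintptr_t with the PRIxPTR macro would be the tidier spelling, but the long cast matches what the surrounding code already does.)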

View File

@ -1188,7 +1188,7 @@ Pgman::process_lcp(Signal* signal)
pl_hash.next(m_lcp_curr_bucket, iter); pl_hash.next(m_lcp_curr_bucket, iter);
Uint32 loop = 0; Uint32 loop = 0;
while (iter.curr.i != RNIL && while (iter.curr.i != RNIL &&
m_lcp_outstanding < max_count && m_lcp_outstanding < (Uint32) max_count &&
(loop ++ < 32 || iter.bucket == m_lcp_curr_bucket)) (loop ++ < 32 || iter.bucket == m_lcp_curr_bucket))
{ {
Ptr<Page_entry>& ptr = iter.curr; Ptr<Page_entry>& ptr = iter.curr;
@ -2324,7 +2324,7 @@ Pgman::execDUMP_STATE_ORD(Signal* signal)
if (signal->theData[0] == 11004) if (signal->theData[0] == 11004)
{ {
ndbout << "Dump LCP bucket m_lcp_outstanding: %d", m_lcp_outstanding; ndbout << "Dump LCP bucket m_lcp_outstanding: " << m_lcp_outstanding;
if (m_lcp_curr_bucket != ~(Uint32)0) if (m_lcp_curr_bucket != ~(Uint32)0)
{ {
Page_hashlist::Iterator iter; Page_hashlist::Iterator iter;

View File

@ -1137,7 +1137,7 @@ Restore::reorder_key(const KeyDescriptor* desc,
} }
dst += sz; dst += sz;
} }
ndbassert((dst - Tmp) == len); ndbassert((Uint32) (dst - Tmp) == len);
memcpy(data, Tmp, 4*len); memcpy(data, Tmp, 4*len);
} }

View File

@ -1590,6 +1590,9 @@ Suma::execGET_TABINFOREF(Signal* signal){
break; break;
case GetTabInfoRef::TableNameTooLong: case GetTabInfoRef::TableNameTooLong:
ndbrequire(false); ndbrequire(false);
break;
case GetTabInfoRef::NoFetchByName:
break;
} }
if (do_resend_request) if (do_resend_request)
{ {
@ -4306,7 +4309,7 @@ Suma::Restart::sendSubStartReq(SubscriptionPtr subPtr, SubscriberPtr subbPtr,
// restarting suma will not respond to this until startphase 5 // restarting suma will not respond to this until startphase 5
// since it is not until then data copying has been completed // since it is not until then data copying has been completed
DBUG_PRINT("info",("Restarting subscriber: %u on key: [%u,%u]", DBUG_PRINT("info",("Restarting subscriber: %u on key: [%u,%u] %u",
subbPtr.i, subbPtr.i,
subPtr.p->m_subscriptionId, subPtr.p->m_subscriptionId,
subPtr.p->m_subscriptionKey, subPtr.p->m_subscriptionKey,

View File

@ -191,7 +191,7 @@ Configuration::init(int argc, char** argv)
} }
if (! (val > 0 && val < MAX_NDB_NODES)) if (! (val > 0 && val < MAX_NDB_NODES))
{ {
ndbout_c("Invalid nodeid specified in nowait-nodes: %d : %s", ndbout_c("Invalid nodeid specified in nowait-nodes: %ld : %s",
val, _nowait_nodes); val, _nowait_nodes);
exit(-1); exit(-1);
} }

View File

@ -287,6 +287,7 @@ DLHashTableImpl<P, T, U>::remove(Ptr<T> & ptr, const T & key)
Uint32 i; Uint32 i;
T * p; T * p;
Ptr<T> prev; Ptr<T> prev;
LINT_INIT(prev.p);
prev.i = RNIL; prev.i = RNIL;
i = hashValues[hv]; i = hashValues[hv];

View File

@ -70,6 +70,7 @@ RWPool::getPtr(Uint32 i)
return record; return record;
} }
handle_invalid_get_ptr(i); handle_invalid_get_ptr(i);
return 0; /* purify: deadcode */
} }
#endif #endif

View File

@ -1930,6 +1930,7 @@ SimulatedBlock::xfrm_attr(Uint32 attrDesc, CHARSET_INFO* cs,
{ {
jam(); jam();
Uint32 len; Uint32 len;
LINT_INIT(len);
switch(array){ switch(array){
case NDB_ARRAYTYPE_SHORT_VAR: case NDB_ARRAYTYPE_SHORT_VAR:
len = 1 + srcPtr[0]; len = 1 + srcPtr[0];

View File

@ -115,6 +115,7 @@ WOPool::getPtr(Uint32 i)
return record; return record;
} }
handle_invalid_get_ptr(i); handle_invalid_get_ptr(i);
return 0; /* purify: deadcode */
} }
#endif #endif

View File

@ -223,6 +223,10 @@ Ndbd_mem_manager::init(bool alloc_less_memory)
InitChunk chunk; InitChunk chunk;
Uint32 remaining = pages - allocated; Uint32 remaining = pages - allocated;
#if defined(_lint) || defined(FORCE_INIT_OF_VARS)
memset((char*) &chunk, 0 , sizeof(chunk));
#endif
if (do_malloc(pages - allocated, &chunk)) if (do_malloc(pages - allocated, &chunk))
{ {
Uint32 i = 0; Uint32 i = 0;
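
The LINT_INIT() lines added earlier (findBestLogNode, DLHashTableImpl::remove, SimulatedBlock::xfrm_attr) and the guarded memset above are the same idea: give the variable a dummy value only in _lint / FORCE_INIT_OF_VARS builds, so "may be used uninitialized" diagnostics stay quiet without adding a store to ordinary builds. The macro below is a reconstruction for illustration, not the definition from my_global.h, and pick() is invented.

#include <cstdio>

#if defined(_lint) || defined(FORCE_INIT_OF_VARS)
#define LINT_INIT(var) var= 0          /* dummy init for checking builds */
#else
#define LINT_INIT(var)                 /* expands to nothing otherwise */
#endif

static int pick(int key)
{
  int found;                           /* "may be used uninitialized" */
  LINT_INIT(found);
  for (int i= 0; i < 4; i++)
    if (i == key)
      found= i * 10;
  return key < 4 ? found : -1;         /* assigned whenever it is read */
}

int main()
{
  std::printf("%d\n", pick(2));        /* prints 20 */
  return 0;
}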

View File

@ -1558,6 +1558,8 @@ CommandInterpreter::executeShow(char* parameters)
case NDB_MGM_NODE_TYPE_UNKNOWN: case NDB_MGM_NODE_TYPE_UNKNOWN:
ndbout << "Error: Unknown Node Type" << endl; ndbout << "Error: Unknown Node Type" << endl;
return -1; return -1;
case NDB_MGM_NODE_TYPE_MAX:
break; /* purify: deadcode */
} }
} }
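
The NDB_MGM_NODE_TYPE_MAX case added here, the NoFetchByName case in Suma::execGET_TABINFOREF, and the default branch in NdbIndexOperation::indxInit all address the same diagnostic: for a switch over an enum with no default, gcc lists every enumerator that is not handled. A sketch with an invented enum:

#include <cstdio>

enum NodeType
{
  NODE_TYPE_DB,
  NODE_TYPE_API,
  NODE_TYPE_MGM,
  NODE_TYPE_UNKNOWN,
  NODE_TYPE_MAX                 /* sentinel, never a real node type */
};

static const char *type_name(NodeType t)
{
  switch (t)                    /* without the MAX case: -Wswitch warning */
  {
  case NODE_TYPE_DB:      return "ndbd";
  case NODE_TYPE_API:     return "api";
  case NODE_TYPE_MGM:     return "ndb_mgmd";
  case NODE_TYPE_UNKNOWN: return "unknown";
  case NODE_TYPE_MAX:     break;        /* listed only to cover the enum */
  }
  return "invalid";
}

int main()
{
  std::printf("%s\n", type_name(NODE_TYPE_MGM));   /* prints ndb_mgmd */
  return 0;
}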

View File

@ -2495,7 +2495,7 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted)
const BackupCompleteRep * const rep = const BackupCompleteRep * const rep =
CAST_CONSTPTR(BackupCompleteRep, signal->getDataPtr()); CAST_CONSTPTR(BackupCompleteRep, signal->getDataPtr());
#ifdef VM_TRACE #ifdef VM_TRACE
ndbout_c("Backup(%d) completed %d", rep->backupId); ndbout_c("Backup(%d) completed", rep->backupId);
#endif #endif
event.Event = BackupEvent::BackupCompleted; event.Event = BackupEvent::BackupCompleted;
event.Completed.BackupId = rep->backupId; event.Completed.BackupId = rep->backupId;
@ -2751,7 +2751,7 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value,
break; break;
case 1: case 1:
res = i2.set(param, val_64); res = i2.set(param, val_64);
ndbout_c("Updating node %d param: %d to %Ld", node, param, val_32); ndbout_c("Updating node %d param: %d to %u", node, param, val_32);
break; break;
case 2: case 2:
res = i2.set(param, val_char); res = i2.set(param, val_char);
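
Both MgmtSrvr changes above are mismatches between the format string and the arguments actually supplied: the first line had one more %d than arguments, the second paired a 64-bit %Ld conversion with the 32-bit val_32. A sketch of why that is more than cosmetic, with printf standing in for ndbout_c and the variables invented:

#include <cstdio>

int main()
{
  unsigned backup_id= 7;
  unsigned val_32= 1000;

  /*
    std::printf("Backup(%d) completed %d\n", backup_id);
    Two conversions, one argument: the second %d reads whatever happens
    to sit next in the variable-argument area, which is undefined
    behaviour and can print garbage or crash.
  */
  std::printf("Backup(%u) completed\n", backup_id);

  /*
    std::printf("Updating param to %Ld\n", val_32);
    A 64-bit conversion with a 32-bit argument is broken in the same
    way; the conversion has to match the value that is really passed.
  */
  std::printf("Updating param to %u\n", val_32);
  return 0;
}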

View File

@ -417,7 +417,7 @@ GlobalDictCache::alter_table_rep(const char * name,
{ {
TableVersion & ver = (* vers)[i]; TableVersion & ver = (* vers)[i];
if(ver.m_version == tableVersion && ver.m_impl && if(ver.m_version == tableVersion && ver.m_impl &&
ver.m_impl->m_id == tableId) (Uint32) ver.m_impl->m_id == tableId)
{ {
ver.m_status = DROPPED; ver.m_status = DROPPED;
ver.m_impl->m_status = altered ? ver.m_impl->m_status = altered ?

View File

@ -3583,7 +3583,7 @@ NdbDictInterface::createEvent(class Ndb & ndb,
evnt.mi_type = evntConf->getEventType(); evnt.mi_type = evntConf->getEventType();
evnt.setTable(dataPtr); evnt.setTable(dataPtr);
} else { } else {
if (evnt.m_tableImpl->m_id != evntConf->getTableId() || if ((Uint32) evnt.m_tableImpl->m_id != evntConf->getTableId() ||
evnt.m_tableImpl->m_version != evntConf->getTableVersion() || evnt.m_tableImpl->m_version != evntConf->getTableVersion() ||
//evnt.m_attrListBitmask != evntConf->getAttrListBitmask() || //evnt.m_attrListBitmask != evntConf->getAttrListBitmask() ||
evnt.mi_type != evntConf->getEventType()) { evnt.mi_type != evntConf->getEventType()) {
@ -3701,7 +3701,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab)
DBUG_RETURN(NULL); DBUG_RETURN(NULL);
} }
if ((tab->m_status != NdbDictionary::Object::Retrieved) || if ((tab->m_status != NdbDictionary::Object::Retrieved) ||
(tab->m_id != ev->m_table_id) || ((Uint32) tab->m_id != ev->m_table_id) ||
(table_version_major(tab->m_version) != (table_version_major(tab->m_version) !=
table_version_major(ev->m_table_version))) table_version_major(ev->m_table_version)))
{ {
@ -3731,7 +3731,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab)
DBUG_PRINT("info",("Table: id: %d version: %d", DBUG_PRINT("info",("Table: id: %d version: %d",
table.m_id, table.m_version)); table.m_id, table.m_version));
if (table.m_id != ev->m_table_id || if ((Uint32) table.m_id != ev->m_table_id ||
table_version_major(table.m_version) != table_version_major(table.m_version) !=
table_version_major(ev->m_table_version)) table_version_major(ev->m_table_version))
{ {
@ -3747,7 +3747,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab)
#endif #endif
if ( attributeList_sz > table.getNoOfColumns() ) if ( attributeList_sz > (uint) table.getNoOfColumns() )
{ {
m_error.code = 241; m_error.code = 241;
DBUG_PRINT("error",("Invalid version, too many columns")); DBUG_PRINT("error",("Invalid version, too many columns"));
@ -3757,7 +3757,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab)
assert( (int)attributeList_sz <= table.getNoOfColumns() ); assert( (int)attributeList_sz <= table.getNoOfColumns() );
for(unsigned id= 0; ev->m_columns.size() < attributeList_sz; id++) { for(unsigned id= 0; ev->m_columns.size() < attributeList_sz; id++) {
if ( id >= table.getNoOfColumns()) if ( id >= (uint) table.getNoOfColumns())
{ {
m_error.code = 241; m_error.code = 241;
DBUG_PRINT("error",("Invalid version, column %d out of range", id)); DBUG_PRINT("error",("Invalid version, column %d out of range", id));

View File

@ -58,7 +58,7 @@ print_std(const SubTableData * sdata, LinearSectionPtr ptr[3])
SubTableData::getOperation(sdata->requestInfo)); SubTableData::getOperation(sdata->requestInfo));
for (int i = 0; i <= 2; i++) { for (int i = 0; i <= 2; i++) {
printf("sec=%d addr=%p sz=%d\n", i, (void*)ptr[i].p, ptr[i].sz); printf("sec=%d addr=%p sz=%d\n", i, (void*)ptr[i].p, ptr[i].sz);
for (int j = 0; j < ptr[i].sz; j++) for (int j = 0; (uint) j < ptr[i].sz; j++)
printf("%08x ", ptr[i].p[j]); printf("%08x ", ptr[i].p[j]);
printf("\n"); printf("\n");
} }
@ -199,11 +199,11 @@ NdbEventOperationImpl::init(NdbEventImpl& evnt)
m_mergeEvents = false; m_mergeEvents = false;
#endif #endif
m_ref_count = 0; m_ref_count = 0;
DBUG_PRINT("info", ("m_ref_count = 0 for op: %p", this)); DBUG_PRINT("info", ("m_ref_count = 0 for op: 0x%lx", (long) this));
m_has_error= 0; m_has_error= 0;
DBUG_PRINT("exit",("this: 0x%x oid: %u", this, m_oid)); DBUG_PRINT("exit",("this: 0x%lx oid: %u", (long) this, m_oid));
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
} }
@ -739,8 +739,8 @@ NdbEventOperationImpl::receive_event()
NdbTableImpl *tmp_table_impl= m_eventImpl->m_tableImpl; NdbTableImpl *tmp_table_impl= m_eventImpl->m_tableImpl;
m_eventImpl->m_tableImpl = at; m_eventImpl->m_tableImpl = at;
DBUG_PRINT("info", ("switching table impl 0x%x -> 0x%x", DBUG_PRINT("info", ("switching table impl 0x%lx -> 0x%lx",
tmp_table_impl, at)); (long) tmp_table_impl, (long) at));
// change the rec attrs to refer to the new table object // change the rec attrs to refer to the new table object
int i; int i;
@ -751,9 +751,9 @@ NdbEventOperationImpl::receive_event()
{ {
int no = p->getColumn()->getColumnNo(); int no = p->getColumn()->getColumnNo();
NdbColumnImpl *tAttrInfo = at->getColumn(no); NdbColumnImpl *tAttrInfo = at->getColumn(no);
DBUG_PRINT("info", ("rec_attr: 0x%x " DBUG_PRINT("info", ("rec_attr: 0x%lx "
"switching column impl 0x%x -> 0x%x", "switching column impl 0x%lx -> 0x%lx",
p, p->m_column, tAttrInfo)); (long) p, (long) p->m_column, (long) tAttrInfo));
p->m_column = tAttrInfo; p->m_column = tAttrInfo;
p = p->next(); p = p->next();
} }
@ -765,9 +765,9 @@ NdbEventOperationImpl::receive_event()
{ {
int no = p->getColumn()->getColumnNo(); int no = p->getColumn()->getColumnNo();
NdbColumnImpl *tAttrInfo = at->getColumn(no); NdbColumnImpl *tAttrInfo = at->getColumn(no);
DBUG_PRINT("info", ("rec_attr: 0x%x " DBUG_PRINT("info", ("rec_attr: 0x%lx "
"switching column impl 0x%x -> 0x%x", "switching column impl 0x%lx -> 0x%lx",
p, p->m_column, tAttrInfo)); (long) p, (long) p->m_column, (long) tAttrInfo));
p->m_column = tAttrInfo; p->m_column = tAttrInfo;
p = p->next(); p = p->next();
} }
@ -1269,8 +1269,9 @@ NdbEventBuffer::getGCIEventOperations(Uint32* iter, Uint32* event_types)
EventBufData_list::Gci_op g = gci_ops->m_gci_op_list[(*iter)++]; EventBufData_list::Gci_op g = gci_ops->m_gci_op_list[(*iter)++];
if (event_types != NULL) if (event_types != NULL)
*event_types = g.event_types; *event_types = g.event_types;
DBUG_PRINT("info", ("gci: %d g.op: %x g.event_types: %x", DBUG_PRINT("info", ("gci: %u g.op: 0x%lx g.event_types: 0x%lx",
(unsigned)gci_ops->m_gci, g.op, g.event_types)); (unsigned)gci_ops->m_gci, (long) g.op,
(long) g.event_types));
DBUG_RETURN(g.op); DBUG_RETURN(g.op);
} }
DBUG_RETURN(NULL); DBUG_RETURN(NULL);
@ -1563,8 +1564,8 @@ NdbEventBuffer::complete_outof_order_gcis()
#endif #endif
m_complete_data.m_data.append_list(&bucket->m_data, start_gci); m_complete_data.m_data.append_list(&bucket->m_data, start_gci);
#ifdef VM_TRACE #ifdef VM_TRACE
ndbout_c(" moved %lld rows -> %lld", bucket->m_data.m_count, ndbout_c(" moved %ld rows -> %ld", (long) bucket->m_data.m_count,
m_complete_data.m_data.m_count); (long) m_complete_data.m_data.m_count);
#else #else
ndbout_c(""); ndbout_c("");
#endif #endif
@ -2180,7 +2181,7 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata,
Ev_t* tp = 0; Ev_t* tp = 0;
int i; int i;
for (i = 0; i < sizeof(ev_t)/sizeof(ev_t[0]); i++) { for (i = 0; (uint) i < sizeof(ev_t)/sizeof(ev_t[0]); i++) {
if (ev_t[i].t1 == t1 && ev_t[i].t2 == t2) { if (ev_t[i].t1 == t1 && ev_t[i].t2 == t2) {
tp = &ev_t[i]; tp = &ev_t[i];
break; break;

View File

@ -64,6 +64,9 @@ NdbIndexOperation::indxInit(const NdbIndexImpl * anIndex,
case(NdbDictionary::Index::OrderedIndex): case(NdbDictionary::Index::OrderedIndex):
setErrorCodeAbort(4003); setErrorCodeAbort(4003);
return -1; return -1;
default:
DBUG_ASSERT(0);
break;
} }
m_theIndex = anIndex; m_theIndex = anIndex;
m_accessTable = anIndex->m_table; m_accessTable = anIndex->m_table;

View File

@ -236,7 +236,7 @@ NdbIndexStat::stat_search(const Area& a, const Uint32* key, Uint32 keylen, Uint3
int int
NdbIndexStat::stat_oldest(const Area& a) NdbIndexStat::stat_oldest(const Area& a)
{ {
Uint32 i, k, m; Uint32 i, k= 0, m;
bool found = false; bool found = false;
m = ~(Uint32)0; // shut up incorrect CC warning m = ~(Uint32)0; // shut up incorrect CC warning
for (i = 0; i < a.m_entries; i++) { for (i = 0; i < a.m_entries; i++) {

View File

@ -1091,53 +1091,61 @@ NdbOperation::branch_col(Uint32 type,
int int
NdbOperation::branch_col_eq(Uint32 ColId, const void * val, Uint32 len, NdbOperation::branch_col_eq(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){ bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_eq %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); INT_DEBUG(("branch_col_eq %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::EQ, ColId, val, len, nopad, Label); return branch_col(Interpreter::EQ, ColId, val, len, nopad, Label);
} }
int int
NdbOperation::branch_col_ne(Uint32 ColId, const void * val, Uint32 len, NdbOperation::branch_col_ne(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){ bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_ne %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); INT_DEBUG(("branch_col_ne %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::NE, ColId, val, len, nopad, Label); return branch_col(Interpreter::NE, ColId, val, len, nopad, Label);
} }
int int
NdbOperation::branch_col_lt(Uint32 ColId, const void * val, Uint32 len, NdbOperation::branch_col_lt(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){ bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_lt %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); INT_DEBUG(("branch_col_lt %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::LT, ColId, val, len, nopad, Label); return branch_col(Interpreter::LT, ColId, val, len, nopad, Label);
} }
int int
NdbOperation::branch_col_le(Uint32 ColId, const void * val, Uint32 len, NdbOperation::branch_col_le(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){ bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_le %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); INT_DEBUG(("branch_col_le %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::LE, ColId, val, len, nopad, Label); return branch_col(Interpreter::LE, ColId, val, len, nopad, Label);
} }
int int
NdbOperation::branch_col_gt(Uint32 ColId, const void * val, Uint32 len, NdbOperation::branch_col_gt(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){ bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_gt %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); INT_DEBUG(("branch_col_gt %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::GT, ColId, val, len, nopad, Label); return branch_col(Interpreter::GT, ColId, val, len, nopad, Label);
} }
int int
NdbOperation::branch_col_ge(Uint32 ColId, const void * val, Uint32 len, NdbOperation::branch_col_ge(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){ bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_ge %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); INT_DEBUG(("branch_col_ge %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::GE, ColId, val, len, nopad, Label); return branch_col(Interpreter::GE, ColId, val, len, nopad, Label);
} }
int int
NdbOperation::branch_col_like(Uint32 ColId, const void * val, Uint32 len, NdbOperation::branch_col_like(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){ bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_like %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); INT_DEBUG(("branch_col_like %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::LIKE, ColId, val, len, nopad, Label); return branch_col(Interpreter::LIKE, ColId, val, len, nopad, Label);
} }
int int
NdbOperation::branch_col_notlike(Uint32 ColId, const void * val, Uint32 len, NdbOperation::branch_col_notlike(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){ bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_notlike %u %.*s(%u,%d) -> %u", ColId,len,val,len,nopad,Label)); INT_DEBUG(("branch_col_notlike %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::NOT_LIKE, ColId, val, len, nopad, Label); return branch_col(Interpreter::NOT_LIKE, ColId, val, len, nopad, Label);
} }
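
Every branch_col_* change above is the same one-character fix plus a line reflow: the key arrives as const void*, and gcc's printf format checking wants a character pointer behind %.*s, hence the cast. A sketch with invented names:

#include <cstdio>

static void debug_branch(unsigned col_id, const void *val, unsigned len)
{
  /*
    std::printf("branch_col_eq %u %.*s\n", col_id, (int) len, val);
    -Wformat: %s expects an argument of type char*, not void*.
  */
  std::printf("branch_col_eq %u %.*s\n", col_id, (int) len, (const char*) val);
}

int main()
{
  debug_branch(3, "abcdef", 3);    /* prints: branch_col_eq 3 abc */
  return 0;
}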

View File

@ -372,7 +372,12 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
j = length; j = length;
} }
break; break;
unknown:
case NdbDictionary::Column::Undefined:
case NdbDictionary::Column::Mediumint:
case NdbDictionary::Column::Mediumunsigned:
case NdbDictionary::Column::Longvarbinary:
unknown:
//default: /* no print functions for the rest, just print type */ //default: /* no print functions for the rest, just print type */
out << (int) r.getType(); out << (int) r.getType();
j = length; j = length;

View File

@ -181,7 +181,8 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
} }
bool rangeScan = false; bool rangeScan = false;
if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex) if ( (int) m_accessTable->m_indexType ==
(int) NdbDictionary::Index::OrderedIndex)
{ {
if (m_currentTable == m_accessTable){ if (m_currentTable == m_accessTable){
// Old way of scanning indexes, should not be allowed // Old way of scanning indexes, should not be allowed
@ -588,7 +589,7 @@ err4:
theNdbCon->theTransactionIsStarted = false; theNdbCon->theTransactionIsStarted = false;
theNdbCon->theReleaseOnClose = true; theNdbCon->theReleaseOnClose = true;
if(DEBUG_NEXT_RESULT) ndbout_c("return -1", retVal); if(DEBUG_NEXT_RESULT) ndbout_c("return %d", retVal);
return -1; return -1;
} }

View File

@ -84,7 +84,7 @@ NdbObjectIdMap::map(void * object){
// unlock(); // unlock();
DBUG_PRINT("info",("NdbObjectIdMap::map(0x%x) %u", object, ff<<2)); DBUG_PRINT("info",("NdbObjectIdMap::map(0x%lx) %u", (long) object, ff<<2));
return ff<<2; return ff<<2;
} }
@ -102,14 +102,16 @@ NdbObjectIdMap::unmap(Uint32 id, void *object){
m_map[i].m_next = m_firstFree; m_map[i].m_next = m_firstFree;
m_firstFree = i; m_firstFree = i;
} else { } else {
ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%x) obj=0x%x", id, object, obj); ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%lx) obj=0x%lx",
DBUG_PRINT("error",("NdbObjectIdMap::unmap(%u, 0x%x) obj=0x%x", id, object, obj)); id, (long) object, (long) obj);
DBUG_PRINT("error",("NdbObjectIdMap::unmap(%u, 0x%lx) obj=0x%lx",
id, (long) object, (long) obj));
return 0; return 0;
} }
// unlock(); // unlock();
DBUG_PRINT("info",("NdbObjectIdMap::unmap(%u) obj=0x%x", id, obj)); DBUG_PRINT("info",("NdbObjectIdMap::unmap(%u) obj=0x%lx", id, (long) obj));
return obj; return obj;
} }

View File

@ -131,7 +131,7 @@ int desc_logfilegroup(Ndb *myndb, char* name)
assert(dict); assert(dict);
NdbDictionary::LogfileGroup lfg= dict->getLogfileGroup(name); NdbDictionary::LogfileGroup lfg= dict->getLogfileGroup(name);
NdbError err= dict->getNdbError(); NdbError err= dict->getNdbError();
if(err.classification!=ndberror_cl_none) if( (int) err.classification != (int) ndberror_cl_none)
return 0; return 0;
ndbout << "Type: LogfileGroup" << endl; ndbout << "Type: LogfileGroup" << endl;
@ -153,7 +153,7 @@ int desc_tablespace(Ndb *myndb, char* name)
assert(dict); assert(dict);
NdbDictionary::Tablespace ts= dict->getTablespace(name); NdbDictionary::Tablespace ts= dict->getTablespace(name);
NdbError err= dict->getNdbError(); NdbError err= dict->getNdbError();
if(err.classification!=ndberror_cl_none) if ((int) err.classification != (int) ndberror_cl_none)
return 0; return 0;
ndbout << "Type: Tablespace" << endl; ndbout << "Type: Tablespace" << endl;
@ -175,11 +175,11 @@ int desc_undofile(Ndb_cluster_connection &con, Ndb *myndb, char* name)
con.init_get_next_node(iter); con.init_get_next_node(iter);
while(id= con.get_next_node(iter)) while ((id= con.get_next_node(iter)))
{ {
NdbDictionary::Undofile uf= dict->getUndofile(0, name); NdbDictionary::Undofile uf= dict->getUndofile(0, name);
NdbError err= dict->getNdbError(); NdbError err= dict->getNdbError();
if(err.classification!=ndberror_cl_none) if ((int) err.classification != (int) ndberror_cl_none)
return 0; return 0;
ndbout << "Type: Undofile" << endl; ndbout << "Type: Undofile" << endl;
@ -211,11 +211,11 @@ int desc_datafile(Ndb_cluster_connection &con, Ndb *myndb, char* name)
con.init_get_next_node(iter); con.init_get_next_node(iter);
while(id= con.get_next_node(iter)) while ((id= con.get_next_node(iter)))
{ {
NdbDictionary::Datafile df= dict->getDatafile(id, name); NdbDictionary::Datafile df= dict->getDatafile(id, name);
NdbError err= dict->getNdbError(); NdbError err= dict->getNdbError();
if(err.classification!=ndberror_cl_none) if ((int) err.classification != (int) ndberror_cl_none)
return 0; return 0;
ndbout << "Type: Datafile" << endl; ndbout << "Type: Datafile" << endl;

View File

@ -300,7 +300,13 @@ RestoreMetaData::markSysTables()
strcmp(tableName, "NDB$EVENTS_0") == 0 || strcmp(tableName, "NDB$EVENTS_0") == 0 ||
strcmp(tableName, "sys/def/SYSTAB_0") == 0 || strcmp(tableName, "sys/def/SYSTAB_0") == 0 ||
strcmp(tableName, "sys/def/NDB$EVENTS_0") == 0 || strcmp(tableName, "sys/def/NDB$EVENTS_0") == 0 ||
/*
The following is for old MySQL versions,
before we changed the database name of the tables from
"cluster_replication" -> "cluster" -> "mysql"
*/
strcmp(tableName, "cluster_replication/def/" NDB_APPLY_TABLE) == 0 || strcmp(tableName, "cluster_replication/def/" NDB_APPLY_TABLE) == 0 ||
strcmp(tableName, "cluster/def/" NDB_APPLY_TABLE) == 0 ||
strcmp(tableName, NDB_REP_DB "/def/" NDB_APPLY_TABLE) == 0 || strcmp(tableName, NDB_REP_DB "/def/" NDB_APPLY_TABLE) == 0 ||
strcmp(tableName, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE)== 0 ) strcmp(tableName, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE)== 0 )
table->isSysTable = true; table->isSysTable = true;

View File

@ -494,7 +494,7 @@ BackupRestore::object(Uint32 type, const void * ptr)
NdbDictionary::Tablespace curr = dict->getTablespace(old.getName()); NdbDictionary::Tablespace curr = dict->getTablespace(old.getName());
NdbError errobj = dict->getNdbError(); NdbError errobj = dict->getNdbError();
if(errobj.classification == ndberror_cl_none) if ((int) errobj.classification == (int) ndberror_cl_none)
{ {
NdbDictionary::Tablespace* currptr = new NdbDictionary::Tablespace(curr); NdbDictionary::Tablespace* currptr = new NdbDictionary::Tablespace(curr);
NdbDictionary::Tablespace * null = 0; NdbDictionary::Tablespace * null = 0;
@ -533,7 +533,7 @@ BackupRestore::object(Uint32 type, const void * ptr)
NdbDictionary::LogfileGroup curr = dict->getLogfileGroup(old.getName()); NdbDictionary::LogfileGroup curr = dict->getLogfileGroup(old.getName());
NdbError errobj = dict->getNdbError(); NdbError errobj = dict->getNdbError();
if(errobj.classification == ndberror_cl_none) if ((int) errobj.classification == (int) ndberror_cl_none)
{ {
NdbDictionary::LogfileGroup* currptr = NdbDictionary::LogfileGroup* currptr =
new NdbDictionary::LogfileGroup(curr); new NdbDictionary::LogfileGroup(curr);
@ -680,7 +680,7 @@ BackupRestore::table(const TableS & table){
return true; return true;
const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable); const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable);
if(tmptab.m_indexType != NdbDictionary::Index::Undefined){ if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined){
m_indexes.push_back(table.m_dictTable); m_indexes.push_back(table.m_dictTable);
return true; return true;
} }

View File

@ -7,7 +7,7 @@
int main() { int main() {
plan(5); plan(5);
ok(1 == 1, "testing basic functions"); ok(1 == 1, "testing basic functions");
ok(2 == 2, ""); ok(2 == 2, " ");
ok(3 == 3, NULL); ok(3 == 3, NULL);
if (1 == 1) if (1 == 1)
skip(2, "Sensa fragoli"); skip(2, "Sensa fragoli");

View File

@ -235,6 +235,7 @@ skip(int how_many, char const *const fmt, ...)
while (how_many-- > 0) while (how_many-- > 0)
{ {
va_list ap; va_list ap;
memset((char*) &ap, 0, sizeof(ap)); /* Keep compiler happy */
vemit_tap(1, NULL, ap); vemit_tap(1, NULL, ap);
emit_dir("skip", reason); emit_dir("skip", reason);
emit_endl(); emit_endl();