Fixed some compiler warnings found when compiling for Windows.

Changed rows_read and rows_sent status variables to be longlong (to avoid compiler warnings)


sql/item_func.cc:
  Fixed wrong usage of alias
sql/item_subselect.cc:
  Changed buffer size to ulonglong to be able to detect buffers bigger than size_t
sql/item_subselect.h:
  Changed buffer size to ulonglong to be able to detect buffers bigger than size_t
sql/multi_range_read.cc:
  Fixed compiler warning by using correct type for function argument
sql/mysqld.cc:
  Changed rows_read and rows_sent status variables to be longlong
sql/opt_subselect.h:
  Fixed compiler warning by using correct type for function argument
sql/sql_class.cc:
  Changed rows_read and rows_sent status variables to be longlong
sql/sql_class.h:
  Changed rows_read and rows_sent status variables to be longlong
  Changed max_nulls_in_row to uint, as this is the number of columns in a row.
  This fixed some compiler warnings.
sql/sql_select.cc:
  Added casts to avoid compiler warnings
storage/heap/ha_heap.cc:
  Initialize members of different types separately
storage/oqgraph/ha_oqgraph.cc:
  Fixed argument to store(longlong) to avoid compiler warnings
Michael Widenius 2010-11-30 01:27:14 +02:00
parent b9589e2ed7
commit ab5e4eefd5
11 changed files with 35 additions and 26 deletions
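
Most of the warnings fixed here are implicit narrowing conversions (double into a 64-bit unsigned row count, 64-bit into 32-bit) that the Windows compiler reports even where gcc stays quiet. A minimal stand-alone sketch of the pattern and of the explicit-cast fix, using stand-in typedefs rather than the server's headers:

// Minimal stand-alone sketch (not server code) of the warning pattern fixed in
// this commit: an implicit conversion from double to a 64-bit unsigned row
// count, which MSVC reports (C4244, "possible loss of data") by default.
#include <cstdio>

typedef unsigned long long my_rows;             // stand-in for ha_rows

static double index_cost(my_rows rows) { return (double) rows * 0.01; }

int main()
{
  double estimated= 12345.6;                    // optimizer estimates are doubles
  // double cost= index_cost(estimated);        // implicit double -> my_rows: warns
  double cost= index_cost((my_rows) estimated); // explicit cast states the intent
  std::printf("cost=%f\n", cost);
  return 0;
}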

sql/item_func.cc

@@ -5786,7 +5786,7 @@ Item_func_sp::cleanup()
     sp_result_field= NULL;
   }
   m_sp= NULL;
-  dummy_table->alias= NULL;
+  dummy_table->alias.free();
   Item_func::cleanup();
 }
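
The alias fix above replaces a pointer-style reset with a call to free(). A hedged sketch of the idea, assuming alias had become an owning string object rather than a char* (String_like below is a hypothetical stand-in, not the server's String class): once the member owns its buffer, the reset has to go through the object's own cleanup method.

// Hypothetical stand-in (NOT the server's String class) showing the alias fix:
// when a member changes from a raw char* to an owning string object, resetting
// it means calling its free() method instead of assigning NULL.
#include <cstring>

class String_like
{
  char *ptr;
public:
  String_like() : ptr(0) {}
  void copy(const char *s)
  {
    free();
    std::size_t len= std::strlen(s) + 1;
    ptr= new char[len];
    std::memcpy(ptr, s, len);
  }
  void free() { delete[] ptr; ptr= 0; }          // the correct way to reset
  ~String_like() { free(); }
};

struct Dummy_table
{
  String_like alias;                             // was: const char *alias
};

int main()
{
  Dummy_table t;
  t.alias.copy("tmp");
  // t.alias= NULL;   // old pointer-style reset: wrong for an owning object
  t.alias.free();     // release the buffer explicitly, as the hunk does
  return 0;
}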

sql/item_subselect.cc

@@ -3651,7 +3651,7 @@ subselect_hash_sj_engine::choose_partial_match_strategy(
   bool has_non_null_key, bool has_covering_null_row,
   MY_BITMAP *partial_match_key_parts)
 {
-  size_t pm_buff_size;
+  ulonglong pm_buff_size;
   DBUG_ASSERT(strategy == PARTIAL_MATCH);
   /*
@@ -3716,11 +3716,12 @@ subselect_hash_sj_engine::choose_partial_match_strategy(
   matching via merging is not applicable.
 */
-size_t subselect_hash_sj_engine::rowid_merge_buff_size(
+ulonglong subselect_hash_sj_engine::rowid_merge_buff_size(
   bool has_non_null_key, bool has_covering_null_row,
   MY_BITMAP *partial_match_key_parts)
 {
-  size_t buff_size; /* Total size of all buffers used by partial matching. */
+  /* Total size of all buffers used by partial matching. */
+  ulonglong buff_size;
   ha_rows row_count= tmp_table->file->stats.records;
   uint rowid_length= tmp_table->file->ref_length;
   select_materialize_with_stats *result_sink=
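
The point of computing the buffer size in ulonglong instead of size_t: on a 32-bit build size_t is 32 bits, so a row_count * rowid_length product can wrap silently, while the 64-bit value keeps the true size and lets an oversized buffer be detected before allocation. A small sketch with stand-in types:

// Sketch (stand-in types, not server code) of the size_t -> ulonglong change:
// the 64-bit product cannot wrap, so a buffer too big for size_t is detectable.
#include <cstddef>
#include <cstdio>
#include <limits>

typedef unsigned long long ulonglong;

static bool buffer_fits_in_size_t(ulonglong row_count, unsigned rowid_length)
{
  ulonglong buff_size= row_count * rowid_length;   // exact 64-bit result
  return buff_size <= std::numeric_limits<std::size_t>::max();
}

int main()
{
  // ~500M rows of 10-byte rowids is about 5GB: representable in ulonglong,
  // but bigger than anything a 32-bit size_t (max ~4GB) can describe.
  std::printf("fits=%d\n", (int) buffer_fits_in_size_t(500000000ULL, 10));
  return 0;
}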

sql/item_subselect.h

@@ -800,9 +800,9 @@ protected:
 protected:
   exec_strategy get_strategy_using_schema();
   exec_strategy get_strategy_using_data();
-  size_t rowid_merge_buff_size(bool has_non_null_key,
+  ulonglong rowid_merge_buff_size(bool has_non_null_key,
                                bool has_covering_null_row,
                                MY_BITMAP *partial_match_key_parts);
   void choose_partial_match_strategy(bool has_non_null_key,
                                      bool has_covering_null_row,
                                      MY_BITMAP *partial_match_key_parts);

sql/multi_range_read.cc

@@ -820,7 +820,7 @@ bool DsMrr_impl::get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
   cost->mem_cost= (double)rows_in_last_step * elem_size;
   /* Total cost of all index accesses */
-  index_read_cost= h->keyread_time(keynr, 1, (double)rows);
+  index_read_cost= h->keyread_time(keynr, 1, rows);
   cost->add_io(index_read_cost, 1 /* Random seeks */);
   return FALSE;
 }

sql/mysqld.cc

@@ -8086,8 +8086,8 @@ SHOW_VAR status_vars[]= {
   {"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS},
   {"Opened_table_definitions", (char*) offsetof(STATUS_VAR, opened_shares), SHOW_LONG_STATUS},
   {"Prepared_stmt_count", (char*) &show_prepared_stmt_count, SHOW_FUNC},
-  {"Rows_sent", (char*) offsetof(STATUS_VAR, rows_sent), SHOW_LONG_STATUS},
-  {"Rows_read", (char*) offsetof(STATUS_VAR, rows_read), SHOW_LONG_STATUS},
+  {"Rows_sent", (char*) offsetof(STATUS_VAR, rows_sent), SHOW_LONGLONG_STATUS},
+  {"Rows_read", (char*) offsetof(STATUS_VAR, rows_read), SHOW_LONGLONG_STATUS},
 #ifdef HAVE_QUERY_CACHE
   {"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_NOFLUSH},
   {"Qcache_free_memory", (char*) &query_cache.free_memory, SHOW_LONG_NOFLUSH},

sql/opt_subselect.h

@@ -199,7 +199,8 @@ public:
   double records= rows2double(s->table->file->stats.records);
   /* The cost is entire index scan cost (divided by 2) */
-  double read_time= s->table->file->keyread_time(key, 1, records);
+  double read_time= s->table->file->keyread_time(key, 1,
+                                                 (ha_rows) records);
   /*
     Now find out how many different keys we will get (for now we

sql/sql_class.cc

@@ -1189,6 +1189,8 @@ void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
   /* Handle the not ulong variables. See end of system_status_var */
   to_var->bytes_received= from_var->bytes_received;
   to_var->bytes_sent+= from_var->bytes_sent;
+  to_var->rows_read+= from_var->rows_read;
+  to_var->rows_sent+= from_var->rows_sent;
   to_var->binlog_bytes_written= from_var->binlog_bytes_written;
   to_var->cpu_time+= from_var->cpu_time;
   to_var->busy_time+= from_var->busy_time;
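
The two added lines follow the comment in the hunk: variables that are not ulong sit at the end of system_status_var and are not covered by the generic aggregation of the leading ulong block, so once rows_read/rows_sent become ulonglong they have to be summed explicitly. A simplified model of that pattern (an assumption about the shape of add_to_status(), not its exact code):

// Simplified model of add_to_status(): the leading block of ulong counters is
// aggregated generically, so any member that is not a ulong must live after
// that block and be added by hand -- which is why rows_read/rows_sent gained
// their own "+=" lines once they became ulonglong.
#include <cstdio>

typedef unsigned long ulong;
typedef unsigned long long ulonglong;

struct status_var_model
{
  /* block of ulong counters, aggregated generically */
  ulong com_select;
  ulong opened_tables;
  ulong last_ulong_var;                  // marks the end of the ulong block
  /* wider counters: handled one by one below */
  ulonglong bytes_sent;
  ulonglong rows_read;
  ulonglong rows_sent;
};

static void add_to_status_model(status_var_model *to, const status_var_model *from)
{
  ulong *t= &to->com_select, *end= &to->last_ulong_var + 1;
  const ulong *f= &from->com_select;
  while (t != end)                       // generic sweep over the ulong block
    *t++ += *f++;
  /* Handle the not-ulong variables (see end of the struct) */
  to->bytes_sent+= from->bytes_sent;
  to->rows_read+= from->rows_read;       // the lines this commit adds
  to->rows_sent+= from->rows_sent;
}

int main()
{
  status_var_model global= {1, 2, 3, 10, 20, 30};
  status_var_model thd=    {1, 1, 1,  5,  6,  7};
  add_to_status_model(&global, &thd);
  std::printf("rows_read=%llu rows_sent=%llu\n", global.rows_read, global.rows_sent);
  return 0;
}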

sql/sql_class.h

@@ -557,8 +557,6 @@ typedef struct system_status_var
   ulong select_range_count;
   ulong select_range_check_count;
   ulong select_scan_count;
-  ulong rows_read;
-  ulong rows_sent;
   ulong long_query_count;
   ulong filesort_merge_passes;
   ulong filesort_range_count;
@@ -588,6 +586,8 @@ typedef struct system_status_var
   ulonglong bytes_received;
   ulonglong bytes_sent;
   ulonglong binlog_bytes_written;
+  ulonglong rows_read;
+  ulonglong rows_sent;
   double last_query_cost;
   double cpu_time, busy_time;
 } STATUS_VAR;
@@ -3042,7 +3042,7 @@ protected:
     The number of columns in the biggest sub-row that consists of only
     NULL values.
   */
-  ha_rows max_nulls_in_row;
+  uint max_nulls_in_row;
   /*
     Count of rows writtent to the temp table. This is redundant as it is
     already stored in handler::stats.records, however that one is relatively
@@ -3076,7 +3076,7 @@ public:
     DBUG_ASSERT(idx < table->s->fields);
     return col_stat[idx].min_null_row;
   }
-  ha_rows get_max_nulls_in_row() { return max_nulls_in_row; }
+  uint get_max_nulls_in_row() { return max_nulls_in_row; }
 };
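
max_nulls_in_row counts columns, so uint is the natural width; keeping it in the 64-bit row-count type ha_rows forced a narrowing conversion wherever it met real column counts. A small sketch with stand-in typedefs:

// Sketch (stand-in typedefs) of the max_nulls_in_row change: the value counts
// columns, so uint is wide enough, and the 64 -> 32 bit narrowing that a
// ha_rows member caused (MSVC C4244) disappears.
#include <cstdio>

typedef unsigned int       uint;
typedef unsigned long long my_ha_rows;   // stand-in for ha_rows (row counts)

struct null_row_stats
{
  uint max_nulls_in_row;                 // was my_ha_rows: wider than a column count needs
  uint get_max_nulls_in_row() const { return max_nulls_in_row; }
};

int main()
{
  null_row_stats stats= { 0 };
  uint nulls_in_current_row= 3;          // bounded by the number of columns
  stats.max_nulls_in_row= nulls_in_current_row;   // uint = uint: no conversion
  // With a my_ha_rows member, the uint accessor above would have needed a
  // truncating cast (or triggered the warning this commit removes).
  std::printf("%u\n", stats.get_max_nulls_in_row());
  return 0;
}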

sql/sql_select.cc

@@ -4616,9 +4616,10 @@ best_access_path(JOIN *join,
   tmp= records;
   set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key);
   if (table->covering_keys.is_set(key))
-    tmp= table->file->keyread_time(key, 1, tmp);
+    tmp= table->file->keyread_time(key, 1, (ha_rows) tmp);
   else
-    tmp= table->file->read_time(key, 1, min(tmp,s->worst_seeks)-1);
+    tmp= table->file->read_time(key, 1,
+                                (ha_rows) min(tmp,s->worst_seeks)-1);
   tmp*= record_count;
 }
 }
@@ -4779,9 +4780,10 @@ best_access_path(JOIN *join,
   /* Limit the number of matched rows */
   set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key);
   if (table->covering_keys.is_set(key))
-    tmp= table->file->keyread_time(key, 1, tmp);
+    tmp= table->file->keyread_time(key, 1, (ha_rows) tmp);
   else
-    tmp= table->file->read_time(key, 1, min(tmp,s->worst_seeks)-1);
+    tmp= table->file->read_time(key, 1,
+                                (ha_rows) min(tmp,s->worst_seeks)-1);
   tmp*= record_count;
 }
 else
@@ -18426,8 +18428,8 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
   item_list.push_back(new Item_string("func", strlen("func"), cs));
 }
 /* rows */
-ha_rows rows= (sj_strategy == SJ_OPT_MATERIALIZE_SCAN)?
-              tab->emb_sj_nest->sj_mat_info->rows : 1;
+ha_rows rows= (ha_rows) ((sj_strategy == SJ_OPT_MATERIALIZE_SCAN)?
+                         tab->emb_sj_nest->sj_mat_info->rows : 1.0);
 item_list.push_back(new Item_int((longlong)rows,
                                  MY_INT64_NUM_DECIMAL_DIGITS));
 /* filtered */
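
The best_access_path() hunks make the double-to-ha_rows conversions explicit at the keyread_time()/read_time() calls, and the select_describe() hunk additionally unifies the two branches of the conditional before a single cast: mixing the double sj_mat_info->rows with the int literal 1 makes the whole expression double, and assigning that to ha_rows is what warned. A minimal sketch of the ternary point, with a stand-in row type:

// Sketch of the select_describe() change: both branches of a conditional are
// converted to a common type, so double ? : int is a double expression, and
// storing it into an integer row count draws a conversion warning. Using 1.0
// for the other branch and one explicit cast documents the conversion.
#include <cstdio>

typedef unsigned long long my_ha_rows;   // stand-in for ha_rows

struct sj_mat_info_model { double rows; };  // row estimates are doubles

int main()
{
  sj_mat_info_model info= { 42.7 };
  bool materialize_scan= true;

  // my_ha_rows rows= materialize_scan ? info.rows : 1;   // double -> integer: warns
  my_ha_rows rows= (my_ha_rows) (materialize_scan ? info.rows : 1.0); // single explicit cast
  std::printf("%llu\n", rows);
  return 0;
}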

storage/heap/ha_heap.cc

@@ -695,7 +695,10 @@ int ha_heap::create(const char *name, TABLE *table_arg,
                            (uchar*) table_arg->record[0]);
       }
       else
-        seg->bit_length= seg->bit_start= seg->bit_pos= 0;
+      {
+        seg->bit_length= seg->bit_start= 0;
+        seg->bit_pos= 0;
+      }
     }
   }
   mem_per_row+= MY_ALIGN(share->reclength + 1, sizeof(char*));
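
A chained assignment is evaluated right to left, so the value is pushed through each member's type in turn; when the members have different integer widths (assumed here for HA_KEYSEG's bit_* members), the intermediate result gets narrowed and the compiler warns. Splitting the assignment by type, as the hunk does, avoids that. Sketch with stand-in widths:

// Sketch of the ha_heap::create() change: "a= b= c= 0" narrows the result of
// the rightmost assignment when the members have different widths, which MSVC
// reports (C4244). Grouping same-width members keeps every step lossless.
#include <cstdio>

struct keyseg_model                      // stand-in, not the real HA_KEYSEG
{
  unsigned char  bit_length;
  unsigned char  bit_start;
  unsigned short bit_pos;                // wider than the other two (assumed)
};

int main()
{
  keyseg_model seg;

  // seg.bit_length= seg.bit_start= seg.bit_pos= 0;  // short result narrowed to char: warns
  seg.bit_length= seg.bit_start= 0;      // same-width members chained together
  seg.bit_pos= 0;                        // the wider member assigned separately
  std::printf("%u %u %u\n", (unsigned) seg.bit_length, (unsigned) seg.bit_start,
              (unsigned) seg.bit_pos);
  return 0;
}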

storage/oqgraph/ha_oqgraph.cc

@@ -754,19 +754,19 @@ int ha_oqgraph::fill_record(byte *record, const open_query::row &row)
   if (row.orig_indicator)
   {
     field[1]->set_notnull();
-    field[1]->store((longlong) row.orig);
+    field[1]->store((longlong) row.orig, 0);
   }
   if (row.dest_indicator)
   {
     field[2]->set_notnull();
-    field[2]->store((longlong) row.dest);
+    field[2]->store((longlong) row.dest, 0);
   }
   if (row.weight_indicator)
   {
     field[3]->set_notnull();
-    field[3]->store((double) row.weight);
+    field[3]->store((double) row.weight, 0);
   }
   if (row.seq_indicator)
@@ -778,7 +778,7 @@ int ha_oqgraph::fill_record(byte *record, const open_query::row &row)
   if (row.link_indicator)
   {
     field[5]->set_notnull();
-    field[5]->store((longlong) row.link);
+    field[5]->store((longlong) row.link, 0);
   }
   if (ptrdiff)
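
A hedged reading of the store() change (the Field overload set is not shown in this diff): if the integer overload takes (longlong, bool unsigned_flag) while a one-argument overload takes double, then store((longlong) x) quietly converts the value to double, and that conversion is what the compiler reports; passing the second argument selects the integer overload explicitly. Self-contained sketch with a hypothetical field class:

// Hypothetical sketch (NOT the server's Field class) of why an explicit second
// argument to store() can silence a warning: with only one argument, the call
// resolves to the double overload through a longlong -> double conversion.
#include <cstdio>

typedef long long longlong;

struct field_model
{
  void store(double nr) { std::printf("store(double %f)\n", nr); }
  void store(longlong nr, bool unsigned_flag)
  { std::printf("store(longlong %lld, unsigned=%d)\n", nr, (int) unsigned_flag); }
};

int main()
{
  field_model f;
  longlong orig= 123456789012345LL;

  // f.store(orig);    // only the double overload is viable with one argument:
  //                   // implicit longlong -> double, possible precision loss
  f.store(orig, 0);    // selects the integer overload explicitly, as in the commit
  return 0;
}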