Fix numerous warnings introduced in the last pushes on Windows
This commit is contained in:
parent e906ffcf9c
commit 019256c9fc
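
The warnings addressed here are most likely MSVC's implicit narrowing-conversion diagnostics (C4244 / C4267, "conversion from '...' to '...', possible loss of data"): on Windows, ulong stays 32 bits even in 64-bit builds, and in 32-bit builds size_t is 32 bits while ha_rows / ulonglong values are 64 bits. The changes below follow two patterns: widen the declared type to size_t where the value can legitimately be large, or add an explicit cast at the call site where the value is known to fit. A minimal sketch of both patterns, using invented names (Cache, row_count_t) rather than the actual MariaDB identifiers:

    // Sketch only: illustrates the narrowing-conversion warnings and the two
    // fix patterns used in this commit; the names are invented, not MariaDB code.
    #include <cstddef>
    #include <cstdlib>

    typedef unsigned long long row_count_t;  // stands in for ha_rows / ulonglong

    struct Cache
    {
      size_t buff_size;                      // fix 1: widen the member (was a 32-bit ulong)

      void set_size(row_count_t needed)
      {
        // fix 2: explicit cast where the value is known to fit, so the
        // compiler does not warn about an implicit narrowing conversion.
        buff_size= (size_t) needed;
      }
    };

    int main()
    {
      row_count_t rows= 1000;
      // Same idea at an allocation site: cast the whole expression once.
      void *p= malloc((size_t)(rows * sizeof(double)));
      Cache c;
      c.set_size(rows);
      free(p);
      return 0;
    }
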
@@ -4382,8 +4382,8 @@ bool Ordered_key::alloc_keys_buffers()
 {
   DBUG_ASSERT(key_buff_elements > 0);
 
-  if (!(key_buff= (rownum_t*) my_malloc(key_buff_elements * sizeof(rownum_t),
-                                        MYF(MY_WME))))
+  if (!(key_buff= (rownum_t*) my_malloc((size_t)(key_buff_elements *
+                                        sizeof(rownum_t)), MYF(MY_WME))))
     return TRUE;
 
   /*
@@ -4392,7 +4392,7 @@ bool Ordered_key::alloc_keys_buffers()
     lookup offset.
   */
   /* Notice that max_null_row is max array index, we need count, so +1. */
-  if (bitmap_init(&null_key, NULL, max_null_row + 1, FALSE))
+  if (bitmap_init(&null_key, NULL, (uint)(max_null_row + 1), FALSE))
     return TRUE;
 
   cur_key_idx= HA_POS_ERROR;
@@ -4456,7 +4456,7 @@ Ordered_key::cmp_keys_by_row_data_and_rownum(Ordered_key *key,
 
 void Ordered_key::sort_keys()
 {
-  my_qsort2(key_buff, key_buff_elements, sizeof(rownum_t),
+  my_qsort2(key_buff, (size_t) key_buff_elements, sizeof(rownum_t),
             (qsort2_cmp) &cmp_keys_by_row_data_and_rownum, (void*) this);
   /* Invalidate the current row position. */
   cur_key_idx= HA_POS_ERROR;
@@ -4772,8 +4772,8 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts,
   */
   if (!(merge_keys= (Ordered_key**) thd->alloc(keys_count *
                                                sizeof(Ordered_key*))) ||
-      !(row_num_to_rowid= (uchar*) my_malloc(row_count * rowid_length *
-                                             sizeof(uchar), MYF(MY_WME))))
+      !(row_num_to_rowid= (uchar*) my_malloc((size_t)(row_count * rowid_length),
+                                             MYF(MY_WME))))
     return TRUE;
 
   /* Create the only non-NULL key if there is any. */

@@ -1009,7 +1009,7 @@ public:
 
   void set_null(rownum_t row_num)
   {
-    bitmap_set_bit(&null_key, row_num);
+    bitmap_set_bit(&null_key, (uint)row_num);
   }
   bool is_null(rownum_t row_num)
   {
@@ -1025,7 +1025,7 @@ public:
     }
     if (row_num > max_null_row || row_num < min_null_row)
       return FALSE;
-    return bitmap_is_set(&null_key, row_num);
+    return bitmap_is_set(&null_key, (uint)row_num);
   }
   void print(String *str);
 };

@@ -1127,8 +1127,8 @@ bool DsMrr_impl::setup_buffer_sharing(uint key_size_in_keybuf,
 
   if (bytes_for_rowids < (ptrdiff_t)rowid_buf_elem_size + 1)
   {
-    ptrdiff_t add= rowid_buf_elem_size + 1 - bytes_for_rowids;
-    bytes_for_rowids= rowid_buf_elem_size + 1;
+    ptrdiff_t add= (ptrdiff_t)(rowid_buf_elem_size + 1 - bytes_for_rowids);
+    bytes_for_rowids= (ptrdiff_t)rowid_buf_elem_size + 1;
     bytes_for_keys -= add;
   }
 
@@ -1138,7 +1138,7 @@ bool DsMrr_impl::setup_buffer_sharing(uint key_size_in_keybuf,
   key_buffer->set_buffer_space(rowid_buffer_end, full_buf_end);
 
   if (!key_buffer->have_space_for(key_buff_elem_size) ||
-      !rowid_buffer.have_space_for(rowid_buf_elem_size))
+      !rowid_buffer.have_space_for((size_t)rowid_buf_elem_size))
     return TRUE; /* Failed to provide minimum space for one of the buffers */
 
   return FALSE;

@@ -4924,7 +4924,7 @@ ha_rows get_table_cardinality_for_index_intersect(TABLE *table)
   {
     ha_rows d;
     double q;
-    for (q= table->file->stats.records, d= 1 ; q >= 10; q/= 10, d*= 10 ) ;
+    for (q= (double)table->file->stats.records, d= 1 ; q >= 10; q/= 10, d*= 10 ) ;
     return (ha_rows) (floor(q+0.5) * d);
   }
 }
@@ -5090,7 +5090,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
     return TRUE;
 
   size_t calc_cost_buff_size=
-    Unique::get_cost_calc_buff_size(records_in_scans,
+    Unique::get_cost_calc_buff_size((size_t)records_in_scans,
                                     common->key_size,
                                     common->max_memory_size);
   if (!(common->buff_elems= (uint *) alloc_root(param->mem_root,
@@ -5432,7 +5432,7 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
   ulonglong max_memory_size= common_info->max_memory_size;
 
   records_sent_to_unique+= ext_index_scan_records;
-  cost= Unique::get_use_cost(buff_elems, records_sent_to_unique, key_size,
+  cost= Unique::get_use_cost(buff_elems, (size_t) records_sent_to_unique, key_size,
                              max_memory_size, compare_factor, TRUE,
                              &next->in_memory);
   if (records_filtered_out_by_cpk)
@@ -5442,7 +5442,7 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
     double cost2;
     bool in_memory2;
     ha_rows records2= records_sent_to_unique-records_filtered_out_by_cpk;
-    cost2= Unique::get_use_cost(buff_elems, records2, key_size,
+    cost2= Unique::get_use_cost(buff_elems, (size_t) records2, key_size,
                                 max_memory_size, compare_factor, TRUE,
                                 &in_memory2);
     cost2+= get_cpk_filter_cost(ext_index_scan_records, common_info->cpk_scan,

@@ -3315,15 +3315,15 @@ public:
   bool get(TABLE *table);
 
   /* Cost of searching for an element in the tree */
-  inline static double get_search_cost(uint tree_elems, uint compare_factor)
+  inline static double get_search_cost(ulonglong tree_elems, uint compare_factor)
   {
     return log((double) tree_elems) / (compare_factor * M_LN2);
   }
 
-  static double get_use_cost(uint *buffer, uint nkeys, uint key_size,
+  static double get_use_cost(uint *buffer, size_t nkeys, uint key_size,
                              ulonglong max_in_memory_size, uint compare_factor,
                              bool intersect_fl, bool *in_memory);
-  inline static int get_cost_calc_buff_size(ulong nkeys, uint key_size,
+  inline static int get_cost_calc_buff_size(size_t nkeys, uint key_size,
                                             ulonglong max_in_memory_size)
   {
     register ulonglong max_elems_in_tree=

@@ -736,12 +736,12 @@ ulong JOIN_CACHE::get_min_join_buffer_size()
 {
   if (!min_buff_size)
   {
-    ulong len= 0;
+    size_t len= 0;
     for (JOIN_TAB *tab= join_tab-tables; tab < join_tab; tab++)
       len+= tab->get_max_used_fieldlength();
     len+= get_record_max_affix_length() + get_max_key_addon_space_per_record();
-    ulong min_sz= len*min_records;
-    ulong add_sz= 0;
+    size_t min_sz= len*min_records;
+    size_t add_sz= 0;
     for (uint i=0; i < min_records; i++)
       add_sz+= join_tab_scan->aux_buffer_incr(i+1);
     avg_aux_buffer_incr= add_sz/min_records;
@@ -787,9 +787,9 @@ ulong JOIN_CACHE::get_max_join_buffer_size(bool optimize_buff_size)
 {
   if (!max_buff_size)
   {
-    ulong max_sz;
-    ulong min_sz= get_min_join_buffer_size();
-    ulong len= 0;
+    size_t max_sz;
+    size_t min_sz= get_min_join_buffer_size();
+    size_t len= 0;
     for (JOIN_TAB *tab= join_tab-tables; tab < join_tab; tab++)
       len+= tab->get_used_fieldlength();
     len+= get_record_max_affix_length();
@@ -797,7 +797,7 @@ ulong JOIN_CACHE::get_max_join_buffer_size(bool optimize_buff_size)
     len+= get_max_key_addon_space_per_record() + avg_aux_buffer_incr;
     space_per_record= len;
 
-    ulong limit_sz= join->thd->variables.join_buff_size;
+    size_t limit_sz= join->thd->variables.join_buff_size;
     if (join_tab->join_buffer_size_limit)
       set_if_smaller(limit_sz, join_tab->join_buffer_size_limit);
     if (!optimize_buff_size)
@@ -860,8 +860,8 @@ int JOIN_CACHE::alloc_buffer()
   min_buff_size= 0;
   max_buff_size= 0;
   min_records= 1;
-  max_records= partial_join_cardinality <= join_buff_space_limit ?
-               (ulonglong) partial_join_cardinality : join_buff_space_limit;
+  max_records= (size_t) (partial_join_cardinality <= join_buff_space_limit ?
+               (ulonglong) partial_join_cardinality : join_buff_space_limit);
   set_if_bigger(max_records, 10);
   min_buff_size= get_min_join_buffer_size();
   buff_size= get_max_join_buffer_size(optimize_buff_size);
@@ -931,10 +931,10 @@ fail:
 
 bool JOIN_CACHE::shrink_join_buffer_in_ratio(ulonglong n, ulonglong d)
 {
-  ulonglong next_buff_size;
+  size_t next_buff_size;
   if (n < d)
     return FALSE;
-  next_buff_size= (ulonglong) ((double) buff_size / n * d);
+  next_buff_size= (size_t) ((double) buff_size / n * d);
   set_if_bigger(next_buff_size, min_buff_size);
   buff_size= next_buff_size;
   return realloc_buffer();
@@ -2407,7 +2407,7 @@ inline bool JOIN_CACHE::check_match(uchar *rec_ptr)
 
 enum_nested_loop_state JOIN_CACHE::join_null_complements(bool skip_last)
 {
-  uint cnt;
+  ulonglong cnt;
   enum_nested_loop_state rc= NESTED_LOOP_OK;
   bool is_first_inner= join_tab == join_tab->first_unmatched;
 

@@ -216,13 +216,13 @@ protected:
     The expected length of a record in the join buffer together with
     all prefixes and postfixes
   */
-  ulong avg_record_length;
+  size_t avg_record_length;
 
   /* The expected size of the space per record in the auxiliary buffer */
-  ulong avg_aux_buffer_incr;
+  size_t avg_aux_buffer_incr;
 
   /* Expected join buffer space used for one record */
-  ulong space_per_record;
+  size_t space_per_record;
 
   /* Pointer to the beginning of the join buffer */
   uchar *buff;
@@ -230,26 +230,26 @@ protected:
     Size of the entire memory allocated for the join buffer.
     Part of this memory may be reserved for the auxiliary buffer.
   */
-  ulong buff_size;
+  size_t buff_size;
   /* The minimal join buffer size when join buffer still makes sense to use */
-  ulong min_buff_size;
+  size_t min_buff_size;
   /* The maximum expected size if the join buffer to be used */
-  ulong max_buff_size;
+  size_t max_buff_size;
   /* Size of the auxiliary buffer */
-  ulong aux_buff_size;
+  size_t aux_buff_size;
 
   /* The number of records put into the join buffer */
-  ulong records;
+  size_t records;
   /*
     The number of records in the fully refilled join buffer of
    the minimal size equal to min_buff_size
   */
-  ulong min_records;
+  size_t min_records;
   /*
     The maximum expected number of records to be put in the join buffer
     at one refill
   */
-  ulong max_records;
+  size_t max_records;
 
   /*
     Pointer to the current position in the join buffer.
@@ -578,7 +578,7 @@ public:
   /* Get the current size of the cache join buffer */
   ulong get_join_buffer_size() { return buff_size; }
   /* Set the size of the cache join buffer to a new value */
-  void set_join_buffer_size(ulong sz) { buff_size= sz; }
+  void set_join_buffer_size(size_t sz) { buff_size= sz; }
 
   /* Get the minimum possible size of the cache join buffer */
   virtual ulong get_min_join_buffer_size();
@@ -1259,7 +1259,7 @@ protected:
     Get the number of ranges in the cache buffer passed to the MRR
     interface. For each record its own range is passed.
   */
-  uint get_number_of_ranges_for_mrr() { return records; }
+  uint get_number_of_ranges_for_mrr() { return (uint)records; }
 
   /*
     Setup the MRR buffer as the space between the last record put

@@ -1724,7 +1724,7 @@ bool JOIN::shrink_join_buffers(JOIN_TAB *jt,
       DBUG_ASSERT(cache);
       if (needed_space < cache->get_min_join_buffer_size())
         return TRUE;
-      cache->set_join_buffer_size(needed_space);
+      cache->set_join_buffer_size((size_t)needed_space);
 
       return FALSE;
     }

@@ -290,17 +290,17 @@ static double get_merge_many_buffs_cost(uint *buffer,
     these will be random seeks.
 */
 
-double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size,
+double Unique::get_use_cost(uint *buffer, size_t nkeys, uint key_size,
                             ulonglong max_in_memory_size,
                             uint compare_factor,
                             bool intersect_fl, bool *in_memory)
 {
-  ulong max_elements_in_tree;
-  ulong last_tree_elems;
+  size_t max_elements_in_tree;
+  size_t last_tree_elems;
   int n_full_trees; /* number of trees in unique - 1 */
   double result;
 
-  max_elements_in_tree= ((ulong) max_in_memory_size /
+  max_elements_in_tree= ((size_t) max_in_memory_size /
                          ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size));
 
   n_full_trees= nkeys / max_elements_in_tree;