Fix some -Wsign-conversion
InnoDB was using int64_t instead of ha_rows (unsigned 64-bit).
parent baa5a43d8c
commit b2c4740034
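The diff follows two patterns: pointer differences (signed ptrdiff_t) that end up in size_t or my_off_t results now get an explicit cast, and InnoDB's row estimates move from the signed int64_t to the unsigned ha_rows type so the arithmetic stays in one signedness. Below is a minimal standalone sketch of both patterns; it is illustrative only, not code from this commit, and the names ha_rows_like, bytes_in_buffer and scale_estimate are made up for the example.

// Illustrative sketch only (not code from this commit): the two
// -Wsign-conversion patterns being fixed. Compile with:
//   g++ -std=c++11 -Wall -Wsign-conversion sketch.cc
#include <cstddef>
#include <cstdint>

// Stand-in for ha_rows, which my_base.h defines as an unsigned 64-bit
// row count; the alias name here is invented for the example.
typedef uint64_t ha_rows_like;

// Pattern 1: a pointer difference is ptrdiff_t (signed); returning it as
// size_t is an implicit signed-to-unsigned conversion that
// -Wsign-conversion flags, so the conversion is made explicit.
static size_t bytes_in_buffer(const unsigned char* read_pos,
                              const unsigned char* read_end)
{
  return (size_t) (read_end - read_pos);
}

// Pattern 2: keep row-count arithmetic entirely in the unsigned type
// instead of mixing a signed int64_t accumulator with unsigned operands.
static ha_rows_like scale_estimate(ha_rows_like rows_on_prev_level,
                                   ha_rows_like rows, unsigned pages_read)
{
  return rows_on_prev_level * rows / pages_read;
}

int main()
{
  unsigned char buf[32];
  (void) bytes_in_buffer(buf, buf + sizeof buf);
  (void) scale_estimate(100, 50, 5);
  return 0;
}

The diff below applies these fixes across the server headers and InnoDB.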
@@ -1093,7 +1093,7 @@ my_well_formed_length(CHARSET_INFO *cs, const char *b, const char *e,
   MY_STRCOPY_STATUS status;
   (void) cs->cset->well_formed_char_length(cs, b, e, nchars, &status);
   *error= status.m_well_formed_error_pos == NULL ? 0 : 1;
-  return status.m_source_end_pos - b;
+  return (size_t) (status.m_source_end_pos - b);
 }
 
 
@@ -573,21 +573,21 @@ static inline my_bool my_b_write_byte(IO_CACHE *info, uchar chr)
 static inline size_t my_b_fill(IO_CACHE *info)
 {
   info->read_pos= info->read_end;
-  return _my_b_read(info,0,0) ? 0 : info->read_end - info->read_pos;
+  return _my_b_read(info,0,0) ? 0 : (size_t) (info->read_end - info->read_pos);
 }
 
 static inline my_off_t my_b_tell(const IO_CACHE *info)
 {
   if (info->type == WRITE_CACHE) {
-    return info->pos_in_file + (info->write_pos - info->request_pos);
+    return info->pos_in_file + (my_off_t)(info->write_pos - info->request_pos);
 
   }
-  return info->pos_in_file + (info->read_pos - info->request_pos);
+  return info->pos_in_file + (my_off_t) (info->read_pos - info->request_pos);
 }
 
 static inline my_off_t my_b_write_tell(const IO_CACHE *info)
 {
-  return info->pos_in_file + (info->write_pos - info->write_buffer);
+  return info->pos_in_file + (my_off_t) (info->write_pos - info->write_buffer);
 }
 
 static inline uchar* my_b_get_buffer_start(const IO_CACHE *info)
@@ -597,7 +597,7 @@ static inline uchar* my_b_get_buffer_start(const IO_CACHE *info)
 
 static inline size_t my_b_get_bytes_in_buffer(const IO_CACHE *info)
 {
-  return info->read_end - info->request_pos;
+  return (size_t) (info->read_end - info->request_pos);
 }
 
 static inline my_off_t my_b_get_pos_in_file(const IO_CACHE *info)
@@ -608,9 +608,9 @@ static inline my_off_t my_b_get_pos_in_file(const IO_CACHE *info)
 static inline size_t my_b_bytes_in_cache(const IO_CACHE *info)
 {
   if (info->type == WRITE_CACHE) {
-    return info->write_end - info->write_pos;
+    return (size_t) (info->write_end - info->write_pos);
   }
-  return info->read_end - info->read_pos;
+  return (size_t) (info->read_end - info->read_pos);
 }
 
 int my_b_copy_to_file(IO_CACHE *cache, FILE *file);
@@ -104,7 +104,8 @@ typedef struct st_mysql_time_status
 
 static inline void my_time_status_init(MYSQL_TIME_STATUS *status)
 {
-  status->warnings= status->precision= 0;
+  status->warnings= 0;
+  status->precision= 0;
 }
 
 my_bool check_date(const MYSQL_TIME *ltime, my_bool not_zero_date,
@@ -4199,7 +4199,7 @@ public:
   {
     // following assert is redundant, because fixed=1 assigned in constructor
     DBUG_ASSERT(fixed == 1);
-    ulonglong value= (ulonglong) Item_hex_hybrid::val_int();
+    longlong value= Item_hex_hybrid::val_int();
     int2my_decimal(E_DEC_FATAL_ERROR, value, TRUE, decimal_value);
     return decimal_value;
   }
@@ -141,16 +141,16 @@ public:
   };
 };
 
-/* An iterator to quickly walk over bits in unlonglong bitmap. */
+/* An iterator to quickly walk over bits in ulonglong bitmap. */
 class Table_map_iterator
 {
   ulonglong bmp;
   uint no;
 public:
   Table_map_iterator(ulonglong t) : bmp(t), no(0) {}
-  int next_bit()
+  uint next_bit()
   {
-    static const char last_bit[16]= {32, 0, 1, 0,
+    static const uchar last_bit[16]= {32, 0, 1, 0,
                                      2, 0, 1, 0,
                                      3, 0, 1, 0,
                                      2, 0, 1, 0};
@@ -162,10 +162,10 @@ public:
       if (!bmp)
         return BITMAP_END;
     }
-    bmp &= ~(1LL << bit);
+    bmp &= ~(1ULL << bit);
     return no + bit;
   }
-  int operator++(int) { return next_bit(); }
+  uint operator++(int) { return next_bit(); }
   enum { BITMAP_END= 64 };
 };
 
@@ -201,7 +201,10 @@ public:
   bool is_subset(const Bitmap<64>& map2) const { return !(map & ~map2.map); }
   bool is_overlapping(const Bitmap<64>& map2) const { return (map & map2.map)!= 0; }
   bool operator==(const Bitmap<64>& map2) const { return map == map2.map; }
-  char *print(char *buf) const { longlong2str(map,buf,16); return buf; }
+  char *print(char *buf) const {
+    longlong2str(longlong(map), buf, 16);
+    return buf;
+  }
   ulonglong to_ulonglong() const { return map; }
   class Iterator : public Table_map_iterator
   {
@@ -84,7 +84,7 @@ public:
     start= start_arg;
     end= end_arg;
     if (end != start)
-      TRASH_ALLOC(start, end - start);
+      TRASH_ALLOC(start, size_t(end - start));
     reset();
   }
 
@@ -224,7 +224,7 @@ public:
   {
     DBUG_ASSERT(unused_end >= unused_start);
     DBUG_ASSERT(end == unused_start);
-    TRASH_ALLOC(unused_start, unused_end - unused_start);
+    TRASH_ALLOC(unused_start, size_t(unused_end - unused_start));
     end= unused_end;
   }
   /* Return pointer to start of the memory area that is occupied by the data */
@@ -628,7 +628,7 @@ public:
   {
     char *buff= Ptr + str_length;
     char *end= ll2str(i, buff, radix, 0);
-    str_length+= (int) (end-buff);
+    str_length+= uint32(end-buff);
  }
 
  /* Inline (general) functions used by the protocol functions */
@@ -5758,43 +5758,41 @@ the number of pages between slot1->page and slot2->page (which is
 n_rows_on_prev_level). In this case we set is_n_rows_exact to FALSE.
 @return number of rows, not including the borders (exact or estimated) */
 static
-int64_t
+ha_rows
 btr_estimate_n_rows_in_range_on_level(
 /*==================================*/
	dict_index_t*	index,			/*!< in: index */
	btr_path_t*	slot1,			/*!< in: left border */
	btr_path_t*	slot2,			/*!< in: right border */
-	int64_t		n_rows_on_prev_level,	/*!< in: number of rows
+	ha_rows		n_rows_on_prev_level,	/*!< in: number of rows
						on the previous level for the
						same descend paths; used to
						determine the number of pages
						on this level */
-	ibool*		is_n_rows_exact)	/*!< out: TRUE if the returned
+	bool*		is_n_rows_exact)	/*!< out: TRUE if the returned
						value is exact i.e. not an
						estimation */
 {
-	int64_t	n_rows;
-	int	n_pages_read = 0;
+	ha_rows	n_rows = 0;
+	uint	n_pages_read = 0;
	ulint	level;
 
-	n_rows = 0;
-
	/* Assume by default that we will scan all pages between
	slot1->page_no and slot2->page_no. */
-	*is_n_rows_exact = TRUE;
+	*is_n_rows_exact = true;
 
	/* Add records from slot1->page_no which are to the right of
	the record which serves as a left border of the range, if any
	(we don't include the record itself in this count). */
	if (slot1->nth_rec <= slot1->n_recs) {
-		n_rows += int64_t(slot1->n_recs - slot1->nth_rec);
+		n_rows += slot1->n_recs - slot1->nth_rec;
	}
 
	/* Add records from slot2->page_no which are to the left of
	the record which servers as a right border of the range, if any
	(we don't include the record itself in this count). */
	if (slot2->nth_rec > 1) {
-		n_rows += int64_t(slot2->nth_rec) - 1;
+		n_rows += slot2->nth_rec - 1;
	}
 
	/* Count the records in the pages between slot1->page_no and
@@ -5897,7 +5895,7 @@ btr_estimate_n_rows_in_range_on_level(
 
 inexact:
 
-	*is_n_rows_exact = FALSE;
+	*is_n_rows_exact = false;
 
	/* We did interrupt before reaching slot2->page */
 
@@ -5905,8 +5903,7 @@ inexact:
		/* The number of pages on this level is
		n_rows_on_prev_level, multiply it by the
		average number of recs per page so far */
-		n_rows = n_rows_on_prev_level
-			* n_rows / n_pages_read;
+		n_rows = n_rows_on_prev_level * n_rows / n_pages_read;
	} else {
		/* The tree changed before we could even
		start with slot1->page_no */
@@ -5925,7 +5922,7 @@ static const unsigned rows_in_range_max_retries = 4;
 /** We pretend that a range has that many records if the tree keeps changing
 for rows_in_range_max_retries retries while we try to estimate the records
 in a given range. */
-static const int64_t rows_in_range_arbitrary_ret_val = 10;
+static const ha_rows rows_in_range_arbitrary_ret_val = 10;
 
 /** Estimates the number of rows in a given index range.
 @param[in]	index	index
@@ -5942,7 +5939,7 @@ rows_in_range_arbitrary_ret_val as a result (if
 nth_attempt >= rows_in_range_max_retries and the tree is modified between
 the two dives). */
 static
-int64_t
+ha_rows
 btr_estimate_n_rows_in_range_low(
	dict_index_t*	index,
	const dtuple_t*	tuple1,
@@ -5956,16 +5953,16 @@ btr_estimate_n_rows_in_range_low(
	btr_cur_t	cursor;
	btr_path_t*	slot1;
	btr_path_t*	slot2;
-	ibool		diverged;
-	ibool		diverged_lot;
+	bool		diverged;
+	bool		diverged_lot;
	ulint		divergence_level;
-	int64_t		n_rows;
-	ibool		is_n_rows_exact;
+	ha_rows		n_rows;
+	bool		is_n_rows_exact;
	ulint		i;
	mtr_t		mtr;
-	int64_t		table_n_rows;
+	ha_rows		table_n_rows;
 
-	table_n_rows = int64_t(dict_table_get_n_rows(index->table));
+	table_n_rows = dict_table_get_n_rows(index->table);
 
	/* Below we dive to the two records specified by tuple1 and tuple2 and
	we remember the entire dive paths from the tree root. The place where
@@ -6099,16 +6096,16 @@ btr_estimate_n_rows_in_range_low(
	/* We have the path information for the range in path1 and path2 */
 
	n_rows = 0;
-	is_n_rows_exact = TRUE;
+	is_n_rows_exact = true;
 
	/* This becomes true when the two paths do not pass through the
	same pages anymore. */
-	diverged = FALSE;
+	diverged = false;
 
	/* This becomes true when the paths are not the same or adjacent
	any more. This means that they pass through the same or
	neighboring-on-the-same-level pages only. */
-	diverged_lot = FALSE;
+	diverged_lot = false;
 
	/* This is the level where paths diverged a lot. */
	divergence_level = 1000000;
@@ -6231,21 +6228,17 @@ btr_estimate_n_rows_in_range_low(
				return(rows_in_range_arbitrary_ret_val);
			}
 
-			const int64_t	ret =
-				btr_estimate_n_rows_in_range_low(
-					index, tuple1, mode1,
-					tuple2, mode2, nth_attempt + 1);
-
-			return(ret);
+			return btr_estimate_n_rows_in_range_low(
+				index, tuple1, mode1,
+				tuple2, mode2, nth_attempt + 1);
		}
 
-		diverged = TRUE;
+		diverged = true;
 
		if (slot1->nth_rec < slot2->nth_rec) {
			/* We do not count the borders (nor the left
			nor the right one), thus "- 1". */
-			n_rows = int64_t(slot2->nth_rec
-					 - slot1->nth_rec) - 1;
+			n_rows = slot2->nth_rec - slot1->nth_rec - 1;
 
			if (n_rows > 0) {
				/* There is at least one row between
@@ -6253,7 +6246,7 @@ btr_estimate_n_rows_in_range_low(
				and slot2, so on the level below the
				slots will point to non-adjacent
				pages. */
-				diverged_lot = TRUE;
+				diverged_lot = true;
				divergence_level = i;
			}
		} else {
@@ -6275,18 +6268,18 @@ btr_estimate_n_rows_in_range_low(
			if (slot1->nth_rec < slot1->n_recs
			    || slot2->nth_rec > 1) {
 
-				diverged_lot = TRUE;
+				diverged_lot = true;
				divergence_level = i;
 
				n_rows = 0;
 
				if (slot1->nth_rec < slot1->n_recs) {
-					n_rows += int64_t(slot1->n_recs
-							  - slot1->nth_rec);
+					n_rows += slot1->n_recs
+						- slot1->nth_rec;
				}
 
				if (slot2->nth_rec > 1) {
-					n_rows += int64_t(slot2->nth_rec) - 1;
+					n_rows += slot2->nth_rec - 1;
				}
			}
		} else if (diverged_lot) {
@@ -6305,7 +6298,7 @@ btr_estimate_n_rows_in_range_low(
 @param[in]	tuple2	range end, may also be empty tuple
 @param[in]	mode2	search mode for range end
 @return estimated number of rows */
-int64_t
+ha_rows
 btr_estimate_n_rows_in_range(
	dict_index_t*	index,
	const dtuple_t*	tuple1,
@@ -6313,10 +6306,8 @@ btr_estimate_n_rows_in_range(
	const dtuple_t*	tuple2,
	page_cur_mode_t	mode2)
 {
-	const int64_t	ret = btr_estimate_n_rows_in_range_low(
-		index, tuple1, mode1, tuple2, mode2, 1 /* first attempt */);
-
-	return(ret);
+	return btr_estimate_n_rows_in_range_low(
+		index, tuple1, mode1, tuple2, mode2, 1);
 }
 
 /*******************************************************************//**
@@ -1843,7 +1843,7 @@ rtr_rec_cal_increase(
 @param[in]	tuple	range tuple containing mbr, may also be empty tuple
 @param[in]	mode	search mode
 @return estimated number of rows */
-int64_t
+ha_rows
 rtr_estimate_n_rows_in_range(
	dict_index_t*	index,
	const dtuple_t*	tuple,
@@ -1994,6 +1994,5 @@ rtr_estimate_n_rows_in_range(
		return(HA_POS_ERROR);
	}
 
-	return(static_cast<int64_t>(dict_table_get_n_rows(index->table)
-				    * area / n_recs));
+	return dict_table_get_n_rows(index->table) * area / n_recs;
 }
@@ -13383,7 +13383,7 @@ ha_innobase::records_in_range(
	dict_index_t*	index;
	dtuple_t*	range_start;
	dtuple_t*	range_end;
-	int64_t		n_rows;
+	ha_rows		n_rows;
	page_cur_mode_t	mode1;
	page_cur_mode_t	mode2;
	mem_heap_t*	heap;
@@ -28,6 +28,7 @@ Created 10/16/1994 Heikki Tuuri
 #define btr0cur_h
 
 #include "univ.i"
+#include "my_base.h"
 #include "dict0dict.h"
 #include "page0cur.h"
 #include "btr0types.h"
@@ -600,7 +601,7 @@ btr_cur_parse_del_mark_set_sec_rec(
 @param[in]	tuple2	range end, may also be empty tuple
 @param[in]	mode2	search mode for range end
 @return estimated number of rows */
-int64_t
+ha_rows
 btr_estimate_n_rows_in_range(
	dict_index_t*	index,
	const dtuple_t*	tuple1,
@@ -28,6 +28,7 @@ Created 2013/03/27 Jimmy Yang and Allen Lai
 #define gis0rtree_h
 
 #include "univ.i"
+#include "my_base.h"
 
 #include "data0type.h"
 #include "data0types.h"
@@ -543,7 +544,7 @@ rtr_info_reinit_in_cursor(
 @param[in]	tuple	range tuple containing mbr, may also be empty tuple
 @param[in]	mode	search mode
 @return estimated number of rows */
-int64_t
+ha_rows
 rtr_estimate_n_rows_in_range(
	dict_index_t*	index,
	const dtuple_t*	tuple,
@@ -195,7 +195,7 @@ trx_sysf_create(
	ut_a(ptr <= page + (srv_page_size - FIL_PAGE_DATA_END));
 
	/* Initialize all of the page. This part used to be uninitialized. */
-	memset(ptr, 0, srv_page_size - FIL_PAGE_DATA_END + page - ptr);
+	memset(ptr, 0, srv_page_size - FIL_PAGE_DATA_END + size_t(page - ptr));
 
	mlog_log_string(TRX_SYS + page, srv_page_size - FIL_PAGE_DATA_END
			- TRX_SYS, mtr);