A few small MySQL bugs/issues that impact the engines, as discussed in the SE summit:

* remove handler::index_read_last()
* create handler::keyread_read_time() (was get_index_only_read_time() in opt_range.cc)
* ha_show_status() allows the engine's show_status() to fail
* remove HTON_FLUSH_AFTER_RENAME
* fix key_cmp_if_same() to work for floats and doubles
* set table->status in the server, don't force engines to do it
* increment status vars in the server, don't force engines to do it

mysql-test/r/status_user.result:
  correct test results - InnoDB was wrongly counting internal index searches as handler_read_* calls
sql/ha_partition.cc:
  compensate for the handler incrementing status counters - we want to count only calls to the underlying engines
sql/handler.h:
  inline methods moved to sql_class.h
sql/key.cc:
  simplify the check
sql/opt_range.cc:
  move get_index_only_read_time() to the handler class
sql/sp.cc:
  don't use a key that's stored in the record buffer - the engine can overwrite the buffer with anything, destroying the key
sql/sql_class.h:
  inline handler methods that need to see the THD and TABLE definitions
sql/sql_select.cc:
  no ha_index_read_last_map anymore
sql/sql_table.cc:
  remove HTON_FLUSH_AFTER_RENAME
sql/table.cc:
  set HA_CAN_MEMCMP as appropriate
sql/tztime.cc:
  don't use a key that's stored in the record buffer - the engine can overwrite the buffer with anything, destroying the key
storage/myisam/ha_myisam.cc:
  engines don't need to update table->status or use ha_statistic_increment anymore
storage/myisam/ha_myisam.h:
  index_read_last_map is no more
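A note on the keyread_read_time() bullet: the cost model it implements (inherited from get_index_only_read_time() in opt_range.cc, visible in the diff below) assumes half-full index blocks and one random seek per block, so the estimate is simply the number of index blocks needed to fetch the requested rows. Below is a minimal standalone sketch of that arithmetic; the block size, key length, ref length and row count are made-up example values, not anything taken from this commit.

#include <cstdio>

// Sketch of the index-only ("keyread") cost estimate: index blocks are
// assumed to be half full, and the cost is the number of blocks that must
// be read to fetch `rows` keys.
static double keyread_read_time_sketch(unsigned block_size, unsigned key_length,
                                       unsigned ref_length, unsigned long long rows)
{
  // how many (key, row-reference) pairs fit into half a block, at least one
  unsigned keys_per_block= block_size / 2 / (key_length + ref_length) + 1;
  // number of blocks needed, rounded up
  return (double) (rows + keys_per_block - 1) / (double) keys_per_block;
}

int main()
{
  // hypothetical numbers: 16K blocks, 40-byte keys, 6-byte row refs, 1M rows
  std::printf("estimated cost: %g block reads\n",
              keyread_read_time_sketch(16384, 40, 6, 1000000));
  return 0;
}

With those hypothetical numbers about 179 key+ref pairs fit in half a block, so a million rows come out to roughly 5,600 block reads, which the optimizer then compares against the cost of a full scan.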
Commit: ac6b3c4430
Parent: 59eb4f6aa0
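Two of the bullets above - setting table->status in the server and incrementing status variables in the server - boil down to one wrapper pattern: the non-virtual ha_*() methods (now inlined in sql_class.h, see the diff below) do the bookkeeping once, and the engine's virtual method only returns an error code. Here is a toy, self-contained illustration of that pattern; the class and member names are invented for the example and are not MySQL's real types.

#include <cstdio>

// Invented stand-in for the handler class, only to show the wrapper pattern:
// statistics and "row found" status are maintained by the server-side wrapper,
// so individual storage engines no longer have to do it themselves.
struct toy_handler
{
  unsigned long read_next_count= 0;  // plays the role of SSV::ha_read_next_count
  bool last_row_found= false;        // plays the role of table->status

  // what a storage engine would implement; non-zero means "no more rows"
  virtual int index_next() { return 1; }
  virtual ~toy_handler() {}

  // what the server calls: counting and status handling live here, once
  int ha_index_next()
  {
    ++read_next_count;
    int error= index_next();
    last_row_found= (error == 0);
    return error;
  }
};

int main()
{
  toy_handler h;
  int err= h.ha_index_next();
  std::printf("error=%d reads=%lu found=%d\n",
              err, h.read_next_count, (int) h.last_row_found);
  return 0;
}

The per-file diff follows.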
include/my_base.h

@@ -300,6 +300,7 @@ enum ha_base_keytype {
 */
 #define HA_END_SPACE_ARE_EQUAL 512
 #define HA_BIT_PART 1024
+#define HA_CAN_MEMCMP 2048 /* internal, never stored in frm */
 
 /* optionbits for database */
 #define HA_OPTION_PACK_RECORD 1
mysql-test/r/status_user.result

@@ -100,8 +100,8 @@ Handler_commit 19
 Handler_delete 1
 Handler_discover 0
 Handler_prepare 18
-Handler_read_first 1
-Handler_read_key 8
+Handler_read_first 0
+Handler_read_key 3
 Handler_read_next 0
 Handler_read_prev 0
 Handler_read_rnd 0
@@ -113,7 +113,7 @@ Handler_update 5
 Handler_write 7
 select variable_value - @global_read_key as "handler_read_key" from information_schema.global_status where variable_name="handler_read_key";
 handler_read_key
-8
+3
 set @@global.userstat=0;
 select * from information_schema.index_statistics;
 TABLE_SCHEMA TABLE_NAME INDEX_NAME ROWS_READ
sql/ha_partition.cc

@@ -3636,6 +3636,7 @@ int ha_partition::rnd_next(uchar *buf)
   int result= HA_ERR_END_OF_FILE;
   uint part_id= m_part_spec.start_part;
   DBUG_ENTER("ha_partition::rnd_next");
+  decrement_statistics(&SSV::ha_read_rnd_next_count);
 
   if (NO_CURRENT_PART_ID == part_id)
   {
@@ -3779,6 +3780,7 @@ int ha_partition::rnd_pos(uchar * buf, uchar *pos)
   uint part_id;
   handler *file;
   DBUG_ENTER("ha_partition::rnd_pos");
+  decrement_statistics(&SSV::ha_read_rnd_count);
 
   part_id= uint2korr((const uchar *) pos);
   DBUG_ASSERT(part_id < m_tot_parts);
@@ -3991,6 +3993,7 @@ int ha_partition::index_read_map(uchar *buf, const uchar *key,
                                  enum ha_rkey_function find_flag)
 {
   DBUG_ENTER("ha_partition::index_read_map");
+  decrement_statistics(&SSV::ha_read_key_count);
   end_range= 0;
   m_index_scan_type= partition_index_read;
   m_start_key.key= key;
@@ -4119,6 +4122,7 @@ int ha_partition::common_index_read(uchar *buf, bool have_start_key)
 int ha_partition::index_first(uchar * buf)
 {
   DBUG_ENTER("ha_partition::index_first");
+  decrement_statistics(&SSV::ha_read_first_count);
 
   end_range= 0;
   m_index_scan_type= partition_index_first;
@@ -4150,6 +4154,7 @@ int ha_partition::index_first(uchar * buf)
 int ha_partition::index_last(uchar * buf)
 {
   DBUG_ENTER("ha_partition::index_last");
+  decrement_statistics(&SSV::ha_read_last_count);
 
   m_index_scan_type= partition_index_last;
   DBUG_RETURN(common_first_last(buf));
@@ -4177,39 +4182,6 @@ int ha_partition::common_first_last(uchar *buf)
 }
 
 
-/*
-  Read last using key
-
-  SYNOPSIS
-    index_read_last_map()
-    buf                 Read row in MySQL Row Format
-    key                 Key
-    keypart_map         Which part of key is used
-
-  RETURN VALUE
-    >0                  Error code
-    0                   Success
-
-  DESCRIPTION
-    This is used in join_read_last_key to optimise away an ORDER BY.
-    Can only be used on indexes supporting HA_READ_ORDER
-*/
-
-int ha_partition::index_read_last_map(uchar *buf, const uchar *key,
-                                      key_part_map keypart_map)
-{
-  DBUG_ENTER("ha_partition::index_read_last");
-
-  m_ordered= TRUE; // Safety measure
-  end_range= 0;
-  m_index_scan_type= partition_index_read_last;
-  m_start_key.key= key;
-  m_start_key.keypart_map= keypart_map;
-  m_start_key.flag= HA_READ_PREFIX_LAST;
-  DBUG_RETURN(common_index_read(buf, TRUE));
-}
-
-
 /*
   Read next record in a forward index scan
 
@@ -4228,6 +4200,7 @@ int ha_partition::index_read_last_map(uchar *buf, const uchar *key,
 int ha_partition::index_next(uchar * buf)
 {
   DBUG_ENTER("ha_partition::index_next");
+  decrement_statistics(&SSV::ha_read_next_count);
 
   /*
     TODO(low priority):
@@ -4264,6 +4237,7 @@ int ha_partition::index_next(uchar * buf)
 int ha_partition::index_next_same(uchar *buf, const uchar *key, uint keylen)
 {
   DBUG_ENTER("ha_partition::index_next_same");
+  decrement_statistics(&SSV::ha_read_next_count);
 
   DBUG_ASSERT(keylen == m_start_key.length);
   DBUG_ASSERT(m_index_scan_type != partition_index_last);
@@ -4291,6 +4265,7 @@ int ha_partition::index_next_same(uchar *buf, const uchar *key, uint keylen)
 int ha_partition::index_prev(uchar * buf)
 {
   DBUG_ENTER("ha_partition::index_prev");
+  decrement_statistics(&SSV::ha_read_prev_count);
 
   /* TODO: read comment in index_next */
   DBUG_ASSERT(m_index_scan_type != partition_index_first);
@@ -4704,12 +4679,6 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
     error= file->ha_index_last(rec_buf_ptr);
     reverse_order= TRUE;
     break;
-  case partition_index_read_last:
-    error= file->ha_index_read_last_map(rec_buf_ptr,
-                                        m_start_key.key,
-                                        m_start_key.keypart_map);
-    reverse_order= TRUE;
-    break;
   case partition_read_range:
   {
     /*
sql/ha_partition.h

@@ -64,9 +64,8 @@ private:
     partition_index_first= 1,
     partition_index_first_unordered= 2,
     partition_index_last= 3,
-    partition_index_read_last= 4,
-    partition_read_range = 5,
-    partition_no_index_scan= 6
+    partition_read_range = 4,
+    partition_no_index_scan= 5
   };
   /* Data for the partition handler */
   int m_mode; // Open mode
@@ -458,8 +457,6 @@ public:
   virtual int index_first(uchar * buf);
   virtual int index_last(uchar * buf);
   virtual int index_next_same(uchar * buf, const uchar * key, uint keylen);
-  virtual int index_read_last_map(uchar * buf, const uchar * key,
-                                  key_part_map keypart_map);
 
   /*
     read_first_row is virtual method but is only implemented by
sql/handler.cc

@@ -2044,12 +2044,6 @@ handler *handler::clone(MEM_ROOT *mem_root)
 }
 
 
-
-void handler::ha_statistic_increment(ulong SSV::*offset) const
-{
-  status_var_increment(table->in_use->status_var.*offset);
-}
-
 void **handler::ha_data(THD *thd) const
 {
   return thd_ha_data(thd, ht);
@@ -2131,8 +2125,6 @@ int handler::read_first_row(uchar * buf, uint primary_key)
   register int error;
   DBUG_ENTER("handler::read_first_row");
 
-  ha_statistic_increment(&SSV::ha_read_first_count);
-
   /*
     If there is very few deleted rows in the table, find the first row by
     scanning the table.
@@ -2142,14 +2134,14 @@ int handler::read_first_row(uchar * buf, uint primary_key)
       !(index_flags(primary_key, 0, 0) & HA_READ_ORDER))
   {
     (void) ha_rnd_init(1);
-    while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) ;
+    while ((error= ha_rnd_next(buf)) == HA_ERR_RECORD_DELETED) ;
     (void) ha_rnd_end();
   }
   else
   {
     /* Find the first row through the primary key */
     if (!(error = ha_index_init(primary_key, 0)))
-      error= index_first(buf);
+      error= ha_index_first(buf);
     (void) ha_index_end();
   }
   DBUG_RETURN(error);
@@ -2520,10 +2512,10 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
   table->mark_columns_used_by_index_no_reset(table->s->next_number_index,
                                              table->read_set);
   column_bitmaps_signal();
-  index_init(table->s->next_number_index, 1);
+  ha_index_init(table->s->next_number_index, 1);
   if (table->s->next_number_keypart == 0)
   { // Autoincrement at key-start
-    error=index_last(table->record[1]);
+    error=ha_index_last(table->record[1]);
     /*
       MySQL implicitely assumes such method does locking (as MySQL decides to
       use nr+increment without checking again with the handler, in
@@ -2555,7 +2547,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
   else
     nr= ((ulonglong) table->next_number_field->
          val_int_offset(table->s->rec_buff_length)+1);
-  index_end();
+  ha_index_end();
   (void) extra(HA_EXTRA_NO_KEYREAD);
   *first_value= nr;
 }
@@ -3110,6 +3102,33 @@ int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt)
   return update_frm_version(table);
 }
 
+/*
+  Calculate cost of 'index only' scan for given index and number of records.
+
+  SYNOPSIS
+    handler->keyread_read_time()
+      index    key to read
+      ranges   number of ranges
+      rows     #of records to read
+
+  NOTES
+    It is assumed that we will read trough all key ranges and that all
+    key blocks are half full (normally things are much better). It is also
+    assumed that each time we read the next key from the index, the handler
+    performs a random seek, thus the cost is proportional to the number of
+    blocks read.
+*/
+
+double handler::keyread_read_time(uint index, uint ranges, ha_rows rows)
+{
+  double read_time;
+  uint keys_per_block= (stats.block_size/2/
+                        (table->key_info[index].key_length + ref_length) + 1);
+  read_time=((double) (rows+keys_per_block-1)/ (double) keys_per_block);
+  return read_time;
+}
+
+
 /**
   A helper function to mark a transaction read-write,
   if it is started.
@@ -3503,7 +3522,7 @@ int ha_enable_transaction(THD *thd, bool on)
 int handler::index_next_same(uchar *buf, const uchar *key, uint keylen)
 {
   int error;
-  DBUG_ENTER("index_next_same");
+  DBUG_ENTER("handler::index_next_same");
   if (!(error=index_next(buf)))
   {
     my_ptrdiff_t ptrdiff= buf - table->record[0];
@@ -3548,6 +3567,7 @@ int handler::index_next_same(uchar *buf, const uchar *key, uint keylen)
       key_part->field->move_field_offset(-ptrdiff);
     }
   }
+  DBUG_PRINT("return",("%i", error));
   DBUG_RETURN(error);
 }
 
@@ -4598,6 +4618,8 @@ bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat)
 
   if (!result)
     my_eof(thd);
+  else if (!thd->is_error())
+    my_error(ER_GET_ERRNO, MYF(0), 0);
   return result;
 }
 
@@ -4798,6 +4820,7 @@ int handler::ha_write_row(uchar *buf)
   DBUG_ENTER("handler::ha_write_row");
 
   mark_trx_read_write();
+  increment_statistics(&SSV::ha_write_count);
 
   if (unlikely(error= write_row(buf)))
     DBUG_RETURN(error);
@@ -4820,6 +4843,7 @@ int handler::ha_update_row(const uchar *old_data, uchar *new_data)
   DBUG_ASSERT(new_data == table->record[0]);
 
   mark_trx_read_write();
+  increment_statistics(&SSV::ha_update_count);
 
   if (unlikely(error= update_row(old_data, new_data)))
     return error;
@@ -4835,6 +4859,7 @@ int handler::ha_delete_row(const uchar *buf)
   Log_func *log_func= Delete_rows_log_event::binlog_row_logging_function;
 
   mark_trx_read_write();
+  increment_statistics(&SSV::ha_delete_count);
 
   if (unlikely(error= delete_row(buf)))
     return error;
sql/handler.h (128 lines changed)

@@ -843,7 +843,6 @@ inline LEX_STRING *hton_name(const handlerton *hton)
 #define HTON_ALTER_NOT_SUPPORTED (1 << 1) //Engine does not support alter
 #define HTON_CAN_RECREATE (1 << 2) //Delete all is used fro truncate
 #define HTON_HIDDEN (1 << 3) //Engine does not appear in lists
-#define HTON_FLUSH_AFTER_RENAME (1 << 4)
 #define HTON_NOT_USER_SELECTABLE (1 << 5)
 #define HTON_TEMPORARY_NOT_SUPPORTED (1 << 6) //Having temporary tables not supported
 #define HTON_SUPPORT_LOG_TABLES (1 << 7) //Engine supports log tables
@@ -1439,6 +1438,7 @@ public:
   { return ulonglong2double(stats.data_file_length) / IO_SIZE + 2; }
   virtual double read_time(uint index, uint ranges, ha_rows rows)
   { return rows2double(ranges+rows); }
+  virtual double keyread_read_time(uint index, uint ranges, ha_rows rows);
   virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; }
   bool has_transactions()
   { return (ha_table_flags() & HA_NO_TRANSACTIONS) == 0; }
@@ -1572,17 +1572,6 @@ protected:
   virtual int index_last(uchar * buf)
   { return HA_ERR_WRONG_COMMAND; }
   virtual int index_next_same(uchar *buf, const uchar *key, uint keylen);
-  /**
-    @brief
-    The following functions works like index_read, but it find the last
-    row with the current key value or prefix.
-  */
-  virtual int index_read_last_map(uchar * buf, const uchar * key,
-                                  key_part_map keypart_map)
-  {
-    uint key_len= calculate_key_len(table, active_index, key, keypart_map);
-    return index_read_last(buf, key, key_len);
-  }
   inline void update_index_statistics()
   {
     index_rows_read[active_index]++;
@@ -1593,68 +1582,15 @@ public:
   /* Similar functions like the above, but does statistics counting */
   inline int ha_index_read_map(uchar * buf, const uchar * key,
                                key_part_map keypart_map,
-                               enum ha_rkey_function find_flag)
-  {
-    int error= index_read_map(buf, key, keypart_map, find_flag);
-    if (!error)
-      update_index_statistics();
-    return error;
-  }
+                               enum ha_rkey_function find_flag);
   inline int ha_index_read_idx_map(uchar * buf, uint index, const uchar * key,
                                    key_part_map keypart_map,
-                                   enum ha_rkey_function find_flag)
-  {
-    int error= index_read_idx_map(buf, index, key, keypart_map, find_flag);
-    if (!error)
-    {
-      rows_read++;
-      index_rows_read[index]++;
-    }
-    return error;
-  }
-  inline int ha_index_next(uchar * buf)
-  {
-    int error= index_next(buf);
-    if (!error)
-      update_index_statistics();
-    return error;
-  }
-  inline int ha_index_prev(uchar * buf)
-  {
-    int error= index_prev(buf);
-    if (!error)
-      update_index_statistics();
-    return error;
-  }
-  inline int ha_index_first(uchar * buf)
-  {
-    int error= index_first(buf);
-    if (!error)
-      update_index_statistics();
-    return error;
-  }
-  inline int ha_index_last(uchar * buf)
-  {
-    int error= index_last(buf);
-    if (!error)
-      update_index_statistics();
-    return error;
-  }
-  inline int ha_index_next_same(uchar *buf, const uchar *key, uint keylen)
-  {
-    int error= index_next_same(buf, key, keylen);
-    if (!error)
-      update_index_statistics();
-    return error;
-  }
-  inline int ha_index_read_last_map(uchar * buf, const uchar * key,
-                                    key_part_map keypart_map)
-  {
-    int error= index_read_last_map(buf, key, keypart_map);
-    if (!error)
-      update_index_statistics();
-    return error;
-  }
+                                   enum ha_rkey_function find_flag);
+  inline int ha_index_next(uchar * buf);
+  inline int ha_index_prev(uchar * buf);
+  inline int ha_index_first(uchar * buf);
+  inline int ha_index_last(uchar * buf);
+  inline int ha_index_next_same(uchar *buf, const uchar *key, uint keylen);
 
   virtual int read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
                                      KEY_MULTI_RANGE *ranges, uint range_count,
@@ -1688,41 +1624,11 @@ private:
 public:
 
   /* Same as above, but with statistics */
-  inline int ha_ft_read(uchar *buf)
-  {
-    int error= ft_read(buf);
-    if (!error)
-      rows_read++;
-    return error;
-  }
-  inline int ha_rnd_next(uchar *buf)
-  {
-    int error= rnd_next(buf);
-    if (!error)
-      rows_read++;
-    return error;
-  }
-  inline int ha_rnd_pos(uchar *buf, uchar *pos)
-  {
-    int error= rnd_pos(buf, pos);
-    if (!error)
-      rows_read++;
-    return error;
-  }
-  inline int ha_rnd_pos_by_record(uchar *buf)
-  {
-    int error= rnd_pos_by_record(buf);
-    if (!error)
-      rows_read++;
-    return error;
-  }
-  inline int ha_read_first_row(uchar *buf, uint primary_key)
-  {
-    int error= read_first_row(buf, primary_key);
-    if (!error)
-      rows_read++;
-    return error;
-  }
+  inline int ha_ft_read(uchar *buf);
+  inline int ha_rnd_next(uchar *buf);
+  inline int ha_rnd_pos(uchar *buf, uchar *pos);
+  inline int ha_rnd_pos_by_record(uchar *buf);
+  inline int ha_read_first_row(uchar *buf, uint primary_key);
 
   /**
     The following 3 function is only needed for tables that may be
@@ -2041,8 +1947,10 @@ public:
   virtual bool check_if_supported_virtual_columns(void) { return FALSE;}
 
 protected:
+  /* deprecated, don't use in new engines */
+  inline void ha_statistic_increment(ulong SSV::*offset) const { }
+
   /* Service methods for use by storage engines. */
-  void ha_statistic_increment(ulong SSV::*offset) const;
   void **ha_data(THD *) const;
   THD *ha_thd(void) const;
 
@@ -2068,6 +1976,8 @@ private:
     if (!mark_trx_done)
       mark_trx_read_write_part2();
   }
+  inline void increment_statistics(ulong SSV::*offset) const;
+  inline void decrement_statistics(ulong SSV::*offset) const;
 
   /*
     Low-level primitives for storage engines. These should be
@@ -2155,8 +2065,6 @@ private:
   virtual int index_read(uchar * buf, const uchar * key, uint key_len,
                          enum ha_rkey_function find_flag)
   { return HA_ERR_WRONG_COMMAND; }
-  virtual int index_read_last(uchar * buf, const uchar * key, uint key_len)
-  { return (my_errno= HA_ERR_WRONG_COMMAND); }
   /**
     This method is similar to update_row, however the handler doesn't need
    to execute the updates at this point in time. The handler can be certain
sql/key.cc

@@ -278,8 +278,7 @@ bool key_cmp_if_same(TABLE *table,const uchar *key,uint idx,uint key_length)
       key++;
      store_length--;
     }
-    if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART |
-                                   HA_BIT_PART))
+    if (!(key_part->key_part_flag & HA_CAN_MEMCMP))
     {
       if (key_part->field->key_cmp(key, key_part->length))
         return 1;
sql/opt_range.cc

@@ -707,10 +707,7 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
 static
 TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
                                          double read_time);
-static
-TRP_GROUP_MIN_MAX *get_best_group_min_max(PARAM *param, SEL_TREE *tree);
-static double get_index_only_read_time(const PARAM* param, ha_rows records,
-                                        int keynr);
+static TRP_GROUP_MIN_MAX *get_best_group_min_max(PARAM *param, SEL_TREE *tree);
 
 #ifndef DBUG_OFF
 static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
@@ -2314,9 +2311,9 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
     if (!head->covering_keys.is_clear_all())
     {
       int key_for_use= find_shortest_key(head, &head->covering_keys);
-      double key_read_time= (get_index_only_read_time(&param, records,
-                                                      key_for_use) +
-                             (double) records / TIME_FOR_COMPARE);
+      double key_read_time= param.table->file->keyread_read_time(key_for_use,
                                                                  1, records) +
+                            (double) records / TIME_FOR_COMPARE;
       DBUG_PRINT("info", ("'all'+'using index' scan will be using key %d, "
                           "read time %g", key_for_use, key_read_time));
       if (key_read_time < read_time)
@@ -3938,42 +3935,6 @@ skip_to_ror_scan:
 }
 
 
-/*
-  Calculate cost of 'index only' scan for given index and number of records.
-
-  SYNOPSIS
-    get_index_only_read_time()
-      param    parameters structure
-      records  #of records to read
-      keynr    key to read
-
-  NOTES
-    It is assumed that we will read trough the whole key range and that all
-    key blocks are half full (normally things are much better). It is also
-    assumed that each time we read the next key from the index, the handler
-    performs a random seek, thus the cost is proportional to the number of
-    blocks read.
-
-  TODO:
-    Move this to handler->read_time() by adding a flag 'index-only-read' to
-    this call. The reason for doing this is that the current function doesn't
-    handle the case when the row is stored in the b-tree (like in innodb
-    clustered index)
-*/
-
-static double get_index_only_read_time(const PARAM* param, ha_rows records,
-                                        int keynr)
-{
-  double read_time;
-  uint keys_per_block= (param->table->file->stats.block_size/2/
-                        (param->table->key_info[keynr].key_length+
-                         param->table->file->ref_length) + 1);
-  read_time=((double) (records+keys_per_block-1)/
-             (double) keys_per_block);
-  return read_time;
-}
-
-
 typedef struct st_ror_scan_info
 {
   uint idx; /* # of used key in param->keys */
@@ -4050,8 +4011,8 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
       bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr-1);
   }
   ror_scan->index_read_cost=
-    get_index_only_read_time(param, param->table->quick_rows[ror_scan->keynr],
-                             ror_scan->keynr);
+    param->table->file->keyread_read_time(ror_scan->keynr, 1,
+                                          param->table->quick_rows[ror_scan->keynr]);
   DBUG_RETURN(ror_scan);
 }
 
@@ -4886,7 +4847,8 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
         We can resolve this by only reading through this key.
         0.01 is added to avoid races between range and 'index' scan.
       */
-      found_read_time= get_index_only_read_time(param,found_records,keynr) +
+      found_read_time= param->table->file->keyread_read_time(keynr,1,
+                                                             found_records) +
                        cpu_cost + 0.01;
     }
     else
sql/sp.cc (16 lines changed)

@@ -1276,6 +1276,7 @@ sp_drop_db_routines(THD *thd, char *db)
   TABLE *table;
   int ret;
   uint key_len;
+  uchar keybuf[MAX_KEY_LENGTH];
   DBUG_ENTER("sp_drop_db_routines");
   DBUG_PRINT("enter", ("db: %s", db));
 
@@ -1285,12 +1286,14 @@ sp_drop_db_routines(THD *thd, char *db)
 
   table->field[MYSQL_PROC_FIELD_DB]->store(db, strlen(db), system_charset_info);
   key_len= table->key_info->key_part[0].store_length;
+  table->field[MYSQL_PROC_FIELD_DB]->get_key_image(keybuf, key_len, Field::itRAW);
+
+
 
   ret= SP_OK;
   table->file->ha_index_init(0, 1);
-  if (!table->file->ha_index_read_map(table->record[0],
-                                      (uchar *) table->field[MYSQL_PROC_FIELD_DB]->ptr,
-                                      (key_part_map)1, HA_READ_KEY_EXACT))
+  if (!table->file->ha_index_read_map(table->record[0], keybuf, (key_part_map)1,
+                                      HA_READ_KEY_EXACT))
   {
     int nxtres;
     bool deleted= FALSE;
@@ -1305,11 +1308,8 @@ sp_drop_db_routines(THD *thd, char *db)
         nxtres= 0;
         break;
       }
-    } while (!(nxtres= table->file->
-               ha_index_next_same(table->record[0],
-                                  (uchar *)table->field[MYSQL_PROC_FIELD_DB]->
-                                  ptr,
-                                  key_len)));
+    } while (!(nxtres= table->file->ha_index_next_same(table->record[0],
+                                                       keybuf, key_len)));
     if (nxtres != HA_ERR_END_OF_FILE)
       ret= SP_KEY_NOT_FOUND;
     if (deleted)
sql/sql_class.h (146 lines changed)

@@ -3133,4 +3133,150 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
                         STATUS_VAR *dec_var);
 void mark_transaction_to_rollback(THD *thd, bool all);
 
+/*
+  inline handler methods that need to know TABLE and THD structures
+*/
+inline void handler::increment_statistics(ulong SSV::*offset) const
+{
+  status_var_increment(table->in_use->status_var.*offset);
+}
+
+inline void handler::decrement_statistics(ulong SSV::*offset) const
+{
+  status_var_decrement(table->in_use->status_var.*offset);
+}
+
+inline int handler::ha_index_read_map(uchar * buf, const uchar * key,
+                                      key_part_map keypart_map,
+                                      enum ha_rkey_function find_flag)
+{
+  DBUG_ASSERT(inited==INDEX);
+  increment_statistics(&SSV::ha_read_key_count);
+  int error= index_read_map(buf, key, keypart_map, find_flag);
+  if (!error)
+    update_index_statistics();
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
+inline int handler::ha_index_read_idx_map(uchar * buf, uint index,
+                                          const uchar * key,
+                                          key_part_map keypart_map,
+                                          enum ha_rkey_function find_flag)
+{
+  increment_statistics(&SSV::ha_read_key_count);
+  int error= index_read_idx_map(buf, index, key, keypart_map, find_flag);
+  if (!error)
+  {
+    rows_read++;
+    index_rows_read[index]++;
+  }
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
+inline int handler::ha_index_next(uchar * buf)
+{
+  DBUG_ASSERT(inited==INDEX);
+  increment_statistics(&SSV::ha_read_next_count);
+  int error= index_next(buf);
+  if (!error)
+    update_index_statistics();
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
+inline int handler::ha_index_prev(uchar * buf)
+{
+  DBUG_ASSERT(inited==INDEX);
+  increment_statistics(&SSV::ha_read_prev_count);
+  int error= index_prev(buf);
+  if (!error)
+    update_index_statistics();
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
+inline int handler::ha_index_first(uchar * buf)
+{
+  DBUG_ASSERT(inited==INDEX);
+  increment_statistics(&SSV::ha_read_first_count);
+  int error= index_first(buf);
+  if (!error)
+    update_index_statistics();
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
+inline int handler::ha_index_last(uchar * buf)
+{
+  DBUG_ASSERT(inited==INDEX);
+  increment_statistics(&SSV::ha_read_last_count);
+  int error= index_last(buf);
+  if (!error)
+    update_index_statistics();
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
+inline int handler::ha_index_next_same(uchar *buf, const uchar *key,
+                                       uint keylen)
+{
+  DBUG_ASSERT(inited==INDEX);
+  increment_statistics(&SSV::ha_read_next_count);
+  int error= index_next_same(buf, key, keylen);
+  if (!error)
+    update_index_statistics();
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
+inline int handler::ha_ft_read(uchar *buf)
+{
+  int error= ft_read(buf);
+  if (!error)
+    rows_read++;
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
+inline int handler::ha_rnd_next(uchar *buf)
+{
+  increment_statistics(&SSV::ha_read_rnd_next_count);
+  int error= rnd_next(buf);
+  if (!error)
+    rows_read++;
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
+inline int handler::ha_rnd_pos(uchar *buf, uchar *pos)
+{
+  increment_statistics(&SSV::ha_read_rnd_count);
+  int error= rnd_pos(buf, pos);
+  if (!error)
+    rows_read++;
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
+inline int handler::ha_rnd_pos_by_record(uchar *buf)
+{
+  int error= rnd_pos_by_record(buf);
+  if (!error)
+    rows_read++;
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
+inline int handler::ha_read_first_row(uchar *buf, uint primary_key)
+{
+  int error= read_first_row(buf, primary_key);
+  if (!error)
+    rows_read++;
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
 #endif /* MYSQL_SERVER */
sql/sql_select.cc

@@ -12249,9 +12249,10 @@ join_read_last_key(JOIN_TAB *tab)
   }
   if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
     return -1;
-  if ((error= table->file->ha_index_read_last_map(table->record[0],
-                                                  tab->ref.key_buff,
-                                                  make_prev_keypart_map(tab->ref.key_parts))))
+  if ((error= table->file->ha_index_read_map(table->record[0],
+                                             tab->ref.key_buff,
+                                             make_prev_keypart_map(tab->ref.key_parts),
+                                             HA_READ_PREFIX_LAST)))
   {
     if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
       return report_error(table, error);
|
|||||||
if (write_bin_log(thd, TRUE, thd->query(), thd->query_length()))
|
if (write_bin_log(thd, TRUE, thd->query(), thd->query_length()))
|
||||||
DBUG_RETURN(TRUE);
|
DBUG_RETURN(TRUE);
|
||||||
|
|
||||||
if (ha_check_storage_engine_flag(old_db_type, HTON_FLUSH_AFTER_RENAME))
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
For the alter table to be properly flushed to the logs, we
|
|
||||||
have to open the new table. If not, we get a problem on server
|
|
||||||
shutdown. But we do not need to attach MERGE children.
|
|
||||||
*/
|
|
||||||
char path[FN_REFLEN];
|
|
||||||
TABLE *t_table;
|
|
||||||
build_table_filename(path + 1, sizeof(path) - 1, new_db, table_name, "", 0);
|
|
||||||
t_table= open_temporary_table(thd, path, new_db, tmp_name, 0);
|
|
||||||
if (t_table)
|
|
||||||
{
|
|
||||||
intern_close_table(t_table);
|
|
||||||
my_free(t_table, MYF(0));
|
|
||||||
}
|
|
||||||
else
|
|
||||||
sql_print_warning("Could not open table %s.%s after rename\n",
|
|
||||||
new_db,table_name);
|
|
||||||
ha_flush_logs(old_db_type);
|
|
||||||
}
|
|
||||||
table_list->table=0; // For query cache
|
table_list->table=0; // For query cache
|
||||||
query_cache_invalidate3(thd, table_list, 0);
|
query_cache_invalidate3(thd, table_list, 0);
|
||||||
|
|
||||||
|
sql/table.cc

@@ -1594,6 +1594,15 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
       */
       if (field->real_maybe_null())
         key_part->key_part_flag|= HA_NULL_PART;
+      /*
+        Sometimes we can compare key parts for equality with memcmp.
+        But not always.
+      */
+      if (!(key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART |
+                                       HA_BIT_PART)) &&
+          key_part->type != HA_KEYTYPE_FLOAT &&
+          key_part->type != HA_KEYTYPE_DOUBLE)
+        key_part->key_part_flag|= HA_CAN_MEMCMP;
     }
     keyinfo->usable_key_parts= usable_parts; // Filesort
 
sql/tztime.cc

@@ -1813,6 +1813,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
   uint tzid, ttid;
   my_time_t ttime;
   char buff[MAX_FIELD_WIDTH];
+  uchar keybuff[32];
   String abbr(buff, sizeof(buff), &my_charset_latin1);
   char *alloc_buff, *tz_name_buff;
   /*
@@ -1891,9 +1892,10 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
   table= tz_tables->table;
   tz_tables= tz_tables->next_local;
   table->field[0]->store((longlong) tzid, TRUE);
+  table->field[0]->get_key_image(keybuff, sizeof(keybuff), Field::itRAW);
   (void)table->file->ha_index_init(0, 1);
 
-  if (table->file->ha_index_read_map(table->record[0], table->field[0]->ptr,
+  if (table->file->ha_index_read_map(table->record[0], keybuff,
                                      HA_WHOLE_KEY, HA_READ_KEY_EXACT))
   {
     sql_print_error("Can't find description of time zone '%u'", tzid);
@@ -1918,9 +1920,10 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
   table= tz_tables->table;
   tz_tables= tz_tables->next_local;
   table->field[0]->store((longlong) tzid, TRUE);
+  table->field[0]->get_key_image(keybuff, sizeof(keybuff), Field::itRAW);
   (void)table->file->ha_index_init(0, 1);
 
-  res= table->file->ha_index_read_map(table->record[0], table->field[0]->ptr,
+  res= table->file->ha_index_read_map(table->record[0], keybuff,
                                       (key_part_map)1, HA_READ_KEY_EXACT);
   while (!res)
   {
@@ -1968,8 +1971,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
 
     tmp_tz_info.typecnt= ttid + 1;
 
-    res= table->file->ha_index_next_same(table->record[0],
-                                         table->field[0]->ptr, 4);
+    res= table->file->ha_index_next_same(table->record[0], keybuff, 4);
   }
 
   if (res != HA_ERR_END_OF_FILE)
@@ -1991,7 +1993,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
   table->field[0]->store((longlong) tzid, TRUE);
   (void)table->file->ha_index_init(0, 1);
 
-  res= table->file->ha_index_read_map(table->record[0], table->field[0]->ptr,
+  res= table->file->ha_index_read_map(table->record[0], keybuff,
                                       (key_part_map)1, HA_READ_KEY_EXACT);
   while (!res)
   {
@@ -2021,8 +2023,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
                ("time_zone_transition table: tz_id: %u tt_time: %lu tt_id: %u",
                 tzid, (ulong) ttime, ttid));
 
-    res= table->file->ha_index_next_same(table->record[0],
-                                         table->field[0]->ptr, 4);
+    res= table->file->ha_index_next_same(table->record[0], keybuff, 4);
   }
 
   /*
storage/myisam/ha_myisam.cc

@@ -776,8 +776,6 @@ int ha_myisam::close(void)
 
 int ha_myisam::write_row(uchar *buf)
 {
-  ha_statistic_increment(&SSV::ha_write_count);
-
   /* If we have a timestamp column, update it to the current time */
   if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
     table->timestamp_field->set_time();
@@ -1663,7 +1661,6 @@ bool ha_myisam::is_crashed() const
 
 int ha_myisam::update_row(const uchar *old_data, uchar *new_data)
 {
-  ha_statistic_increment(&SSV::ha_update_count);
   if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
     table->timestamp_field->set_time();
   return mi_update(file,old_data,new_data);
@@ -1671,7 +1668,6 @@ int ha_myisam::update_row(const uchar *old_data, uchar *new_data)
 
 int ha_myisam::delete_row(const uchar *buf)
 {
-  ha_statistic_increment(&SSV::ha_delete_count);
   return mi_delete(file,buf);
 }
 
@@ -1679,84 +1675,48 @@ int ha_myisam::index_read_map(uchar *buf, const uchar *key,
                               key_part_map keypart_map,
                               enum ha_rkey_function find_flag)
 {
-  DBUG_ASSERT(inited==INDEX);
-  ha_statistic_increment(&SSV::ha_read_key_count);
-  int error=mi_rkey(file, buf, active_index, key, keypart_map, find_flag);
-  table->status=error ? STATUS_NOT_FOUND: 0;
-  return error;
+  return mi_rkey(file, buf, active_index, key, keypart_map, find_flag);
 }
 
 int ha_myisam::index_read_idx_map(uchar *buf, uint index, const uchar *key,
                                   key_part_map keypart_map,
                                   enum ha_rkey_function find_flag)
 {
-  ha_statistic_increment(&SSV::ha_read_key_count);
-  int error=mi_rkey(file, buf, index, key, keypart_map, find_flag);
-  table->status=error ? STATUS_NOT_FOUND: 0;
-  return error;
-}
-
-int ha_myisam::index_read_last_map(uchar *buf, const uchar *key,
-                                   key_part_map keypart_map)
-{
-  DBUG_ENTER("ha_myisam::index_read_last");
-  DBUG_ASSERT(inited==INDEX);
-  ha_statistic_increment(&SSV::ha_read_key_count);
-  int error=mi_rkey(file, buf, active_index, key, keypart_map,
-                    HA_READ_PREFIX_LAST);
-  table->status=error ? STATUS_NOT_FOUND: 0;
-  DBUG_RETURN(error);
+  return mi_rkey(file, buf, index, key, keypart_map, find_flag);
 }
 
 int ha_myisam::index_next(uchar *buf)
 {
-  DBUG_ASSERT(inited==INDEX);
-  ha_statistic_increment(&SSV::ha_read_next_count);
-  int error=mi_rnext(file,buf,active_index);
-  table->status=error ? STATUS_NOT_FOUND: 0;
-  return error;
+  return mi_rnext(file,buf,active_index);
 }
 
 int ha_myisam::index_prev(uchar *buf)
 {
-  DBUG_ASSERT(inited==INDEX);
-  ha_statistic_increment(&SSV::ha_read_prev_count);
-  int error=mi_rprev(file,buf, active_index);
-  table->status=error ? STATUS_NOT_FOUND: 0;
-  return error;
+  return mi_rprev(file,buf, active_index);
 }
 
 int ha_myisam::index_first(uchar *buf)
 {
-  DBUG_ASSERT(inited==INDEX);
-  ha_statistic_increment(&SSV::ha_read_first_count);
-  int error=mi_rfirst(file, buf, active_index);
-  table->status=error ? STATUS_NOT_FOUND: 0;
-  return error;
+  return mi_rfirst(file, buf, active_index);
 }
 
 int ha_myisam::index_last(uchar *buf)
 {
-  DBUG_ASSERT(inited==INDEX);
-  ha_statistic_increment(&SSV::ha_read_last_count);
-  int error=mi_rlast(file, buf, active_index);
-  table->status=error ? STATUS_NOT_FOUND: 0;
-  return error;
+  return mi_rlast(file, buf, active_index);
 }
 
 int ha_myisam::index_next_same(uchar *buf,
                                const uchar *key __attribute__((unused)),
                                uint length __attribute__((unused)))
 {
+  DBUG_ENTER("ha_myisam::index_next_same");
   int error;
-  DBUG_ASSERT(inited==INDEX);
-  ha_statistic_increment(&SSV::ha_read_next_count);
   do
   {
     error= mi_rnext_same(file,buf);
   } while (error == HA_ERR_RECORD_DELETED);
-  table->status=error ? STATUS_NOT_FOUND: 0;
-  return error;
+  DBUG_PRINT("return",("%i", error));
+  DBUG_RETURN(error);
 }
 
 
@@ -1769,10 +1729,7 @@ int ha_myisam::rnd_init(bool scan)
 
 int ha_myisam::rnd_next(uchar *buf)
 {
-  ha_statistic_increment(&SSV::ha_read_rnd_next_count);
-  int error=mi_scan(file, buf);
-  table->status=error ? STATUS_NOT_FOUND: 0;
-  return error;
+  return mi_scan(file, buf);
 }
 
 int ha_myisam::remember_rnd_pos()
@@ -1788,10 +1745,7 @@ int ha_myisam::restart_rnd_next(uchar *buf)
 
 int ha_myisam::rnd_pos(uchar *buf, uchar *pos)
 {
-  ha_statistic_increment(&SSV::ha_read_rnd_count);
-  int error=mi_rrnd(file, buf, my_get_ptr(pos,ref_length));
-  table->status=error ? STATUS_NOT_FOUND: 0;
-  return error;
+  return mi_rrnd(file, buf, my_get_ptr(pos,ref_length));
 }
 
 void ha_myisam::position(const uchar *record)
@@ -2092,8 +2046,6 @@ int ha_myisam::ft_read(uchar *buf)
                       &LOCK_status); // why ?
 
   error=ft_handler->please->read_next(ft_handler,(char*) buf);
-
-  table->status=error ? STATUS_NOT_FOUND: 0;
   return error;
 }
 
storage/myisam/ha_myisam.h

@@ -71,7 +71,6 @@ class ha_myisam: public handler
   int index_read_idx_map(uchar *buf, uint index, const uchar *key,
                          key_part_map keypart_map,
                          enum ha_rkey_function find_flag);
-  int index_read_last_map(uchar *buf, const uchar *key, key_part_map keypart_map);
   int index_next(uchar * buf);
   int index_prev(uchar * buf);
   int index_first(uchar * buf);