New status variables: Rows_tmp_read, Handler_tmp_update and Handler_tmp_write

Split the status variable Rows_read into Rows_read and Rows_tmp_read so that one can see how much real data is read.
The same was done with Handler_update and Handler_write.
Fixed a bug in MEMORY tables where some statistics variables were counted twice.
Added a new internal handler call, ha_close(), to have one place to gather statistics.
Fixed a bug where thd->open_options was set to the wrong value when doing admin_recreate_table().
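
The user-visible effect, as a minimal session sketch (taken from the new test case added to mysql-test/t/status.test; the exact counter values depend on server version and configuration):

  flush status;
  create table t1 (a int not null auto_increment primary key, g int, b blob);
  insert into t1 (g,b) values (1,'a'), (2,'b'), (3,'b'), (1,'c');
  select b, count(*) from t1 group by b;  # GROUP BY materializes an internal temporary table
  show status like 'Row%';                # Rows_read, Rows_sent, Rows_tmp_read
  show status like 'Handler_tmp%';        # Handler_tmp_update, Handler_tmp_write
  drop table t1;

Reads and writes done against internal temporary tables are counted in the *_tmp_* variables, while Rows_read, Handler_update and Handler_write now only count access to normal tables.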


mysql-test/r/status.result:
  Updated test results and added new tests
mysql-test/r/status_user.result:
  Updated test results
mysql-test/t/status.test:
  Added new test for temporary table status variables
sql/ha_partition.cc:
  Changed to call ha_close() instead of close()
sql/handler.cc:
  Added internal_tmp_table variable for easy checking of temporary tables.
  Added new internal handler call 'ha_close()' to have one place to gather statistics.
  Gather statistics for internal temporary tables.
sql/handler.h:
  Added handler variables internal_tmp_table, rows_tmp_read.
  Split update_index_statistics() into two functions.
  Added ha_update_tmp_row() for faster tmp table handling with more statistics.
sql/item_sum.cc:
  ha_write_row() -> ha_write_tmp_row()
sql/multi_range_read.cc:
  close() -> ha_close()
sql/mysqld.cc:
  New status variables: Rows_tmp_read, Handler_tmp_update and Handler_tmp_write (see the example after this file list)
sql/opt_range.cc:
  close() -> ha_close()
sql/sql_base.cc:
  close() -> ha_close()
sql/sql_class.cc:
  Added handling of rows_tmp_read
sql/sql_class.h:
  Added new statistics variables.
  rows_read++ -> update_rows_read(), to be able to correctly count reads from internal temporary tables.
  Added handler::ha_update_tmp_row()
sql/sql_connect.cc:
  Added comment
sql/sql_expression_cache.cc:
  ha_write_row() -> ha_write_tmp_row()
sql/sql_select.cc:
  close() -> ha_close()
  ha_update_row() -> ha_update_tmp_row()
sql/sql_show.cc:
  ha_write_row() -> ha_write_tmp_row()
sql/sql_table.cc:
  Fixed bug where thd->open_options was set to wrong value when doing admin_recreate_table()
sql/sql_union.cc:
  ha_write_row() -> ha_write_tmp_row()
sql/sql_update.cc:
  ha_write_row() -> ha_write_tmp_row()
sql/table.cc:
  close() -> ha_close()
storage/heap/ha_heap.cc:
  Removed double counting of statistics variables.
  close() -> ha_close() to get tmp table statistics.
storage/maria/ha_maria.cc:
  close() -> ha_close() to get tmp table statistics.
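
The new counters are registered as ordinary status variables (see sql/mysqld.cc below), so they are also visible in the global aggregates; an illustrative way to read them:

  show global status like 'Handler_tmp%';
  select variable_name, variable_value
    from information_schema.global_status
   where variable_name in ('ROWS_TMP_READ', 'HANDLER_TMP_UPDATE', 'HANDLER_TMP_WRITE');
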
Author: Michael Widenius
Date:   2011-06-27 19:07:24 +03:00
Parent: a6542a13ab
Commit: ba9a890f0c
23 changed files with 166 additions and 57 deletions

mysql-test/r/status.result
@@ -156,25 +156,33 @@ Variable_name Value
 Com_show_status 3
 show status like 'hand%write%';
 Variable_name Value
+Handler_tmp_write 0
 Handler_write 0
 show status like '%tmp%';
 Variable_name Value
 Created_tmp_disk_tables 0
 Created_tmp_files 0
 Created_tmp_tables 0
+Handler_tmp_update 0
+Handler_tmp_write 0
+Rows_tmp_read 5
 show status like 'hand%write%';
 Variable_name Value
+Handler_tmp_write 0
 Handler_write 0
 show status like '%tmp%';
 Variable_name Value
 Created_tmp_disk_tables 0
 Created_tmp_files 0
 Created_tmp_tables 0
+Handler_tmp_update 0
+Handler_tmp_write 0
+Rows_tmp_read 13
 show status like 'com_show_status';
 Variable_name Value
 Com_show_status 8
 rnd_diff tmp_table_diff
-20 8
+28 8
 flush status;
 show status like 'Com%function';
 Variable_name Value
@@ -238,5 +246,57 @@ SELECT 9;
 9
 DROP PROCEDURE p1;
 DROP FUNCTION f1;
+flush status;
+create table t1 (a int not null auto_increment primary key, g int, b blob);
+insert into t1 (g,b) values (1,'a'), (2, 'b'), (3, 'b'), (1, 'c');
+select * from t1;
+a g b
+1 1 a
+2 2 b
+3 3 b
+4 1 c
+select b, count(*) from t1 group by b;
+b count(*)
+a 1
+b 2
+c 1
+select g, count(*) from t1 group by g;
+g count(*)
+1 2
+2 1
+3 1
+show status like 'Row%';
+Variable_name Value
+Rows_read 12
+Rows_sent 10
+Rows_tmp_read 14
+show status like 'Handler%';
+Variable_name Value
+Handler_commit 0
+Handler_delete 0
+Handler_discover 0
+Handler_prepare 0
+Handler_read_first 0
+Handler_read_key 4
+Handler_read_next 0
+Handler_read_prev 0
+Handler_read_rnd 7
+Handler_read_rnd_next 23
+Handler_rollback 0
+Handler_savepoint 0
+Handler_savepoint_rollback 0
+Handler_tmp_update 2
+Handler_tmp_write 7
+Handler_update 0
+Handler_write 4
+show status like '%tmp%';
+Variable_name Value
+Created_tmp_disk_tables 1
+Created_tmp_files 0
+Created_tmp_tables 2
+Handler_tmp_update 2
+Handler_tmp_write 7
+Rows_tmp_read 34
+drop table t1;
 set @@global.concurrent_insert= @old_concurrent_insert;
 SET GLOBAL log_output = @old_log_output;

mysql-test/r/status_user.result
@@ -94,6 +94,7 @@ show status like "rows%";
 Variable_name Value
 Rows_read 6
 Rows_sent 1
+Rows_tmp_read 0
 show status like "ha%";
 Variable_name Value
 Handler_commit 19
@@ -109,6 +110,8 @@ Handler_read_rnd_next 5
 Handler_rollback 2
 Handler_savepoint 0
 Handler_savepoint_rollback 0
+Handler_tmp_update 0
+Handler_tmp_write 0
 Handler_update 5
 Handler_write 7
 select variable_value - @global_read_key as "handler_read_key" from information_schema.global_status where variable_name="handler_read_key";
@@ -133,7 +136,7 @@ CONCURRENT_CONNECTIONS 0
 ROWS_READ 6
 ROWS_SENT 2
 ROWS_DELETED 1
-ROWS_INSERTED 8
+ROWS_INSERTED 7
 ROWS_UPDATED 5
 SELECT_COMMANDS 3
 UPDATE_COMMANDS 11
@@ -150,7 +153,7 @@ CONCURRENT_CONNECTIONS 0
 ROWS_READ 6
 ROWS_SENT 2
 ROWS_DELETED 1
-ROWS_INSERTED 8
+ROWS_INSERTED 7
 ROWS_UPDATED 5
 SELECT_COMMANDS 3
 UPDATE_COMMANDS 11

mysql-test/t/status.test
@@ -353,6 +353,23 @@ DROP FUNCTION f1;
 # End of 5.1 tests
+#
+# Test of internal temporary table status variables
+#
+flush status;
+create table t1 (a int not null auto_increment primary key, g int, b blob);
+insert into t1 (g,b) values (1,'a'), (2, 'b'), (3, 'b'), (1, 'c');
+select * from t1;
+select b, count(*) from t1 group by b;
+select g, count(*) from t1 group by g;
+show status like 'Row%';
+show status like 'Handler%';
+show status like '%tmp%';
+drop table t1;
+# End of 5.3 tests
 # Restore global concurrent_insert value. Keep in the end of the test file.
 --connection default
 set @@global.concurrent_insert= @old_concurrent_insert;

sql/ha_partition.cc
@@ -1301,7 +1301,7 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
 DBUG_RETURN(0);
 error_external_lock:
-VOID(file->close());
+VOID(file->ha_close());
 error_open:
 VOID(file->ha_delete_table(part_name));
 error_create:
@@ -1347,7 +1347,7 @@ void ha_partition::cleanup_new_partition(uint part_count)
 while ((part_count > 0) && (*file))
 {
 (*file)->ha_external_lock(thd, F_UNLCK);
-(*file)->close();
+(*file)->ha_close();
 /* Leave the (*file)->ha_delete_table(part_name) to the ddl-log */
@@ -2842,7 +2842,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
 err_handler:
 DEBUG_SYNC(ha_thd(), "partition_open_error");
 while (file-- != m_file)
-(*file)->close();
+(*file)->ha_close();
 err_alloc:
 bitmap_free(&m_bulk_insert_started);
 if (!m_is_clone_of)
@@ -2928,7 +2928,7 @@ int ha_partition::close(void)
 repeat:
 do
 {
-(*file)->close();
+(*file)->ha_close();
 } while (*(++file));
 if (first && m_added_file && m_added_file[0])

sql/handler.cc
@@ -2168,7 +2168,7 @@ THD *handler::ha_thd(void) const
 Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set
 */
 int handler::ha_open(TABLE *table_arg, const char *name, int mode,
-int test_if_locked)
+uint test_if_locked)
 {
 int error;
 DBUG_ENTER("handler::ha_open");
@@ -2212,11 +2212,22 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
 dup_ref=ref+ALIGN_SIZE(ref_length);
 cached_table_flags= table_flags();
 }
-rows_read= rows_changed= 0;
-memset(index_rows_read, 0, sizeof(index_rows_read));
+reset_statistics();
+internal_tmp_table= test(test_if_locked & HA_OPEN_INTERNAL_TABLE);
 DBUG_RETURN(error);
 }
+int handler::ha_close()
+{
+DBUG_ENTER("ha_close");
+/*
+Increment global statistics for temporary tables.
+In_use is 0 for tables that was closed from the table cache.
+*/
+if (table->in_use)
+status_var_add(table->in_use->status_var.rows_tmp_read, rows_tmp_read);
+DBUG_RETURN(close());
+}
 /* Initialize handler for random reading, with error handling */
@@ -3238,7 +3249,7 @@ int handler::rename_table(const char * from, const char * to)
 void handler::drop_table(const char *name)
 {
-close();
+ha_close();
 delete_table(name);
 }
@@ -3757,6 +3768,7 @@ void handler::update_global_table_stats()
 TABLE_STATS * table_stats;
 status_var_add(table->in_use->status_var.rows_read, rows_read);
+DBUG_ASSERT(rows_tmp_read == 0);
 if (!table->in_use->userstat_running)
 {

sql/handler.h
@@ -1,4 +1,5 @@
 /* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+Copyright 2009-2011 Monty Program Ab
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -1599,6 +1600,7 @@ public:
 KEY_PART_INFO *range_key_part;
 int key_compare_result_on_equal;
 bool eq_range;
+bool internal_tmp_table; /* If internal tmp table */
 /*
 TRUE <=> the engine guarantees that returned records are within the range
@@ -1643,6 +1645,7 @@ public:
 */
 /* Statistics variables */
 ulonglong rows_read;
+ulonglong rows_tmp_read;
 ulonglong rows_changed;
 /* One bigger than needed to avoid to test if key == MAX_KEY */
 ulonglong index_rows_read[MAX_KEY+1];
@@ -1685,7 +1688,7 @@ public:
 }
 /* ha_ methods: pubilc wrappers for private virtual API */
-int ha_open(TABLE *table, const char *name, int mode, int test_if_locked);
+int ha_open(TABLE *table, const char *name, int mode, uint test_if_locked);
 int ha_index_init(uint idx, bool sorted)
 {
 int result;
@@ -1809,7 +1812,7 @@ public:
 uint get_dup_key(int error);
 void reset_statistics()
 {
-rows_read= rows_changed= 0;
+rows_read= rows_changed= rows_tmp_read= 0;
 bzero(index_rows_read, sizeof(index_rows_read));
 }
 virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
@@ -1894,7 +1897,7 @@ public:
 */
 uint get_index(void) const
 { return inited == INDEX ? active_index : MAX_KEY; }
-virtual int close(void)=0;
+int ha_close(void);
 /**
 @retval 0 Bulk update used by handler
@@ -1970,10 +1973,18 @@ protected:
 virtual int index_last(uchar * buf)
 { return HA_ERR_WRONG_COMMAND; }
 virtual int index_next_same(uchar *buf, const uchar *key, uint keylen);
+virtual int close(void)=0;
+inline void update_rows_read()
+{
+if (likely(!internal_tmp_table))
+rows_read++;
+else
+rows_tmp_read++;
+}
 inline void update_index_statistics()
 {
 index_rows_read[active_index]++;
-rows_read++;
+update_rows_read();
 }
 public:
@@ -2604,6 +2615,7 @@ public:
 virtual handlerton *partition_ht() const
 { return ht; }
 inline int ha_write_tmp_row(uchar *buf);
+inline int ha_update_tmp_row(const uchar * old_data, uchar * new_data);
 };
 #include "multi_range_read.h"

sql/item_sum.cc
@@ -2587,7 +2587,7 @@ bool Item_sum_count_distinct::add()
 */
 return tree->unique_add(table->record[0] + table->s->null_bytes);
 }
-if ((error= table->file->ha_write_row(table->record[0])) &&
+if ((error= table->file->ha_write_tmp_row(table->record[0])) &&
 table->file->is_fatal_error(error, HA_CHECK_DUP))
 return TRUE;
 return FALSE;

sql/multi_range_read.cc
@@ -1075,7 +1075,7 @@ void DsMrr_impl::close_second_handler()
 {
 secondary_file->ha_index_or_rnd_end();
 secondary_file->ha_external_lock(current_thd, F_UNLCK);
-secondary_file->close();
+secondary_file->ha_close();
 delete secondary_file;
 secondary_file= NULL;
 }

sql/mysqld.cc
@@ -8266,6 +8266,8 @@ SHOW_VAR status_vars[]= {
 {"Handler_savepoint_rollback",(char*) offsetof(STATUS_VAR, ha_savepoint_rollback_count), SHOW_LONG_STATUS},
 {"Handler_update", (char*) offsetof(STATUS_VAR, ha_update_count), SHOW_LONG_STATUS},
 {"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), SHOW_LONG_STATUS},
+{"Handler_tmp_update", (char*) offsetof(STATUS_VAR, ha_tmp_update_count), SHOW_LONG_STATUS},
+{"Handler_tmp_write", (char*) offsetof(STATUS_VAR, ha_tmp_write_count), SHOW_LONG_STATUS},
 {"Key", (char*) &show_default_keycache, SHOW_FUNC},
 {"Last_query_cost", (char*) offsetof(STATUS_VAR, last_query_cost), SHOW_DOUBLE_STATUS},
 {"Max_used_connections", (char*) &max_used_connections, SHOW_LONG},
@@ -8280,6 +8282,7 @@ SHOW_VAR status_vars[]= {
 {"Prepared_stmt_count", (char*) &show_prepared_stmt_count, SHOW_FUNC},
 {"Rows_sent", (char*) offsetof(STATUS_VAR, rows_sent), SHOW_LONGLONG_STATUS},
 {"Rows_read", (char*) offsetof(STATUS_VAR, rows_read), SHOW_LONGLONG_STATUS},
+{"Rows_tmp_read", (char*) offsetof(STATUS_VAR, rows_tmp_read), SHOW_LONGLONG_STATUS},
 #ifdef HAVE_QUERY_CACHE
 {"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_NOFLUSH},
 {"Qcache_free_memory", (char*) &query_cache.free_memory, SHOW_LONG_NOFLUSH},

sql/opt_range.cc
@@ -1804,7 +1804,7 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
 DBUG_PRINT("info", ("Freeing separate handler 0x%lx (free: %d)", (long) file,
 free_file));
 file->ha_external_lock(current_thd, F_UNLCK);
-file->close();
+file->ha_close();
 delete file;
 }
 }
@@ -1999,7 +1999,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
 if (init() || reset())
 {
 file->ha_external_lock(thd, F_UNLCK);
-file->close();
+file->ha_close();
 goto failure;
 }
 free_file= TRUE;

sql/sql_base.cc
@@ -675,7 +675,7 @@ void close_handle_and_leave_table_as_lock(TABLE *table)
 */
 if (table->child_l || table->parent)
 detach_merge_children(table, FALSE);
-table->file->close();
+table->file->ha_close();
 table->db_stat= 0; // Mark file closed
 release_table_share(table->s, RELEASE_NORMAL);
 table->s= share;
@@ -3708,7 +3708,7 @@ TABLE *drop_locked_tables(THD *thd,const char *db, const char *table_name)
 if (table->db_stat)
 {
 table->db_stat= 0;
-table->file->close();
+table->file->ha_close();
 }
 }
 else

sql/sql_class.cc
@@ -1201,6 +1201,7 @@ void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
 to_var->bytes_sent+= from_var->bytes_sent;
 to_var->rows_read+= from_var->rows_read;
 to_var->rows_sent+= from_var->rows_sent;
+to_var->rows_tmp_read+= from_var->rows_tmp_read;
 to_var->binlog_bytes_written+= from_var->binlog_bytes_written;
 to_var->cpu_time+= from_var->cpu_time;
 to_var->busy_time+= from_var->busy_time;
@@ -1236,6 +1237,7 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
 to_var->bytes_sent+= from_var->bytes_sent - dec_var->bytes_sent;
 to_var->rows_read+= from_var->rows_read - dec_var->rows_read;
 to_var->rows_sent+= from_var->rows_sent - dec_var->rows_sent;
+to_var->rows_tmp_read+= from_var->rows_tmp_read - dec_var->rows_tmp_read;
 to_var->binlog_bytes_written+= from_var->binlog_bytes_written -
 dec_var->binlog_bytes_written;
 to_var->cpu_time+= from_var->cpu_time - dec_var->cpu_time;

sql/sql_class.h
@@ -530,6 +530,9 @@ typedef struct system_status_var
 ulong ha_rollback_count;
 ulong ha_update_count;
 ulong ha_write_count;
+/* The following are for internal temporary tables */
+ulong ha_tmp_update_count;
+ulong ha_tmp_write_count;
 ulong ha_prepare_count;
 ulong ha_discover_count;
 ulong ha_savepoint_count;
@@ -582,6 +585,7 @@ typedef struct system_status_var
 ulonglong bytes_sent;
 ulonglong rows_read;
 ulonglong rows_sent;
+ulonglong rows_tmp_read;
 ulonglong binlog_bytes_written;
 double last_query_cost;
 double cpu_time, busy_time;
@@ -3610,7 +3614,7 @@ inline int handler::ha_index_read_idx_map(uchar * buf, uint index,
 int error= index_read_idx_map(buf, index, key, keypart_map, find_flag);
 if (!error)
 {
-rows_read++;
+update_rows_read();
 index_rows_read[index]++;
 }
 table->status=error ? STATUS_NOT_FOUND: 0;
@@ -3677,7 +3681,8 @@ inline int handler::ha_ft_read(uchar *buf)
 {
 int error= ft_read(buf);
 if (!error)
-rows_read++;
+update_rows_read();
 table->status=error ? STATUS_NOT_FOUND: 0;
 return error;
 }
@@ -3687,7 +3692,7 @@ inline int handler::ha_rnd_next(uchar *buf)
 increment_statistics(&SSV::ha_read_rnd_next_count);
 int error= rnd_next(buf);
 if (!error)
-rows_read++;
+update_rows_read();
 table->status=error ? STATUS_NOT_FOUND: 0;
 return error;
 }
@@ -3697,7 +3702,7 @@ inline int handler::ha_rnd_pos(uchar *buf, uchar *pos)
 increment_statistics(&SSV::ha_read_rnd_count);
 int error= rnd_pos(buf, pos);
 if (!error)
-rows_read++;
+update_rows_read();
 table->status=error ? STATUS_NOT_FOUND: 0;
 return error;
 }
@@ -3706,7 +3711,7 @@ inline int handler::ha_rnd_pos_by_record(uchar *buf)
 {
 int error= rnd_pos_by_record(buf);
 if (!error)
-rows_read++;
+update_rows_read();
 table->status=error ? STATUS_NOT_FOUND: 0;
 return error;
 }
@@ -3715,15 +3720,21 @@ inline int handler::ha_read_first_row(uchar *buf, uint primary_key)
 {
 int error= read_first_row(buf, primary_key);
 if (!error)
-rows_read++;
+update_rows_read();
 table->status=error ? STATUS_NOT_FOUND: 0;
 return error;
 }
 inline int handler::ha_write_tmp_row(uchar *buf)
 {
-increment_statistics(&SSV::ha_write_count);
+increment_statistics(&SSV::ha_tmp_write_count);
 return write_row(buf);
 }
+inline int handler::ha_update_tmp_row(const uchar *old_data, uchar *new_data)
+{
+increment_statistics(&SSV::ha_tmp_update_count);
+return update_row(old_data, new_data);
+}
 #endif /* MYSQL_SERVER */

sql/sql_connect.cc
@@ -690,6 +690,7 @@ static void update_global_user_stats_with_user(THD *thd,
 user_stats->binlog_bytes_written+=
 (thd->status_var.binlog_bytes_written -
 thd->org_status_var.binlog_bytes_written);
+/* We are not counting rows in internal temporary tables here ! */
 user_stats->rows_read+= (thd->status_var.rows_read -
 thd->org_status_var.rows_read);
 user_stats->rows_sent+= (thd->status_var.rows_sent -

sql/sql_expression_cache.cc
@@ -249,7 +249,7 @@ my_bool Expression_cache_tmptable::put_value(Item *value)
 if (table_thd->is_error())
 goto err;;
-if ((error= cache_table->file->ha_write_row(cache_table->record[0])))
+if ((error= cache_table->file->ha_write_tmp_row(cache_table->record[0])))
 {
 /* create_myisam_from_heap will generate error if needed */
 if (cache_table->file->is_fatal_error(error, HA_CHECK_DUP) &&

sql/sql_select.cc
@@ -14322,7 +14322,7 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table,
 /* remove heap table and change to use myisam table */
 (void) table->file->ha_rnd_end();
-(void) table->file->close(); // This deletes the table !
+(void) table->file->ha_close(); // This deletes the table !
 delete table->file;
 table->file=0;
 plugin_unlock(0, table->s->db_plugin);
@@ -14343,7 +14343,7 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table,
 table->file->print_error(write_err, MYF(0));
 err_killed:
 (void) table->file->ha_rnd_end();
-(void) new_table.file->close();
+(void) new_table.file->ha_close();
 err1:
 new_table.file->ha_delete_table(new_table.s->table_name.str);
 err2:
@@ -16183,8 +16183,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
 { /* Update old record */
 restore_record(table,record[1]);
 update_tmptable_sum_func(join->sum_funcs,table);
-if ((error= table->file->ha_update_row(table->record[1],
+if ((error= table->file->ha_update_tmp_row(table->record[1],
 table->record[0])))
 {
 table->file->print_error(error,MYF(0)); /* purecov: inspected */
 DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -16267,8 +16267,8 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
 }
 restore_record(table,record[1]);
 update_tmptable_sum_func(join->sum_funcs,table);
-if ((error= table->file->ha_update_row(table->record[1],
+if ((error= table->file->ha_update_tmp_row(table->record[1],
 table->record[0])))
 {
 table->file->print_error(error,MYF(0)); /* purecov: inspected */
 DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */

sql/sql_show.cc
@@ -2814,7 +2814,7 @@ typedef struct st_lookup_field_values
 bool schema_table_store_record(THD *thd, TABLE *table)
 {
 int error;
-if ((error= table->file->ha_write_row(table->record[0])))
+if ((error= table->file->ha_write_tmp_row(table->record[0])))
 {
 TMP_TABLE_PARAM *param= table->pos_in_table_list->schema_table_param;
 if (create_internal_tmp_table_from_heap(thd, table, param->start_recinfo,

sql/sql_table.cc
@@ -4681,6 +4681,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
 int result_code;
 bool need_repair_or_alter= 0;
 DBUG_ENTER("mysql_admin_table");
+DBUG_PRINT("enter", ("extra_open_options: %u", extra_open_options));
 if (end_active_trans(thd))
 DBUG_RETURN(1);
@@ -4705,9 +4706,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
 bool fatal_error=0;
 DBUG_PRINT("admin", ("table: '%s'.'%s'", table->db, table->table_name));
-DBUG_PRINT("admin", ("extra_open_options: %u", extra_open_options));
 strxmov(table_name, db, ".", table->table_name, NullS);
-thd->open_options|= extra_open_options;
 table->lock_type= lock_type;
 /* open only one table from local list of command */
 {
@@ -4734,12 +4733,13 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
 lex->sql_command == SQLCOM_ANALYZE ||
 lex->sql_command == SQLCOM_OPTIMIZE)
 thd->prepare_derived_at_open= TRUE;
+thd->open_options|= extra_open_options;
 open_and_lock_tables(thd, table);
+thd->open_options&= ~extra_open_options;
 thd->prepare_derived_at_open= FALSE;
 thd->no_warnings_for_error= 0;
 table->next_global= save_next_global;
 table->next_local= save_next_local;
-thd->open_options&= ~extra_open_options;
 #ifdef WITH_PARTITION_STORAGE_ENGINE
 if (table->table)
 {
@@ -4923,7 +4923,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
 /* We use extra_open_options to be able to open crashed tables */
 thd->open_options|= extra_open_options;
 result_code= admin_recreate_table(thd, table);
-thd->open_options= ~extra_open_options;
+thd->open_options&= ~extra_open_options;
 goto send_result;
 }
 if (check_old_types || check_for_upgrade)

sql/sql_union.cc
@@ -60,7 +60,7 @@ int select_union::send_data(List<Item> &values)
 if (thd->is_error())
 return 1;
-if ((write_err= table->file->ha_write_row(table->record[0])))
+if ((write_err= table->file->ha_write_tmp_row(table->record[0])))
 {
 if (write_err == HA_ERR_FOUND_DUPP_KEY)
 {

sql/sql_update.cc
@@ -1868,7 +1868,7 @@ int multi_update::send_data(List<Item> &not_used_values)
 *values_for_table[offset], TRUE, FALSE);
 /* Write row, ignoring duplicated updates to a row */
-error= tmp_table->file->ha_write_row(tmp_table->record[0]);
+error= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]);
 if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
 {
 if (error &&

sql/table.cc
@@ -2460,7 +2460,7 @@ int closefrm(register TABLE *table, bool free_share)
 {
 if (table->s->deleting)
 table->file->extra(HA_EXTRA_PREPARE_FOR_DROP);
-error=table->file->close();
+error=table->file->ha_close();
 }
 table->alias.free();
 if (table->expr_arena)

storage/heap/ha_heap.cc
@@ -226,7 +226,6 @@ void ha_heap::update_key_stats()
 int ha_heap::write_row(uchar * buf)
 {
 int res;
-ha_statistic_increment(&SSV::ha_write_count);
 if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
 table->timestamp_field->set_time();
 if (table->next_number_field && buf == table->record[0])
@@ -250,7 +249,6 @@ int ha_heap::write_row(uchar * buf)
 int ha_heap::update_row(const uchar * old_data, uchar * new_data)
 {
 int res;
-ha_statistic_increment(&SSV::ha_update_count);
 if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
 table->timestamp_field->set_time();
 res= heap_update(file,old_data,new_data);
@@ -269,7 +267,6 @@ int ha_heap::update_row(const uchar * old_data, uchar * new_data)
 int ha_heap::delete_row(const uchar * buf)
 {
 int res;
-ha_statistic_increment(&SSV::ha_delete_count);
 res= heap_delete(file,buf);
 if (!res && table->s->tmp_table == NO_TMP_TABLE &&
 ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
@@ -288,7 +285,6 @@ int ha_heap::index_read_map(uchar *buf, const uchar *key,
 enum ha_rkey_function find_flag)
 {
 DBUG_ASSERT(inited==INDEX);
-ha_statistic_increment(&SSV::ha_read_key_count);
 int error = heap_rkey(file,buf,active_index, key, keypart_map, find_flag);
 table->status = error ? STATUS_NOT_FOUND : 0;
 return error;
@@ -298,7 +294,6 @@ int ha_heap::index_read_last_map(uchar *buf, const uchar *key,
 key_part_map keypart_map)
 {
 DBUG_ASSERT(inited==INDEX);
-ha_statistic_increment(&SSV::ha_read_key_count);
 int error= heap_rkey(file, buf, active_index, key, keypart_map,
 HA_READ_PREFIX_LAST);
 table->status= error ? STATUS_NOT_FOUND : 0;
@@ -309,7 +304,6 @@ int ha_heap::index_read_idx_map(uchar *buf, uint index, const uchar *key,
 key_part_map keypart_map,
 enum ha_rkey_function find_flag)
 {
-ha_statistic_increment(&SSV::ha_read_key_count);
 int error = heap_rkey(file, buf, index, key, keypart_map, find_flag);
 table->status = error ? STATUS_NOT_FOUND : 0;
 return error;
@@ -318,7 +312,6 @@ int ha_heap::index_read_idx_map(uchar *buf, uint index, const uchar *key,
 int ha_heap::index_next(uchar * buf)
 {
 DBUG_ASSERT(inited==INDEX);
-ha_statistic_increment(&SSV::ha_read_next_count);
 int error=heap_rnext(file,buf);
 table->status=error ? STATUS_NOT_FOUND: 0;
 return error;
@@ -327,7 +320,6 @@ int ha_heap::index_next(uchar * buf)
 int ha_heap::index_prev(uchar * buf)
 {
 DBUG_ASSERT(inited==INDEX);
-ha_statistic_increment(&SSV::ha_read_prev_count);
 int error=heap_rprev(file,buf);
 table->status=error ? STATUS_NOT_FOUND: 0;
 return error;
@@ -336,7 +328,6 @@ int ha_heap::index_prev(uchar * buf)
 int ha_heap::index_first(uchar * buf)
 {
 DBUG_ASSERT(inited==INDEX);
-ha_statistic_increment(&SSV::ha_read_first_count);
 int error=heap_rfirst(file, buf, active_index);
 table->status=error ? STATUS_NOT_FOUND: 0;
 return error;
@@ -345,7 +336,6 @@ int ha_heap::index_first(uchar * buf)
 int ha_heap::index_last(uchar * buf)
 {
 DBUG_ASSERT(inited==INDEX);
-ha_statistic_increment(&SSV::ha_read_last_count);
 int error=heap_rlast(file, buf, active_index);
 table->status=error ? STATUS_NOT_FOUND: 0;
 return error;
@@ -358,7 +348,6 @@ int ha_heap::rnd_init(bool scan)
 int ha_heap::rnd_next(uchar *buf)
 {
-ha_statistic_increment(&SSV::ha_read_rnd_next_count);
 int error=heap_scan(file, buf);
 table->status=error ? STATUS_NOT_FOUND: 0;
 return error;
@@ -368,7 +357,6 @@ int ha_heap::rnd_pos(uchar * buf, uchar *pos)
 {
 int error;
 HEAP_PTR heap_position;
-ha_statistic_increment(&SSV::ha_read_rnd_count);
 memcpy_fixed((char*) &heap_position, pos, sizeof(HEAP_PTR));
 error=heap_rrnd(file, buf, heap_position);
 table->status=error ? STATUS_NOT_FOUND: 0;
@@ -582,7 +570,7 @@ int ha_heap::delete_table(const char *name)
 void ha_heap::drop_table(const char *name)
 {
 file->s->delete_on_close= 1;
-close();
+ha_close();
 }

storage/maria/ha_maria.cc
@@ -2524,7 +2524,7 @@ int ha_maria::delete_table(const char *name)
 void ha_maria::drop_table(const char *name)
 {
 DBUG_ASSERT(file->s->temporary);
-(void) close();
+(void) ha_close();
 (void) maria_delete_table_files(name, 0);
 }