Merge willster.(none):/home/stewart/Documents/MySQL/5.0/bug19914-mk2-merge

into  willster.(none):/home/stewart/Documents/MySQL/5.1/bug19914-mk2-merge
stewart@willster.(none) 2006-10-18 18:51:39 +10:00
commit 71636edc16
26 changed files with 79 additions and 42 deletions
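
The change applied throughout this merge: handler::info() and ha_ndbcluster::records_update() go from returning void to returning int, and each call site now checks the result and reports it via print_error(). A minimal self-contained C++ sketch of that calling convention follows; the ToyHandler/use_statistics names and the HA_ERR_NO_CONNECTION value are illustrative assumptions, not the server code itself.

#include <cstdio>

/* Toy stand-ins for the real server types; only the error-propagation
   shape mirrors the diff below, none of this is actual MySQL code. */
static const int HA_ERR_NO_CONNECTION= 157;  /* assumed illustrative value */

class ToyHandler
{
public:
  /* Before this merge the equivalent method returned void. */
  int info(unsigned flag)
  {
    int result= fetch_statistics(flag);      /* may fail */
    if (result == -1)
      result= HA_ERR_NO_CONNECTION;          /* map internal failure to a handler error */
    return result;                           /* 0 on success, error code otherwise */
  }
private:
  int fetch_statistics(unsigned) { return 0; } /* pretend the engine call succeeded */
};

/* Call sites now check the return value instead of discarding it. */
static int use_statistics(ToyHandler *file)
{
  int error= file->info(0);
  if (error)
  {
    std::fprintf(stderr, "info() failed: %d\n", error); /* the server calls print_error() here */
    return error;
  }
  return 0;
}

int main()
{
  ToyHandler h;
  return use_statistics(&h);
}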


@@ -469,11 +469,13 @@ ha_rows ha_ndbcluster::records()
DBUG_RETURN(retval + info->no_uncommitted_rows_count);
}
-void ha_ndbcluster::records_update()
+int ha_ndbcluster::records_update()
{
if (m_ha_not_exact_count)
-return;
+return 0;
DBUG_ENTER("ha_ndbcluster::records_update");
+int result= 0;
struct Ndb_local_table_statistics *info= m_table_info;
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
((const NDBTAB *)m_table)->getTableId(),
@@ -483,7 +485,7 @@ void ha_ndbcluster::records_update()
Ndb *ndb= get_ndb();
struct Ndb_statistics stat;
ndb->setDatabaseName(m_dbname);
-if (ndb_get_table_statistics(ndb, m_table, &stat) == 0)
+if ((result= ndb_get_table_statistics(ndb, m_table, &stat)) == 0)
{
stats.mean_rec_length= stat.row_size;
stats.data_file_length= stat.fragment_memory;
@@ -496,7 +498,7 @@ void ha_ndbcluster::records_update()
info->no_uncommitted_rows_count= 0;
}
stats.records= info->records+ info->no_uncommitted_rows_count;
-DBUG_VOID_RETURN;
+DBUG_RETURN(result);
}
void ha_ndbcluster::no_uncommitted_rows_execute_failure()
@@ -3635,8 +3637,9 @@ void ha_ndbcluster::position(const byte *record)
}
-void ha_ndbcluster::info(uint flag)
+int ha_ndbcluster::info(uint flag)
{
+int result= 0;
DBUG_ENTER("info");
DBUG_PRINT("enter", ("flag: %d", flag));
@@ -3654,18 +3657,18 @@ void ha_ndbcluster::info(uint flag)
if (m_ha_not_exact_count)
stats.records= 100;
else
-records_update();
+result= records_update();
}
else
{
if ((my_errno= check_ndb_connection()))
-DBUG_VOID_RETURN;
+DBUG_RETURN(my_errno);
Ndb *ndb= get_ndb();
ndb->setDatabaseName(m_dbname);
struct Ndb_statistics stat;
ndb->setDatabaseName(m_dbname);
if (current_thd->variables.ndb_use_exact_count &&
-ndb_get_table_statistics(ndb, m_table, &stat) == 0)
+(result= ndb_get_table_statistics(ndb, m_table, &stat)) == 0)
{
stats.mean_rec_length= stat.row_size;
stats.data_file_length= stat.fragment_memory;
@@ -3709,7 +3712,11 @@ void ha_ndbcluster::info(uint flag)
stats.auto_increment_value= (ulonglong)auto_increment_value64;
}
}
-DBUG_VOID_RETURN;
+if(result == -1)
+result= HA_ERR_NO_CONNECTION;
+DBUG_RETURN(result);
}


@@ -673,7 +673,7 @@ class ha_ndbcluster: public handler
bool get_error_message(int error, String *buf);
ha_rows records();
-void info(uint);
+int info(uint);
void get_dynamic_partition_info(PARTITION_INFO *stat_info, uint part_id);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);
@@ -878,7 +878,7 @@ private:
int check_ndb_connection(THD* thd= current_thd);
void set_rec_per_key();
-void records_update();
+int records_update();
void no_uncommitted_rows_execute_failure();
void no_uncommitted_rows_update(int);
void no_uncommitted_rows_reset(THD *);


@@ -1243,7 +1243,7 @@ public:
key_range *max_key)
{ return (ha_rows) 10; }
virtual void position(const byte *record)=0;
-virtual void info(uint)=0; // see my_base.h for full description
+virtual int info(uint)=0; // see my_base.h for full description
virtual void get_dynamic_partition_info(PARTITION_INFO *stat_info,
uint part_id);
virtual int extra(enum ha_extra_function operation)


@@ -2693,6 +2693,7 @@ bool Item_sum_count_distinct::add()
longlong Item_sum_count_distinct::val_int()
{
+int error;
DBUG_ASSERT(fixed == 1);
if (!table) // Empty query
return LL(0);
@@ -2706,7 +2707,14 @@ longlong Item_sum_count_distinct::val_int()
tree->walk(count_distinct_walk, (void*) &count);
return (longlong) count;
}
-table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+error= table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+if(error)
+{
+table->file->print_error(error, MYF(0));
+}
return table->file->stats.records;
}


@@ -167,7 +167,12 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
}
else
{
-tl->table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+error= tl->table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+if(error)
+{
+tl->table->file->print_error(error, MYF(0));
+return error;
+}
count*= tl->table->file->stats.records;
}
}


@@ -676,6 +676,8 @@ JOIN::optimize()
{
if (res > 1)
{
+thd->fatal_error();
+error= res;
DBUG_PRINT("error",("Error from opt_sum_query"));
DBUG_RETURN(1);
}
@@ -2119,7 +2121,12 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds,
s->needed_reg.init();
table_vector[i]=s->table=table=tables->table;
table->pos_in_table_list= tables;
-table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);// record count
+error= table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+if(error)
+{
+table->file->print_error(error, MYF(0));
+DBUG_RETURN(1);
+}
table->quick_keys.clear_all();
table->reginfo.join_tab=s;
table->reginfo.not_exists_optimize=0;


@@ -496,7 +496,12 @@ bool st_select_lex_unit::exec()
DBUG_RETURN(res);
}
/* Needed for the following test and for records_at_start in next loop */
-table->file->info(HA_STATUS_VARIABLE);
+int error= table->file->info(HA_STATUS_VARIABLE);
+if(error)
+{
+table->file->print_error(error, MYF(0));
+DBUG_RETURN(1);
+}
if (found_rows_for_union && !sl->braces &&
select_limit_cnt != HA_POS_ERROR)
{


@@ -1433,7 +1433,7 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
/*
Hints for optimizer, see ha_tina for more information
*/
-void ha_archive::info(uint flag)
+int ha_archive::info(uint flag)
{
DBUG_ENTER("ha_archive::info");
/*
@@ -1461,7 +1461,7 @@ void ha_archive::info(uint flag)
if (flag & HA_STATUS_AUTO)
stats.auto_increment_value= share->auto_increment_value;
-DBUG_VOID_RETURN;
+DBUG_RETURN(0);
}


@@ -121,7 +121,7 @@ public:
int read_data_header(azio_stream *file_to_read);
int write_data_header(azio_stream *file_to_write);
void position(const byte *record);
-void info(uint);
+int info(uint);
void update_create_info(HA_CREATE_INFO *create_info);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
int optimize(THD* thd, HA_CHECK_OPT* check_opt);


@@ -120,14 +120,14 @@ void ha_blackhole::position(const byte *record)
}
-void ha_blackhole::info(uint flag)
+int ha_blackhole::info(uint flag)
{
DBUG_ENTER("ha_blackhole::info");
bzero((char*) &stats, sizeof(stats));
if (flag & HA_STATUS_AUTO)
stats.auto_increment_value= 1;
-DBUG_VOID_RETURN;
+DBUG_RETURN(0);
}
int ha_blackhole::external_lock(THD *thd, int lock_type)


@@ -75,7 +75,7 @@ public:
int index_first(byte * buf);
int index_last(byte * buf);
void position(const byte *record);
-void info(uint flag);
+int info(uint flag);
int external_lock(THD *thd, int lock_type);
uint lock_count(void) const;
int create(const char *name, TABLE *table_arg,


@@ -1128,13 +1128,13 @@ int ha_tina::rnd_pos(byte * buf, byte *pos)
Currently this table handler doesn't implement most of the fields
really needed. SHOW also makes use of this data
*/
-void ha_tina::info(uint flag)
+int ha_tina::info(uint flag)
{
DBUG_ENTER("ha_tina::info");
/* This is a lie, but you don't want the optimizer to see zero or 1 */
if (!records_is_known && stats.records < 2)
stats.records= 2;
-DBUG_VOID_RETURN;
+DBUG_RETURN(0);
}
/*


@@ -189,7 +189,7 @@ public:
/* This is required for SQL layer to know that we support autorepair */
bool auto_repair() const { return 1; }
void position(const byte *record);
-void info(uint);
+int info(uint);
int extra(enum ha_extra_function operation);
int delete_all_rows(void);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);


@@ -530,10 +530,10 @@ int ha_example::rnd_pos(byte * buf, byte *pos)
sql_update.cc
*/
-void ha_example::info(uint flag)
+int ha_example::info(uint flag)
{
DBUG_ENTER("ha_example::info");
-DBUG_VOID_RETURN;
+DBUG_RETURN(0);
}


@@ -136,7 +136,7 @@ public:
int rnd_next(byte *buf); //required
int rnd_pos(byte * buf, byte *pos); //required
void position(const byte *record); //required
-void info(uint); //required
+int info(uint); //required
int extra(enum ha_extra_function operation);
int reset(void);


@@ -2490,7 +2490,7 @@ int ha_federated::rnd_pos(byte *buf, byte *pos)
*/
-void ha_federated::info(uint flag)
+int ha_federated::info(uint flag)
{
char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
char status_buf[FEDERATED_QUERY_BUFFER_SIZE];
@@ -2571,7 +2571,7 @@ void ha_federated::info(uint flag)
if (result)
mysql_free_result(result);
-DBUG_VOID_RETURN;
+DBUG_RETURN(0);
error:
if (result)
@@ -2580,7 +2580,7 @@ error:
my_sprintf(error_buffer, (error_buffer, ": %d : %s",
mysql_errno(mysql), mysql_error(mysql)));
my_error(error_code, MYF(0), error_buffer);
-DBUG_VOID_RETURN;
+DBUG_RETURN(error_code);
}


@@ -206,7 +206,7 @@ public:
int rnd_next(byte *buf); //required
int rnd_pos(byte *buf, byte *pos); //required
void position(const byte *record); //required
-void info(uint); //required
+int info(uint); //required
void update_auto_increment(void);
int repair(THD* thd, HA_CHECK_OPT* check_opt);


@@ -342,7 +342,7 @@ void ha_heap::position(const byte *record)
*(HEAP_PTR*) ref= heap_position(file); // Ref is aligned
}
-void ha_heap::info(uint flag)
+int ha_heap::info(uint flag)
{
HEAPINFO info;
(void) heap_info(file,&info,flag);
@@ -364,6 +364,7 @@ void ha_heap::info(uint flag)
*/
if (key_stat_version != file->s->key_stat_version)
update_key_stats();
+return 0;
}


@@ -89,7 +89,7 @@ public:
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
-void info(uint);
+int info(uint);
int extra(enum ha_extra_function operation);
int reset();
int external_lock(THD *thd, int lock_type);


@@ -5485,7 +5485,7 @@ ha_innobase::read_time(
Returns statistics information of the table to the MySQL interpreter,
in various fields of the handle object. */
-void
+int
ha_innobase::info(
/*==============*/
uint flag) /* in: what information MySQL requests */
@@ -5508,7 +5508,7 @@ ha_innobase::info(
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
-DBUG_VOID_RETURN;
+DBUG_RETURN(HA_ERR_CRASHED);
}
/* We do not know if MySQL can call this function before calling
@@ -5700,7 +5700,7 @@ ha_innobase::info(
prebuilt->trx->op_info = (char*)"";
-DBUG_VOID_RETURN;
+DBUG_RETURN(0);
}
/**************************************************************************


@@ -143,7 +143,7 @@ class ha_innobase: public handler
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
-void info(uint);
+int info(uint);
int analyze(THD* thd,HA_CHECK_OPT* check_opt);
int optimize(THD* thd,HA_CHECK_OPT* check_opt);
int discard_or_import_tablespace(my_bool discard);


@@ -1325,7 +1325,7 @@ void ha_myisam::position(const byte* record)
my_store_ptr(ref, ref_length, position);
}
-void ha_myisam::info(uint flag)
+int ha_myisam::info(uint flag)
{
MI_ISAMINFO info;
char name_buff[FN_REFLEN];
@@ -1386,6 +1386,8 @@ void ha_myisam::info(uint flag)
stats.update_time = info.update_time;
if (flag & HA_STATUS_AUTO)
stats.auto_increment_value= info.auto_increment;
+return 0;
}


@@ -99,7 +99,7 @@ class ha_myisam: public handler
int rnd_pos(byte * buf, byte *pos);
int restart_rnd_next(byte *buf, byte *pos);
void position(const byte *record);
-void info(uint);
+int info(uint);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);
int reset(void);


@@ -262,7 +262,7 @@ ha_rows ha_myisammrg::records_in_range(uint inx, key_range *min_key,
}
-void ha_myisammrg::info(uint flag)
+int ha_myisammrg::info(uint flag)
{
MYMERGE_INFO info;
(void) myrg_status(file,&info,flag);
@@ -329,6 +329,7 @@ void ha_myisammrg::info(uint flag)
min(file->keys, table->s->key_parts));
}
}
+return 0;
}


@@ -72,7 +72,7 @@ class ha_myisammrg: public handler
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
-void info(uint);
+int info(uint);
int reset(void);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);


@@ -71,5 +71,6 @@ char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ];
#define NDBD_UPDATE_FRAG_DIST_KEY_51 MAKE_VERSION(5,1,12)
#define NDBD_QMGR_SINGLEUSER_VERSION_5 MAKE_VERSION(5,0,25)
#endif