Merge work:/home/bk/mysql-4.0
into serg.mysql.com:/usr/home/serg/Abk/mysql-4.0
commit 215df4fc23
@@ -106,9 +106,6 @@ enum ha_extra_function {
HA_EXTRA_IGNORE_DUP_KEY, /* Dup keys don't rollback everything*/
HA_EXTRA_NO_IGNORE_DUP_KEY,
HA_EXTRA_DONT_USE_CURSOR_TO_UPDATE, /* Cursor will not be used for update */
HA_EXTRA_BULK_INSERT_BEGIN,
HA_EXTRA_BULK_INSERT_FLUSH, /* Flush one index */
HA_EXTRA_BULK_INSERT_END,
HA_EXTRA_PREPARE_FOR_DELETE,
HA_EXTRA_PREPARE_FOR_UPDATE /* Remove read cache if problems */
};

@@ -433,6 +433,10 @@ void mi_disable_non_unique_index(MI_INFO *info, ha_rows rows);
my_bool mi_test_if_sort_rep(MI_INFO *info, ha_rows rows, ulonglong key_map,
my_bool force);

int mi_init_bulk_insert(MI_INFO *info, ulong cache_size, ha_rows rows);
void mi_flush_bulk_insert(MI_INFO *info, uint inx);
void mi_end_bulk_insert(MI_INFO *info);

#ifdef __cplusplus
}
#endif
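For context, a minimal sketch of how the three calls declared above might be driven around ordinary mi_write() calls; the open handle, record buffer and sizes are assumptions for illustration and are not part of the patch:

    /* Hedged sketch, not from the patch: wrap writes in the new bulk-insert
       API. Assumes an already open MyISAM handle and a prepared record. */
    #include <my_global.h>
    #include <myisam.h>

    int bulk_write_sketch(MI_INFO *file, byte *record,
                          ulong cache_size, ha_rows expected_rows)
    {
      int error;
      /* sets up per-key buffer trees; silently does nothing for small
         row counts or tiny caches */
      if ((error= mi_init_bulk_insert(file, cache_size, expected_rows)))
        return error;                    /* e.g. HA_ERR_OUT_OF_MEM */
      error= mi_write(file, record);     /* non-unique keys get buffered */
      mi_end_bulk_insert(file);          /* tear down all buffer trees */
      return error;
    }

mi_flush_bulk_insert() empties a single key's tree; its use is sketched after the handler changes further down.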
@@ -51,6 +51,7 @@ typedef struct st_mymerge_info /* Struct from h_info */
uint reclength; /* Recordlength */
int errkey; /* With key was dupplicated on err */
uint options; /* HA_OPTION_... used */
ulong *rec_per_key; /* for sql optimizing */
} MYMERGE_INFO;

typedef struct st_myrg_table_info

@@ -71,6 +72,7 @@ typedef struct st_myrg_info
my_bool cache_in_use;
LIST open_list;
QUEUE by_key;
ulong *rec_per_key_part; /* for sql optimizing */
} MYRG_INFO;
@@ -358,33 +358,6 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
case HA_EXTRA_QUICK:
info->quick_mode=1;
break;
case HA_EXTRA_BULK_INSERT_BEGIN:
error=_mi_init_bulk_insert(info, (extra_arg ? *(ulong*) extra_arg :
myisam_bulk_insert_tree_size));
break;
case HA_EXTRA_BULK_INSERT_FLUSH:
if (info->bulk_insert)
{
uint index_to_flush= *(uint*) extra_arg;
if (is_tree_inited(&info->bulk_insert[index_to_flush]))
reset_tree(&info->bulk_insert[index_to_flush]);
}
break;
case HA_EXTRA_BULK_INSERT_END:
if (info->bulk_insert)
{
uint i;
for (i=0 ; i < share->base.keys ; i++)
{
if (is_tree_inited(& info->bulk_insert[i]))
{
delete_tree(& info->bulk_insert[i]);
}
}
my_free((void *)info->bulk_insert, MYF(0));
info->bulk_insert=0;
}
break;
case HA_EXTRA_NO_ROWS:
if (!share->state.header.uniques)
info->opt_flag|= OPT_NO_ROWS;
@@ -801,26 +801,27 @@ static int keys_free(uchar *key, TREE_FREE mode, bulk_insert_param *param)
}


int _mi_init_bulk_insert(MI_INFO *info, ulong cache_size)
int mi_init_bulk_insert(MI_INFO *info, ulong cache_size, ha_rows rows)
{
MYISAM_SHARE *share=info->s;
MI_KEYDEF *key=share->keyinfo;
bulk_insert_param *params;
uint i, num_keys;
uint i, num_keys, total_keylength;
ulonglong key_map=0;
DBUG_ENTER("_mi_init_bulk_insert");
DBUG_PRINT("enter",("cache_size: %lu", cache_size));

if (info->bulk_insert)
if (info->bulk_insert || (rows && rows < MI_MIN_ROWS_TO_USE_BULK_INSERT))
DBUG_RETURN(0);

for (i=num_keys=0 ; i < share->base.keys ; i++)
for (i=total_keylength=num_keys=0 ; i < share->base.keys ; i++)
{
if (!(key[i].flag & HA_NOSAME) && share->base.auto_key != i+1
&& test(share->state.key_map & ((ulonglong) 1 << i)))
{
num_keys++;
key_map |=((ulonglong) 1 << i);
total_keylength+=key[i].maxlength+TREE_ELEMENT_EXTRA_SIZE;
}
}

@@ -828,6 +829,11 @@ int _mi_init_bulk_insert(MI_INFO *info, ulong cache_size)
num_keys * MI_MIN_SIZE_BULK_INSERT_TREE > cache_size)
DBUG_RETURN(0);

if (rows && rows*total_keylength < cache_size)
cache_size=rows;
else
cache_size/=total_keylength*16;

info->bulk_insert=(TREE *)
my_malloc((sizeof(TREE)*share->base.keys+
sizeof(bulk_insert_param)*num_keys),MYF(0));
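A standalone illustration of the new sizing rule above; the numbers are hypothetical and only the two formulas are taken from the patch:

    /* Hypothetical numbers; mirrors the cache_size adjustment above. */
    #include <stdio.h>

    int main(void)
    {
      unsigned long cache_size      = 8UL * 1024 * 1024; /* bulk_insert_buff_size */
      unsigned long rows            = 1000; /* expected rows, 0 if unknown */
      unsigned long total_keylength = 48;   /* sum of maxlength + tree overhead */
      unsigned long maxlength       = 20;   /* maxlength of one buffered key */

      if (rows && rows * total_keylength < cache_size)
        cache_size = rows;                  /* few rows: size trees by row count */
      else
        cache_size /= total_keylength * 16; /* otherwise derive from the buffer */

      /* the init_tree() calls in the next hunk then receive
         cache_size * maxlength as the per-key tree size/memory limit */
      printf("per-key tree limit: %lu bytes\n", cache_size * maxlength);
      return 0;
    }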
@@ -836,7 +842,7 @@ int _mi_init_bulk_insert(MI_INFO *info, ulong cache_size)
DBUG_RETURN(HA_ERR_OUT_OF_MEM);

params=(bulk_insert_param *)(info->bulk_insert+share->base.keys);
for (i=0 ; i < share->base.keys ; i++,key++)
for (i=0 ; i < share->base.keys ; i++)
{
if (test(key_map & ((ulonglong) 1 << i)))
{

@@ -844,8 +850,8 @@ int _mi_init_bulk_insert(MI_INFO *info, ulong cache_size)
params->keynr=i;
/* Only allocate a 16'th of the buffer at a time */
init_tree(&info->bulk_insert[i],
cache_size / num_keys / 16 + 10,
cache_size / num_keys, 0,
cache_size * key[i].maxlength,
cache_size * key[i].maxlength, 0,
(qsort_cmp2)keys_compare, 0,
(tree_element_free) keys_free, (void *)params++);
}

@@ -855,3 +861,30 @@ int _mi_init_bulk_insert(MI_INFO *info, ulong cache_size)

DBUG_RETURN(0);
}

void mi_flush_bulk_insert(MI_INFO *info, uint inx)
{
if (info->bulk_insert)
{
if (is_tree_inited(&info->bulk_insert[inx]))
reset_tree(&info->bulk_insert[inx]);
}
}

void mi_end_bulk_insert(MI_INFO *info)
{
if (info->bulk_insert)
{
uint i;
for (i=0 ; i < info->s->base.keys ; i++)
{
if (is_tree_inited(& info->bulk_insert[i]))
{
delete_tree(& info->bulk_insert[i]);
}
}
my_free((void *)info->bulk_insert, MYF(0));
info->bulk_insert=0;
}
}
@@ -373,6 +373,7 @@ struct st_myisam_info {
#define MI_MIN_KEYBLOCK_LENGTH 50 /* When to split delete blocks */

#define MI_MIN_SIZE_BULK_INSERT_TREE 16384 /* this is per key */
#define MI_MIN_ROWS_TO_USE_BULK_INSERT 100

/* The UNIQUE check is done with a hashed long key */

@@ -658,8 +659,6 @@ int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share, File file_to_dup);
int mi_open_keyfile(MYISAM_SHARE *share);
void mi_setup_functions(register MYISAM_SHARE *share);

int _mi_init_bulk_insert(MI_INFO *info, ulong cache_size);

/* Functions needed by mi_check */
void mi_check_print_error _VARARGS((MI_CHECK *param, const char *fmt,...));
void mi_check_print_warning _VARARGS((MI_CHECK *param, const char *fmt,...));
@@ -28,8 +28,6 @@ ulonglong myrg_position(MYRG_INFO *info)
~(ulonglong) 0;
}

/* If flag != 0 one only gets pos of last record */

int myrg_status(MYRG_INFO *info,register MYMERGE_INFO *x,int flag)
{
MYRG_TABLE *current_table;

@@ -55,15 +53,16 @@ int myrg_status(MYRG_INFO *info,register MYMERGE_INFO *x,int flag)
DBUG_PRINT("info2",("table: %s, offset: %lu",
file->table->filename,(ulong) file->file_offset));
}
x->records = info->records;
x->deleted = info->del;
x->data_file_length = info->data_file_length;
x->reclength = info->reclength;
x->options = info->options;
x->records= info->records;
x->deleted= info->del;
x->data_file_length= info->data_file_length;
x->reclength= info->reclength;
x->options= info->options;
if (current_table)
x->errkey = current_table->table->errkey;
x->errkey= current_table->table->errkey;
else
x->errkey=0;
x->errkey= 0;
x->rec_per_key = info->rec_per_key_part;
}
DBUG_RETURN(0);
}
@@ -32,8 +32,8 @@

MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
{
int save_errno,i,errpos;
uint files,dir_length,length,options;
int save_errno,i,j,errpos;
uint files,dir_length,length,options, key_parts;
ulonglong file_offset;
char name_buff[FN_REFLEN*2],buff[FN_REFLEN],*end;
MYRG_INFO info,*m_info;

@@ -89,13 +89,25 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
}
info.reclength=isam->s->base.reclength;
}
key_parts=(isam ? isam->s->base.key_parts : 0);
if (!(m_info= (MYRG_INFO*) my_malloc(sizeof(MYRG_INFO)+
files*sizeof(MYRG_TABLE),
files*sizeof(MYRG_TABLE)+
sizeof(long)*key_parts,
MYF(MY_WME))))
goto err;
*m_info=info;
m_info->open_tables=(files) ? (MYRG_TABLE *) (m_info+1) : 0;
m_info->tables=files;
if (files)
{
m_info->open_tables=(MYRG_TABLE *) (m_info+1);
m_info->rec_per_key_part=(ulong *) (m_info->open_tables+files);
bzero((char*) m_info->rec_per_key_part,sizeof(long)*key_parts);
}
else
{
m_info->open_tables=0;
m_info->rec_per_key_part=0;
}
errpos=2;

options= (uint) ~0;

@@ -107,6 +119,8 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
m_info->records+=isam->state->records;
m_info->del+=isam->state->del;
m_info->data_file_length+=isam->state->data_file_length;
for (j=0; j < key_parts; j++)
m_info->rec_per_key_part[j]+=isam->s->state.rec_per_key_part[j] / files;
if (i)
isam=(MI_INFO*) (isam->open_list.next->data);
}
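For illustration, the rec_per_key_part accumulation above amounts to a per-key-part average over the underlying tables, with the same integer truncation; the statistics below are made up and not from the patch:

    /* Made-up cardinality statistics for three merged tables, two key parts. */
    #include <stdio.h>

    int main(void)
    {
      unsigned long per_table[3][2]= {{10, 4}, {14, 6}, {12, 5}};
      unsigned long merged[2]= {0, 0};
      unsigned int files= 3, key_parts= 2, i, j;

      for (i= 0; i < files; i++)
        for (j= 0; j < key_parts; j++)
          merged[j]+= per_table[i][j] / files;  /* same formula as myrg_open() */

      for (j= 0; j < key_parts; j++)
        printf("rec_per_key_part[%u]= %lu\n", j, merged[j]);
      return 0;
    }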
@@ -173,9 +173,9 @@ INSERT INTO t2 values (1),(2),(3);
INSERT INTO t3 VALUES (1,'1'),(2,'2'),(1,'1'),(2,'2');
explain SELECT distinct t3.a FROM t3,t2,t1 WHERE t3.a=t1.b AND t1.a=t2.a;
table type possible_keys key key_len ref rows Extra
t3 index a a 5 NULL 6 Using index; Using temporary
t2 index a a 4 NULL 5 Using index; Distinct
t1 eq_ref PRIMARY PRIMARY 4 t2.a 1 Using where; Distinct
t1 ALL PRIMARY NULL NULL NULL 2 Using temporary
t2 ref a a 4 t1.a 1 Using index
t3 ref a a 5 t1.b 1 Using where; Using index
SELECT distinct t3.a FROM t3,t2,t1 WHERE t3.a=t1.b AND t1.a=t2.a;
a
1

@@ -190,7 +190,7 @@ insert into t3 select * from t4;
explain select distinct t1.a from t1,t3 where t1.a=t3.a;
table type possible_keys key key_len ref rows Extra
t1 index PRIMARY PRIMARY 4 NULL 2 Using index; Using temporary
t3 ref a a 5 t1.a 10 Using where; Using index; Distinct
t3 ref a a 5 t1.a 1 Using where; Using index; Distinct
select distinct t1.a from t1,t3 where t1.a=t3.a;
a
1

@@ -135,8 +135,8 @@ id
3
show keys from t2;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t2 1 tig 1 ticket A NULL NULL NULL YES BTREE
t2 1 tix 1 inhalt A NULL 1 NULL YES FULLTEXT
t2 1 tig 1 ticket A 3 NULL NULL YES BTREE
t2 1 tix 1 inhalt A 1 1 NULL YES FULLTEXT
show create table t2;
Table Create Table
t2 CREATE TABLE `t2` (

@@ -36,7 +36,7 @@ a a a a
explain select t1.*,t2.* from t1,t1 as t2 where t1.A=t2.B;
table type possible_keys key key_len ref rows Extra
t1 ALL a NULL NULL NULL 5
t2 ALL b NULL NULL NULL 5 Using where
t2 ref b b 4 t1.a 1 Using where
select t1.*,t2.* from t1,t1 as t2 where t1.A=t2.B order by binary t1.a,t2.a;
a b a b
A B a a

@@ -450,9 +450,9 @@ gid sid uid
103853 5 250
EXPLAIN select t1.gid, t2.sid, t3.uid from t3, t2, t1 where t2.gid = t1.gid and t2.uid = t3.uid order by t1.gid, t3.uid;
table type possible_keys key key_len ref rows Extra
t1 index PRIMARY PRIMARY 4 NULL 6 Using index
t2 eq_ref PRIMARY,uid PRIMARY 4 t1.gid 1
t3 eq_ref PRIMARY PRIMARY 2 t2.uid 1 Using where; Using index
t3 index PRIMARY PRIMARY 2 NULL 6 Using index; Using temporary; Using filesort
t2 ref PRIMARY,uid uid 2 t3.uid 1 Using where
t1 eq_ref PRIMARY PRIMARY 4 t2.gid 1 Using index
EXPLAIN SELECT t1.gid, t3.uid from t1, t3 where t1.gid = t3.uid order by t1.gid,t3.skr;
table type possible_keys key key_len ref rows Extra
t1 index PRIMARY PRIMARY 4 NULL 6 Using index

@@ -3239,8 +3239,8 @@ Field Type Null Key Default Extra Privileges
show keys from t2;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t2 0 PRIMARY 1 auto A 1199 NULL NULL BTREE
t2 0 fld1 1 fld1 A 1199 NULL NULL BTREE
t2 1 fld3 1 fld3 A NULL NULL NULL BTREE
t2 0 fld1 1 fld1 A 0 NULL NULL BTREE
t2 1 fld3 1 fld3 A 1199 NULL NULL BTREE
drop table t4, t3, t2, t1;
DO 1;
DO benchmark(100,1+1),1,1;

@@ -9,7 +9,7 @@ Table Op Msg_type Msg_text
test.t1 check status Table is already up to date
check table t1 changed;
Table Op Msg_type Msg_text
test.t1 check status OK
test.t1 check status Table is already up to date
insert into t1 values (5,5,5);
check table t1 changed;
Table Op Msg_type Msg_text

@@ -46,6 +46,8 @@ drop table t1;
create table t1 (sid char(20), id int(2) NOT NULL auto_increment, key(sid, id));
insert into t1 values ('skr',NULL),('skr',NULL),('test',NULL);
select * from t1;
insert into t1 values ('rts',NULL),('rts',NULL),('test',NULL);
select * from t1;
drop table t1;

#
@@ -685,11 +685,14 @@ void ha_myisam::deactivate_non_unique_index(ha_rows rows)
/* Only disable old index if the table was empty */
if (file->state->records == 0)
mi_disable_non_unique_index(file,rows);
ha_myisam::extra_opt(HA_EXTRA_BULK_INSERT_BEGIN,
current_thd->variables.bulk_insert_buff_size);
else
{
mi_init_bulk_insert(file,
current_thd->variables.bulk_insert_buff_size, rows);
table->bulk_insert= 1;
}
}
}
enable_activate_all_index=1;
}
else

@@ -704,7 +707,7 @@ bool ha_myisam::activate_all_index(THD *thd)
MYISAM_SHARE* share = file->s;
DBUG_ENTER("activate_all_index");

mi_extra(file, HA_EXTRA_BULK_INSERT_END, 0);
mi_end_bulk_insert(file);
table->bulk_insert= 0;
if (enable_activate_all_index &&
share->state.key_map != set_bits(ulonglong, share->base.keys))

@@ -945,13 +948,11 @@ int ha_myisam::extra(enum ha_extra_function operation)
}


/* To be used with WRITE_CACHE, EXTRA_CACHE and BULK_INSERT_BEGIN */
/* To be used with WRITE_CACHE and EXTRA_CACHE */

int ha_myisam::extra_opt(enum ha_extra_function operation, ulong cache_size)
{
if ((specialflag & SPECIAL_SAFE_MODE) &
(operation == HA_EXTRA_WRITE_CACHE ||
operation == HA_EXTRA_BULK_INSERT_BEGIN))
if ((specialflag & SPECIAL_SAFE_MODE) & operation == HA_EXTRA_WRITE_CACHE)
return 0;
return mi_extra(file, operation, (void*) &cache_size);
}

@@ -1213,8 +1214,7 @@ longlong ha_myisam::get_auto_increment()
}

if (table->bulk_insert)
mi_extra(file, HA_EXTRA_BULK_INSERT_FLUSH,
(void*) &table->next_number_index);
mi_flush_bulk_insert(file, table->next_number_index);

longlong nr;
int error;
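To summarize the handler-level rewiring above, a hedged sketch of the call sequence for one bulk INSERT; the parameter names stand in for handler members and nothing here is taken verbatim from the patch:

    /* Sketch only: which MyISAM call each handler method now uses. */
    #include <my_global.h>
    #include <myisam.h>

    void handler_bulk_insert_sketch(MI_INFO *file, ha_rows rows,
                                    ulong bulk_insert_buff_size,
                                    uint next_number_index)
    {
      /* ha_myisam::deactivate_non_unique_index(): was
         extra_opt(HA_EXTRA_BULK_INSERT_BEGIN, ...) */
      mi_init_bulk_insert(file, bulk_insert_buff_size, rows);

      /* ha_myisam::get_auto_increment(): flush only the auto_increment key's
         tree so buffered keys are visible when the current maximum is read */
      mi_flush_bulk_insert(file, next_number_index);

      /* ha_myisam::activate_all_index(): was mi_extra(HA_EXTRA_BULK_INSERT_END) */
      mi_end_bulk_insert(file);
    }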
@@ -229,6 +229,13 @@ void ha_myisammrg::info(uint flag)
#else
ref_length=4; // Can't be > than my_off_t
#endif
if (flag & HA_STATUS_CONST)
{
if (table->key_parts)
memcpy((char*) table->key_info[0].rec_per_key,
(char*) info.rec_per_key,
sizeof(table->key_info[0].rec_per_key)*table->key_parts);
}
}


@@ -247,9 +254,7 @@ int ha_myisammrg::extra(enum ha_extra_function operation)

int ha_myisammrg::extra_opt(enum ha_extra_function operation, ulong cache_size)
{
if ((specialflag & SPECIAL_SAFE_MODE) &
(operation == HA_EXTRA_WRITE_CACHE ||
operation == HA_EXTRA_BULK_INSERT_BEGIN))
if ((specialflag & SPECIAL_SAFE_MODE) & operation == HA_EXTRA_WRITE_CACHE)
return 0;
return myrg_extra(file, operation, (void*) &cache_size);
}

@@ -105,7 +105,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields,
int error;
bool log_on= ((thd->options & OPTION_UPDATE_LOG) ||
!(thd->master_access & SUPER_ACL));
bool transactional_table, log_delayed, bulk_insert=0;
bool transactional_table, log_delayed, bulk_insert;
uint value_count;
ulong counter = 1;
ulonglong id;

@@ -187,21 +187,16 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields,
thd->proc_info="update";
if (duplic == DUP_IGNORE || duplic == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if ((bulk_insert= (values_list.elements >= MIN_ROWS_TO_USE_BULK_INSERT &&
lock_type != TL_WRITE_DELAYED &&
!(specialflag & SPECIAL_SAFE_MODE))))
if ((lock_type != TL_WRITE_DELAYED && !(specialflag & SPECIAL_SAFE_MODE)))
{
table->file->extra_opt(HA_EXTRA_WRITE_CACHE,
min(thd->variables.read_buff_size,
table->avg_row_length*values_list.elements));
if (thd->variables.bulk_insert_buff_size)
table->file->extra_opt(HA_EXTRA_BULK_INSERT_BEGIN,
min(thd->variables.bulk_insert_buff_size,
(table->total_key_length +
table->keys * TREE_ELEMENT_EXTRA_SIZE)*
values_list.elements));
table->bulk_insert= 1;
table->file->deactivate_non_unique_index(values_list.elements);
bulk_insert=1;
}
else
bulk_insert=0;

while ((values= its++))
{

@@ -278,7 +273,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields,
error=1;
}
}
if (table->file->extra(HA_EXTRA_BULK_INSERT_END))
if (table->file->activate_all_index(thd))
{
if (!error)
{

@@ -286,7 +281,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields,
error=1;
}
}
table->bulk_insert= 0;
}
if (id && values_list.elements != 1)
thd->insert_id(id); // For update log

@@ -1267,7 +1267,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
select->quick=0;
if (records != HA_POS_ERROR)
{
s->records=s->found_records=records;
s->found_records=records;
s->read_time= (ha_rows) (s->quick ? s->quick->read_time : 0.0);
}
}