Andrei Elkin 2008-07-18 17:02:42 +03:00
commit 5ec6659e1e
24 changed files with 211 additions and 111 deletions

View File

@@ -2101,6 +2101,37 @@ static bool add_line(String &buffer,char *line,char *in_string,
continue;
}
}
else if (!*ml_comment && !*in_string &&
(end_of_line - pos) >= 10 &&
!my_strnncoll(charset_info, (uchar*) pos, 10,
(const uchar*) "delimiter ", 10))
{
// Flush previously accepted characters
if (out != line)
{
buffer.append(line, (uint32) (out - line));
out= line;
}
// Flush possible comments in the buffer
if (!buffer.is_empty())
{
if (com_go(&buffer, 0) > 0) // < 0 is not fatal
DBUG_RETURN(1);
buffer.length(0);
}
/*
The delimiter command wants the rest of the given line as its
argument, to allow one to change ';' to ';;' and back
*/
buffer.append(pos);
if (com_delimiter(&buffer, pos) > 0)
DBUG_RETURN(1);
buffer.length(0);
break;
}
else if (!*ml_comment && !*in_string && is_prefix(pos, delimiter))
{
// Found a statement. Continue parsing after the delimiter
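Editorial note, not commit text: the new branch above makes the client treat a line that begins with the keyword "delimiter" plus a space (matched case-insensitively, and only outside strings and multi-line comments) as the delimiter command, flushing any pending statement text first. A minimal standalone sketch of just the prefix test, with strncasecmp() standing in for the charset-aware my_strnncoll() call and all names illustrative:

#include <cstring>
#include <strings.h>  // strncasecmp(), POSIX

// Illustrative stand-in for the my_strnncoll()-based test above.
static bool is_delimiter_command(const char *pos, const char *end_of_line)
{
  // At least strlen("delimiter ") == 10 bytes must remain on the line.
  return (end_of_line - pos) >= 10 &&
         strncasecmp(pos, "delimiter ", 10) == 0;
}

int main()
{
  const char *line= "DELIMITER ;;";
  return is_delimiter_command(line, line + std::strlen(line)) ? 0 : 1;
}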

View File

@@ -1103,6 +1103,24 @@ set @my_innodb_commit_concurrency=@@global.innodb_commit_concurrency;
set global innodb_commit_concurrency=0;
set global innodb_commit_concurrency=@my_innodb_commit_concurrency;
#
# Bug #37830: ORDER BY ASC/DESC - no difference
#
CREATE TABLE t1 (a int, b int, c int, PRIMARY KEY (a), KEY t1_b (b))
ENGINE=InnoDB;
INSERT INTO t1 (a,b,c) VALUES (1,1,1), (2,1,1), (3,1,1), (4,1,1);
INSERT INTO t1 (a,b,c) SELECT a+4,b,c FROM t1;
# should be range access
EXPLAIN SELECT a, b, c FROM t1 WHERE b = 1 ORDER BY a DESC LIMIT 5;
# should produce '8 7 6 5 4' for a
SELECT a, b, c FROM t1 WHERE b = 1 ORDER BY a DESC LIMIT 5;
DROP TABLE t1;
--echo End of 5.0 tests
# Fix for BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY

View File

@@ -1,5 +1,6 @@
set @old_concurrent_insert= @@global.concurrent_insert;
set @@global.concurrent_insert= 0;
drop table if exists t1;
create table t1 (
`a&b` int,
`a<b` int,

View File

@@ -1362,6 +1362,21 @@ set global innodb_autoextend_increment=@my_innodb_autoextend_increment;
set @my_innodb_commit_concurrency=@@global.innodb_commit_concurrency;
set global innodb_commit_concurrency=0;
set global innodb_commit_concurrency=@my_innodb_commit_concurrency;
CREATE TABLE t1 (a int, b int, c int, PRIMARY KEY (a), KEY t1_b (b))
ENGINE=InnoDB;
INSERT INTO t1 (a,b,c) VALUES (1,1,1), (2,1,1), (3,1,1), (4,1,1);
INSERT INTO t1 (a,b,c) SELECT a+4,b,c FROM t1;
EXPLAIN SELECT a, b, c FROM t1 WHERE b = 1 ORDER BY a DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index t1_b PRIMARY 4 NULL 8 Using where
SELECT a, b, c FROM t1 WHERE b = 1 ORDER BY a DESC LIMIT 5;
a b c
8 1 1
7 1 1
6 1 1
5 1 1
4 1 1
DROP TABLE t1;
End of 5.0 tests
CREATE TABLE `t2` (
`k` int(11) NOT NULL auto_increment,

View File

@@ -38,8 +38,6 @@ t2
t3
Tables_in_test
t1
delimiter
1
_
Test delimiter : from command line
a

View File

@@ -6,6 +6,9 @@
# the data is actually in the table).
set @old_concurrent_insert= @@global.concurrent_insert;
set @@global.concurrent_insert= 0;
--disable_warnings
drop table if exists t1;
--enable_warnings
# Test of the xml output of the 'mysql' and 'mysqldump' clients -- makes
# sure that basic encoding issues are handled properly

View File

@@ -60,12 +60,6 @@ use test//
show tables//
delimiter ; # Reset delimiter
#
# Bug #33812: mysql client incorrectly parsing DELIMITER
#
select a as delimiter from t1
delimiter ; # Reset delimiter
#
# Bug #36244: MySQL CLI doesn't recognize standalone -- as comment
# before DELIMITER statement

View File

@@ -1026,7 +1026,7 @@ static const char *my_get_module_parent(char *buf, size_t size)
{
char *last= NULL;
char *end;
if (!GetModuleFileName(NULL, buf, size))
if (!GetModuleFileName(NULL, buf, (DWORD) size))
return NULL;
end= strend(buf);
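An editorial note on the one-line change above: on Win64 (LLP64) size_t is 64-bit while the size parameter of GetModuleFileName() is a 32-bit DWORD, so the uncast call narrows implicitly and draws a compiler warning; the explicit cast documents the (buffer-sized, therefore safe) truncation. A portable sketch of the same narrowing, with DWORD_like as a stand-in typedef:

#include <cstddef>
#include <cstdio>

typedef unsigned int DWORD_like;       // stand-in for the 32-bit Win32 DWORD

static void takes_dword(DWORD_like n) { std::printf("%u\n", n); }

int main()
{
  size_t size= 1024;                   // 64-bit on LLP64/LP64 platforms
  takes_dword((DWORD_like) size);      // explicit cast, as in the hunk above
  return 0;
}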

View File

@@ -452,7 +452,7 @@ Event_db_repository::table_scan_all_for_i_s(THD *thd, TABLE *schema_table,
READ_RECORD read_record_info;
DBUG_ENTER("Event_db_repository::table_scan_all_for_i_s");
init_read_record(&read_record_info, thd, event_table, NULL, 1, 0);
init_read_record(&read_record_info, thd, event_table, NULL, 1, 0, FALSE);
/*
rr_sequential, in read_record(), returns 137==HA_ERR_END_OF_FILE,
@@ -925,7 +925,7 @@ Event_db_repository::drop_events_by_field(THD *thd,
DBUG_VOID_RETURN;
/* only enabled events are in memory, so we go now and delete the rest */
init_read_record(&read_record_info, thd, table, NULL, 1, 0);
init_read_record(&read_record_info, thd, table, NULL, 1, 0, FALSE);
while (!ret && !(read_record_info.read_record(&read_record_info)) )
{
char *et_field= get_field(thd->mem_root, table->field[field]);

View File

@@ -1149,7 +1149,7 @@ Events::load_events_from_db(THD *thd)
DBUG_RETURN(TRUE);
}
init_read_record(&read_record_info, thd, table, NULL, 0, 1);
init_read_record(&read_record_info, thd, table, NULL, 0, 1, FALSE);
while (!(read_record_info.read_record(&read_record_info)))
{
Event_queue_element *et;

View File

@@ -410,6 +410,56 @@ static uchar *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count,
DBUG_RETURN(tmp);
}
#ifndef DBUG_OFF
/*
Print a textual, SQL-like representation of a record into the dbug trace.
Note: this function is a work in progress; at the moment
- the column read bitmap is ignored (so garbage may be printed for unused columns)
- there is no quoting
*/
static void dbug_print_record(TABLE *table, bool print_rowid)
{
char buff[1024];
Field **pfield;
String tmp(buff,sizeof(buff),&my_charset_bin);
DBUG_LOCK_FILE;
fprintf(DBUG_FILE, "record (");
for (pfield= table->field; *pfield ; pfield++)
fprintf(DBUG_FILE, "%s%s", (*pfield)->field_name, (pfield[1])? ", ":"");
fprintf(DBUG_FILE, ") = ");
fprintf(DBUG_FILE, "(");
for (pfield= table->field; *pfield ; pfield++)
{
Field *field= *pfield;
if (field->is_null())
  fwrite("NULL", sizeof(char), 4, DBUG_FILE);
else
{
  if (field->type() == MYSQL_TYPE_BIT)
    (void) field->val_int_as_str(&tmp, 1);
  else
    field->val_str(&tmp);
  fwrite(tmp.ptr(), sizeof(char), tmp.length(), DBUG_FILE);
}
if (pfield[1])
fwrite(", ", sizeof(char), 2, DBUG_FILE);
}
fprintf(DBUG_FILE, ")");
if (print_rowid)
{
fprintf(DBUG_FILE, " rowid ");
for (uint i=0; i < table->file->ref_length; i++)
{
fprintf(DBUG_FILE, "%x", (uchar)table->file->ref[i]);
}
}
fprintf(DBUG_FILE, "\n");
DBUG_UNLOCK_FILE;
}
#endif
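A usage note inferred from the find_all_keys() hunk below, not commit text: since the block sits inside #ifndef DBUG_OFF, the printer exists only in debug builds, and its only caller is guarded by a DBUG keyword, so the per-row dump stays off unless explicitly enabled (e.g. mysqld --debug=d,debug_filesort):

// Call site added further down in this file; it fires only while the
// "debug_filesort" DBUG keyword is active.
DBUG_EXECUTE_IF("debug_filesort", dbug_print_record(sort_form, TRUE););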
/**
Search for sort keys and write them into a temp file.
@@ -488,13 +538,10 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
current_thd->variables.read_buff_size);
}
READ_RECORD read_record_info;
if (quick_select)
{
if (select->quick->reset())
DBUG_RETURN(HA_POS_ERROR);
init_read_record(&read_record_info, current_thd, select->quick->head,
select, 1, 1);
}
/* Remember original bitmaps */
@@ -514,12 +561,13 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
{
if (quick_select)
{
if ((error= read_record_info.read_record(&read_record_info)))
if ((error= select->quick->get_next()))
{
error= HA_ERR_END_OF_FILE;
break;
}
file->position(sort_form->record[0]);
DBUG_EXECUTE_IF("debug_filesort", dbug_print_record(sort_form, TRUE););
}
else /* Not quick-select */
{
@@ -576,15 +624,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
if (thd->is_error())
break;
}
if (quick_select)
{
/*
index_merge quick select uses table->sort when retrieving rows, so free
the resources it has allocated.
*/
end_read_record(&read_record_info);
}
else
if (!quick_select)
{
(void) file->extra(HA_EXTRA_NO_CACHE); /* End caching of records */
if (!next_pos)

View File

@@ -2175,8 +2175,8 @@ ulonglong get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg,
int test_if_number(char *str,int *res,bool allow_wildcards);
void change_byte(uchar *,uint,char,char);
void init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form,
SQL_SELECT *select,
int use_record_cache, bool print_errors);
SQL_SELECT *select, int use_record_cache,
bool print_errors, bool disable_rr_cache);
void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
bool print_error, uint idx);
void end_read_record(READ_RECORD *info);

View File

@@ -7936,6 +7936,7 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
handler *file= head->file;
DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::read_keys_and_merge");
/* We're going to just read rowids. */
file->extra(HA_EXTRA_KEYREAD);
head->prepare_for_position();
@@ -7994,15 +7995,17 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
}
DBUG_PRINT("info", ("ok"));
/* ok, all row ids are in Unique */
/*
OK, all rowids are in the Unique now. The next call will initialize
the head->sort structure so it can be used to iterate through the
rowid sequence.
*/
result= unique->get(head);
delete unique;
doing_pk_scan= FALSE;
/* index_merge currently doesn't support "using index" at all */
file->extra(HA_EXTRA_NO_KEYREAD);
/* start table scan */
init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1, 1);
init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1, 1, TRUE);
DBUG_RETURN(result);
}
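Editorial note: the rewritten comment above is also the rationale for passing disable_rr_cache=TRUE in the call just before DBUG_RETURN: unique->get() leaves head->sort holding a single deduplicated, ordered rowid sequence, so routing reads through rr_from_cache would add buffering with no benefit. A rough standalone analogue of the collection step, with std::set playing the role of the server's Unique class (purely illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <set>

int main()
{
  // Rowids returned by two index scans of the same table (overlapping).
  static const uint64_t scan1[]= {7, 3, 9};
  static const uint64_t scan2[]= {3, 5, 7};

  std::set<uint64_t> unique_rowids;  // dedup + order, like the Unique class
  for (size_t i= 0; i < 3; i++)
  {
    unique_rowids.insert(scan1[i]);
    unique_rowids.insert(scan2[i]);
  }

  // Read-back is already one sorted rowid sequence, so no extra read
  // buffering is needed; hence disable_rr_cache=TRUE in the server code.
  for (std::set<uint64_t>::const_iterator it= unique_rowids.begin();
       it != unique_rowids.end(); ++it)
    printf("%llu\n", (unsigned long long) *it);
  return 0;
}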
@@ -8028,6 +8031,7 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
{
result= HA_ERR_END_OF_FILE;
end_read_record(&read_record);
free_io_cache(head);
/* All rows from Unique have been retrieved, do a clustered PK scan */
if (pk_quick_select)
{
@@ -8556,7 +8560,8 @@ bool QUICK_RANGE_SELECT::row_in_ranges()
QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q,
uint used_key_parts_arg)
:QUICK_RANGE_SELECT(*q), rev_it(rev_ranges)
:QUICK_RANGE_SELECT(*q), rev_it(rev_ranges),
used_key_parts (used_key_parts_arg)
{
QUICK_RANGE *r;
@@ -8598,10 +8603,11 @@ int QUICK_SELECT_DESC::get_next()
int result;
if (last_range)
{ // Already read through key
result = ((last_range->flag & EQ_RANGE)
? file->index_next_same(record, last_range->min_key,
last_range->min_length) :
file->index_prev(record));
result = ((last_range->flag & EQ_RANGE &&
used_key_parts <= head->key_info[index].key_parts) ?
file->index_next_same(record, last_range->min_key,
last_range->min_length) :
file->index_prev(record));
if (!result)
{
if (cmp_prev(*rev_it.ref()) == 0)
@@ -8625,7 +8631,9 @@ int QUICK_SELECT_DESC::get_next()
continue;
}
if (last_range->flag & EQ_RANGE)
if (last_range->flag & EQ_RANGE &&
used_key_parts <= head->key_info[index].key_parts)
{
result = file->index_read_map(record, last_range->max_key,
last_range->max_keypart_map,
@@ -8634,6 +8642,8 @@
else
{
DBUG_ASSERT(last_range->flag & NEAR_MAX ||
(last_range->flag & EQ_RANGE &&
used_key_parts > head->key_info[index].key_parts) ||
range_reads_after_key(last_range));
result=file->index_read_map(record, last_range->max_key,
last_range->max_keypart_map,
@@ -8731,54 +8741,6 @@ bool QUICK_SELECT_DESC::range_reads_after_key(QUICK_RANGE *range_arg)
}
/* TRUE if we are reading over a key that may have a NULL value */
#ifdef NOT_USED
bool QUICK_SELECT_DESC::test_if_null_range(QUICK_RANGE *range_arg,
uint used_key_parts)
{
uint offset, end;
KEY_PART *key_part = key_parts,
*key_part_end= key_part+used_key_parts;
for (offset= 0, end = min(range_arg->min_length, range_arg->max_length) ;
offset < end && key_part != key_part_end ;
offset+= key_part++->store_length)
{
if (!memcmp((char*) range_arg->min_key+offset,
(char*) range_arg->max_key+offset,
key_part->store_length))
continue;
if (key_part->null_bit && range_arg->min_key[offset])
return 1; // min_key is null and max_key isn't
// Range doesn't cover NULL. This is ok if there are no more null parts
break;
}
/*
If the next min_range is > NULL, then we can use this, even if
it's a NULL key
Example: SELECT * FROM t1 WHERE a = 2 AND b >0 ORDER BY a DESC,b DESC;
*/
if (key_part != key_part_end && key_part->null_bit)
{
if (offset >= range_arg->min_length || range_arg->min_key[offset])
return 1; // Could be null
key_part++;
}
/*
If any of the key parts used in the ORDER BY could be NULL, we can't
use the key to sort the data.
*/
for (; key_part != key_part_end ; key_part++)
if (key_part->null_bit)
return 1; // Covers null part
return 0;
}
#endif
void QUICK_RANGE_SELECT::add_info_string(String *str)
{
KEY *key_info= head->key_info + index;

View File

@@ -686,12 +686,10 @@ public:
int get_type() { return QS_TYPE_RANGE_DESC; }
private:
bool range_reads_after_key(QUICK_RANGE *range);
#ifdef NOT_USED
bool test_if_null_range(QUICK_RANGE *range, uint used_key_parts);
#endif
int reset(void) { rev_it.rewind(); return QUICK_RANGE_SELECT::reset(); }
List<QUICK_RANGE> rev_ranges;
List_iterator<QUICK_RANGE> rev_it;
uint used_key_parts;
};

View File

@@ -86,6 +86,23 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
The temporary file is normally used when the references don't fit into
a properly sized memory buffer. For most small queries the references
are stored in the memory buffer.
SYNOPSIS
init_read_record()
info OUT read structure
thd Thread handle
table Table the data [originally] comes from.
select SQL_SELECT structure. We may use select->quick or
select->file as the data source
use_record_cache Call file->extra_opt(HA_EXTRA_CACHE,...)
if we're going to do sequential read and some
additional conditions are satisfied.
print_error Copy this to info->print_error
disable_rr_cache Don't use rr_from_cache (used by sort-union
index-merge which produces rowid sequences that
are already ordered)
DESCRIPTION
This function sets up reading data via one of the methods:
The temporary file is also used when performing an update where a key is
modified.
@@ -140,7 +157,8 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
*/
void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
SQL_SELECT *select,
int use_record_cache, bool print_error)
int use_record_cache, bool print_error,
bool disable_rr_cache)
{
IO_CACHE *tempfile;
DBUG_ENTER("init_read_record");
@@ -191,7 +209,8 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
it doesn't make sense to use cache - we don't read from the table
and table->sort.io_cache is read sequentially
*/
if (!table->sort.addon_field &&
if (!disable_rr_cache &&
!table->sort.addon_field &&
! (specialflag & SPECIAL_SAFE_MODE) &&
thd->variables.read_rnd_buff_size &&
!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
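Editorial sketch, not commit text: the SYNOPSIS above describes a small function-pointer protocol. init_read_record() picks one row-reading method and stores it in info->read_record; callers loop on that pointer until it returns non-zero, then call end_read_record(). A self-contained miniature of the same shape (all names illustrative, not the server API):

#include <cstdio>

struct READER;
typedef int (*read_fn)(READER *);     // 0 = row delivered, non-zero = EOF

struct READER
{
  read_fn read_record;                // chosen once by the init function
  int pos, end;
};

static int rr_sequential_like(READER *r)
{
  if (r->pos >= r->end)
    return -1;                        // no more rows
  std::printf("row %d\n", r->pos++);
  return 0;
}

static void init_reader(READER *r, int rows, bool disable_cache)
{
  /*
    The real init_read_record() chooses between rr_sequential, rr_quick,
    rr_from_tempfile and rr_from_cache here; the new disable_rr_cache
    argument simply forbids the cached variant.
  */
  (void) disable_cache;
  r->read_record= rr_sequential_like;
  r->pos= 0;
  r->end= rows;
}

int main()
{
  READER r;
  init_reader(&r, 3, true);
  while (!r.read_record(&r))          // same loop shape as the callers below
    ;
  return 0;
}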

View File

@@ -324,7 +324,8 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
acl_cache->clear(1); // Clear locked hostname cache
init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
init_read_record(&read_record_info,thd,table= tables[0].table,NULL,1,0);
init_read_record(&read_record_info,thd,table= tables[0].table,NULL,1,0,
FALSE);
table->use_all_columns();
VOID(my_init_dynamic_array(&acl_hosts,sizeof(ACL_HOST),20,50));
while (!(read_record_info.read_record(&read_record_info)))
@@ -373,7 +374,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
end_read_record(&read_record_info);
freeze_size(&acl_hosts);
init_read_record(&read_record_info,thd,table=tables[1].table,NULL,1,0);
init_read_record(&read_record_info,thd,table=tables[1].table,NULL,1,0,FALSE);
table->use_all_columns();
VOID(my_init_dynamic_array(&acl_users,sizeof(ACL_USER),50,100));
password_length= table->field[2]->field_length /
@@ -561,7 +562,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
end_read_record(&read_record_info);
freeze_size(&acl_users);
init_read_record(&read_record_info,thd,table=tables[2].table,NULL,1,0);
init_read_record(&read_record_info,thd,table=tables[2].table,NULL,1,0,FALSE);
table->use_all_columns();
VOID(my_init_dynamic_array(&acl_dbs,sizeof(ACL_DB),50,100));
while (!(read_record_info.read_record(&read_record_info)))

View File

@@ -245,7 +245,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
DBUG_RETURN(TRUE);
}
if (usable_index==MAX_KEY)
init_read_record(&info,thd,table,select,1,1);
init_read_record(&info, thd, table, select, 1, 1, FALSE);
else
init_read_record_idx(&info, thd, table, 1, usable_index);
@@ -834,7 +834,7 @@ int multi_delete::do_deletes()
}
READ_RECORD info;
init_read_record(&info,thd,table,NULL,0,1);
init_read_record(&info, thd, table, NULL, 0, 1, FALSE);
/*
Ignore any rows not found in reference tables as they may already have
been deleted by foreign key handling

View File

@@ -186,7 +186,7 @@ int search_topics(THD *thd, TABLE *topics, struct st_find_field *find_fields,
int count= 0;
READ_RECORD read_record_info;
init_read_record(&read_record_info, thd, topics, select,1,0);
init_read_record(&read_record_info, thd, topics, select, 1, 0, FALSE);
while (!read_record_info.read_record(&read_record_info))
{
if (!select->cond->val_int()) // Doesn't match like
@@ -226,7 +226,7 @@ int search_keyword(THD *thd, TABLE *keywords, struct st_find_field *find_fields,
int count= 0;
READ_RECORD read_record_info;
init_read_record(&read_record_info, thd, keywords, select,1,0);
init_read_record(&read_record_info, thd, keywords, select, 1, 0, FALSE);
while (!read_record_info.read_record(&read_record_info) && count<2)
{
if (!select->cond->val_int()) // Doesn't match like
@@ -350,7 +350,7 @@ int search_categories(THD *thd, TABLE *categories,
DBUG_ENTER("search_categories");
init_read_record(&read_record_info, thd, categories, select,1,0);
init_read_record(&read_record_info, thd, categories, select,1,0,FALSE);
while (!read_record_info.read_record(&read_record_info))
{
if (select && !select->cond->val_int())
@@ -384,7 +384,7 @@ void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname,
DBUG_ENTER("get_all_items_for_category");
READ_RECORD read_record_info;
init_read_record(&read_record_info, thd, items, select,1,0);
init_read_record(&read_record_info, thd, items, select,1,0,FALSE);
while (!read_record_info.read_record(&read_record_info))
{
if (!select->cond->val_int())

View File

@@ -1361,7 +1361,7 @@ static void plugin_load(MEM_ROOT *tmp_root, int *argc, char **argv)
goto end;
}
table= tables.table;
init_read_record(&read_record_info, new_thd, table, NULL, 1, 0);
init_read_record(&read_record_info, new_thd, table, NULL, 1, 0, FALSE);
table->use_all_columns();
/*
there are no other threads running yet, so we don't need a mutex.

View File

@@ -11713,7 +11713,7 @@ join_init_read_record(JOIN_TAB *tab)
if (tab->select && tab->select->quick && tab->select->quick->reset())
return 1;
init_read_record(&tab->read_record, tab->join->thd, tab->table,
tab->select,1,1);
tab->select,1,1, FALSE);
return (*tab->read_record.read_record)(&tab->read_record);
}
@@ -12508,6 +12508,9 @@ part_of_refkey(TABLE *table,Field *field)
@note
used_key_parts is set to the correct number of key parts used if the
return value != 0 (in other cases, used_key_parts may still be changed)
Note that the value may actually be greater than the number of index
key parts. This can happen for storage engines that have the primary
key parts as a suffix for every secondary key.
@retval
1 key is ok.
@@ -12580,11 +12583,27 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
reverse=flag; // Remember if reverse
key_part++;
}
*used_key_parts= on_primary_key ? table->key_info[idx].key_parts :
(uint) (key_part - table->key_info[idx].key_part);
if (reverse == -1 && !(table->file->index_flags(idx, *used_key_parts-1, 1) &
HA_READ_PREV))
reverse= 0; // Index can't be used
if (on_primary_key)
{
uint used_key_parts_secondary= table->key_info[idx].key_parts;
uint used_key_parts_pk=
(uint) (key_part - table->key_info[table->s->primary_key].key_part);
*used_key_parts= used_key_parts_pk + used_key_parts_secondary;
if (reverse == -1 &&
(!(table->file->index_flags(idx, used_key_parts_secondary - 1, 1) &
HA_READ_PREV) ||
!(table->file->index_flags(table->s->primary_key,
used_key_parts_pk - 1, 1) & HA_READ_PREV)))
reverse= 0; // Index can't be used
}
else
{
*used_key_parts= (uint) (key_part - table->key_info[idx].key_part);
if (reverse == -1 &&
!(table->file->index_flags(idx, *used_key_parts-1, 1) & HA_READ_PREV))
reverse= 0; // Index can't be used
}
DBUG_RETURN(reverse);
}
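Editorial note: the @note and the branch above encode one piece of arithmetic. When the ORDER BY columns run past the end of a secondary index into its implicit primary-key suffix (the Bug #37830 shape: KEY t1_b(b) plus PRIMARY KEY(a)), used_key_parts counts both segments, so it can exceed the index's own key_parts, and a reverse scan is only usable if both the secondary index and the primary key support HA_READ_PREV. In miniature (illustrative values only):

#include <stdio.h>

int main()
{
  // Bug #37830 shape: KEY t1_b (b), PRIMARY KEY (a), ... ORDER BY a DESC.
  unsigned secondary_parts_used= 1;  // the 'b' part of t1_b
  unsigned pk_parts_used= 1;         // the 'a' part of the implicit PK suffix
  unsigned used_key_parts= secondary_parts_used + pk_parts_used;
  unsigned key_parts_in_t1_b= 1;     // key_info[idx].key_parts

  printf("used_key_parts=%u, index itself has %u part(s)%s\n",
         used_key_parts, key_parts_in_t1_b,
         used_key_parts > key_parts_in_t1_b
         ? ": ORDER BY ran into the PK suffix" : "");
  return 0;
}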

View File

@@ -182,7 +182,8 @@ static bool servers_load(THD *thd, TABLE_LIST *tables)
free_root(&mem, MYF(0));
init_alloc_root(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
init_read_record(&read_record_info,thd,table=tables[0].table,NULL,1,0);
init_read_record(&read_record_info,thd,table=tables[0].table,NULL,1,0,
FALSE);
while (!(read_record_info.read_record(&read_record_info)))
{
/* return_val is already TRUE, so no need to set */

View File

@@ -7113,7 +7113,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
/* Tell handler that we have values for all columns in the to table */
to->use_all_columns();
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1, 1, FALSE);
if (ignore)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
thd->row_count= 0;

View File

@@ -152,7 +152,7 @@ void udf_init()
}
table= tables.table;
init_read_record(&read_record_info, new_thd, table, NULL,1,0);
init_read_record(&read_record_info, new_thd, table, NULL,1,0,FALSE);
table->use_all_columns();
while (!(error= read_record_info.read_record(&read_record_info)))
{

View File

@@ -457,7 +457,7 @@ int mysql_update(THD *thd,
*/
if (used_index == MAX_KEY || (select && select->quick))
init_read_record(&info,thd,table,select,0,1);
init_read_record(&info, thd, table, select, 0, 1, FALSE);
else
init_read_record_idx(&info, thd, table, 1, used_index);
@@ -523,7 +523,7 @@
if (select && select->quick && select->quick->reset())
goto err;
table->file->try_semi_consistent_read(1);
init_read_record(&info,thd,table,select,0,1);
init_read_record(&info, thd, table, select, 0, 1, FALSE);
updated= found= 0;
/* Generate an error when trying to set a NOT NULL field to NULL. */