Auto-merge from mysql-5.5-merge.
commit f6f9e3e3ac
mysql-test/include/not_blackhole.inc (new file)
@@ -0,0 +1,5 @@
if (`SELECT count(*) FROM information_schema.engines WHERE
     (support = 'YES' OR support = 'DEFAULT') AND
     engine = 'blackhole'`){
  skip Blackhole engine enabled;
}
@@ -1003,6 +1003,7 @@ SELECT 1 FROM
1
1
DROP TABLE t1;
End of 5.0 tests
#
# Bug #52397: another crash with explain extended and group_concat
#
@@ -1019,6 +1020,25 @@ Warnings:
Note 1003 select 1 AS `1` from dual
DROP TABLE t1;
End of 5.0 tests
#
# Bug #54476: crash when group_concat and 'with rollup' in prepared statements
#
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2);
PREPARE stmt FROM "SELECT GROUP_CONCAT(t1.a ORDER BY t1.a) FROM t1 JOIN t1 t2 GROUP BY t1.a WITH ROLLUP";
EXECUTE stmt;
GROUP_CONCAT(t1.a ORDER BY t1.a)
1,1
2,2
1,1,2,2
EXECUTE stmt;
GROUP_CONCAT(t1.a ORDER BY t1.a)
1,1
2,2
1,1,2,2
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
End of 5.1 tests
DROP TABLE IF EXISTS t1, t2;
CREATE TABLE t1 (a VARCHAR(6), b INT);
CREATE TABLE t2 (a VARCHAR(6), b INT);
@@ -337,6 +337,21 @@ select connection_id() > 0;
connection_id() > 0
1
#
# Bug #54461: crash with longblob and union or update with subquery
#
CREATE TABLE t1 (a INT, b LONGBLOB);
INSERT INTO t1 VALUES (1, '2'), (2, '3'), (3, '2');
SELECT DISTINCT LEAST(a, (SELECT b FROM t1 LIMIT 1)) FROM t1 UNION SELECT 1;
LEAST(a, (SELECT b FROM t1 LIMIT 1))
1
2
SELECT DISTINCT GREATEST(a, (SELECT b FROM t1 LIMIT 1)) FROM t1 UNION SELECT 1;
GREATEST(a, (SELECT b FROM t1 LIMIT 1))
2
3
1
DROP TABLE t1;
#
# Bug #52165: Assertion failed: file .\dtoa.c, line 465
#
CREATE TABLE t1 (a SET('a'), b INT);
@@ -1305,4 +1305,12 @@ date_sub("0069-01-01 00:00:01",INTERVAL 2 SECOND)
select date_sub("0169-01-01 00:00:01",INTERVAL 2 SECOND);
date_sub("0169-01-01 00:00:01",INTERVAL 2 SECOND)
0168-12-31 23:59:59
CREATE TABLE t1(a DOUBLE NOT NULL);
INSERT INTO t1 VALUES (0),(9.216e-096);
# should not crash
SELECT 1 FROM t1 ORDER BY @x:=makedate(a,a);
1
1
1
DROP TABLE t1;
End of 5.1 tests
mysql-test/r/partition_not_blackhole.result (new file)
@@ -0,0 +1,16 @@
DROP TABLE IF EXISTS t1;
#
# Bug#46086: crash when dropping a partitioned table and
# the original engine is disabled
# Copy a .frm and .par file which was created with:
# create table `t1` (`id` int primary key) engine=blackhole
# partition by key () partitions 1;
SHOW TABLES;
Tables_in_test
t1
SHOW CREATE TABLE t1;
ERROR HY000: Incorrect information in file: './test/t1.frm'
DROP TABLE t1;
ERROR 42S02: Unknown table 't1'
t1.frm
t1.par
mysql-test/std_data/parts/t1_blackhole.frm (new binary file, not shown)
mysql-test/std_data/parts/t1_blackhole.par (new binary file, not shown)
@@ -747,6 +747,7 @@ UNLOCK TABLES;
DROP TABLE t1;


--echo End of 5.1 tests

@@ -10,3 +10,4 @@ drop table t1, t2;
sync_slave_with_master;

# End of 4.1 tests

@@ -708,6 +708,7 @@ SELECT 1 FROM

DROP TABLE t1;

--echo End of 5.0 tests

--echo #
--echo # Bug #52397: another crash with explain extended and group_concat
@@ -722,6 +723,26 @@ DROP TABLE t1;

--echo End of 5.0 tests


--echo #
--echo # Bug #54476: crash when group_concat and 'with rollup' in prepared statements
--echo #

CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2);

PREPARE stmt FROM "SELECT GROUP_CONCAT(t1.a ORDER BY t1.a) FROM t1 JOIN t1 t2 GROUP BY t1.a WITH ROLLUP";
EXECUTE stmt;
EXECUTE stmt;

DEALLOCATE PREPARE stmt;
DROP TABLE t1;


--echo End of 5.1 tests



#
# Bug#36785: Wrong error message when group_concat() exceeds max length
#
@@ -467,6 +467,19 @@ select NAME_CONST('_id',1234) as id;

select connection_id() > 0;

--echo #
--echo # Bug #54461: crash with longblob and union or update with subquery
--echo #

CREATE TABLE t1 (a INT, b LONGBLOB);
INSERT INTO t1 VALUES (1, '2'), (2, '3'), (3, '2');

SELECT DISTINCT LEAST(a, (SELECT b FROM t1 LIMIT 1)) FROM t1 UNION SELECT 1;
SELECT DISTINCT GREATEST(a, (SELECT b FROM t1 LIMIT 1)) FROM t1 UNION SELECT 1;

DROP TABLE t1;


--echo #
--echo # Bug #52165: Assertion failed: file .\dtoa.c, line 465
--echo #
@@ -478,4 +491,5 @@ SELECT COALESCE(a) = COALESCE(b) FROM t1;

DROP TABLE t1;


--echo End of tests
@@ -821,4 +821,15 @@ select date_sub("0069-01-01 00:00:01",INTERVAL 2 SECOND);
select date_sub("0169-01-01 00:00:01",INTERVAL 2 SECOND);


#
# Bug #55565: debug assertion when ordering by expressions with user
# variable assignments
#

CREATE TABLE t1(a DOUBLE NOT NULL);
INSERT INTO t1 VALUES (0),(9.216e-096);
--echo # should not crash
SELECT 1 FROM t1 ORDER BY @x:=makedate(a,a);
DROP TABLE t1;

--echo End of 5.1 tests
mysql-test/t/partition_not_blackhole-master.opt (new file)
@@ -0,0 +1 @@
--loose-skip-blackhole
mysql-test/t/partition_not_blackhole.test (new file)
@@ -0,0 +1,26 @@
--source include/have_partition.inc
--source include/not_blackhole.inc

--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

let $MYSQLD_DATADIR= `SELECT @@datadir`;

--echo #
--echo # Bug#46086: crash when dropping a partitioned table and
--echo # the original engine is disabled
--echo # Copy a .frm and .par file which was created with:
--echo # create table `t1` (`id` int primary key) engine=blackhole
--echo # partition by key () partitions 1;
--copy_file std_data/parts/t1_blackhole.frm $MYSQLD_DATADIR/test/t1.frm
--copy_file std_data/parts/t1_blackhole.par $MYSQLD_DATADIR/test/t1.par
SHOW TABLES;
--replace_result $MYSQLD_DATADIR ./
--error ER_NOT_FORM_FILE
SHOW CREATE TABLE t1;
--error ER_BAD_TABLE_ERROR
DROP TABLE t1;
--list_files $MYSQLD_DATADIR/test t1*
--remove_file $MYSQLD_DATADIR/test/t1.frm
--remove_file $MYSQLD_DATADIR/test/t1.par
@@ -2446,9 +2446,14 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root)
  tot_partition_words= (m_tot_parts + 3) / 4;
  engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*));
  for (i= 0; i < m_tot_parts; i++)
  {
    engine_array[i]= ha_resolve_by_legacy_type(ha_thd(),
                                               (enum legacy_db_type)
                                               *(uchar *) ((file_buffer) + 12 + i));
                                               *(uchar *) ((file_buffer) +
                                                           12 + i));
    if (!engine_array[i])
      goto err3;
  }
  address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words;
  tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4;
  if (len_words != (tot_partition_words + tot_name_words + 4))
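The ha_partition::get_from_handler_file() hunk above is the server-side fix for Bug#46086, exercised by the new partition_not_blackhole test: ha_resolve_by_legacy_type() returns NULL when the engine recorded in the .par file (BLACKHOLE in the test) is not loaded, and the added check turns that into a clean error path (goto err3) instead of dereferencing a NULL handlerton while dropping the table. A rough standalone sketch of the same "lookup may fail, bail out instead of dereferencing" pattern follows; this is plain illustrative C++, not MySQL source, and the engine registry and names are invented for the example.

#include <cstdio>
#include <map>
#include <string>

// Hypothetical registry of available storage engines; "blackhole" is
// deliberately absent, as when the server runs with --skip-blackhole.
static const std::map<std::string, int> available_engines= {
  { "innodb", 1 }, { "myisam", 2 },
};

// Analogue of ha_resolve_by_legacy_type(): may return nullptr.
static const int *resolve_engine(const std::string &name)
{
  auto it= available_engines.find(name);
  return it == available_engines.end() ? nullptr : &it->second;
}

int main()
{
  const char *parts[]= { "myisam", "blackhole" };
  for (const char *name : parts)
  {
    const int *engine= resolve_engine(name);
    if (!engine)                       // the check the patch adds
    {
      printf("partition engine %s not available, failing gracefully\n", name);
      return 1;                        // stand-in for "goto err3"
    }
    printf("partition uses engine id %d\n", *engine);
  }
  return 0;
}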
@@ -2533,6 +2533,8 @@ void Item_func_min_max::fix_length_and_dec()
                                               decimals,
                                               unsigned_flag));
  }
  else if (cmp_type == REAL_RESULT)
    fix_char_length(float_length(decimals));
  cached_field_type= agg_field_type(args, arg_count);
}

@@ -984,7 +984,8 @@ bool Aggregator_distinct::add()
  {
    int error;
    copy_fields(tmp_table_param);
    copy_funcs(tmp_table_param->items_to_copy);
    if (copy_funcs(tmp_table_param->items_to_copy, table->in_use))
      return TRUE;

    for (Field **field=table->field ; *field ; field++)
      if ((*field)->is_real_null(0))
@@ -3058,7 +3059,6 @@ Item_func_group_concat::Item_func_group_concat(THD *thd,
  tree(item->tree),
  unique_filter(item->unique_filter),
  table(item->table),
  order(item->order),
  context(item->context),
  arg_count_order(item->arg_count_order),
  arg_count_field(item->arg_count_field),
@@ -3071,6 +3071,24 @@ Item_func_group_concat::Item_func_group_concat(THD *thd,
{
  quick_group= item->quick_group;
  result.set_charset(collation.collation);

  /*
    Since the ORDER structures pointed to by the elements of the 'order' array
    may be modified in find_order_in_list() called from
    Item_func_group_concat::setup(), create a copy of those structures so that
    such modifications done in this object would not have any effect on the
    object being copied.
  */
  ORDER *tmp;
  if (!(order= (ORDER **) thd->alloc(sizeof(ORDER *) * arg_count_order +
                                     sizeof(ORDER) * arg_count_order)))
    return;
  tmp= (ORDER *)(order + arg_count_order);
  for (uint i= 0; i < arg_count_order; i++, tmp++)
  {
    memcpy(tmp, item->order[i], sizeof(ORDER));
    order[i]= tmp;
  }
}

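The comment in the hunk above describes the heart of the Bug #54476 fix: the cloned GROUP_CONCAT item now gets its own ORDER structures in a single allocation (pointer array followed by the structures), so changes made by find_order_in_list() during one execution of a prepared statement no longer leak back into the original item. A rough standalone sketch of that single-allocation deep-copy pattern follows; this is generic illustrative C++, not MySQL source, and the Node type and clone_nodes() helper are invented for the example.

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Hypothetical stand-in for MySQL's ORDER struct.
struct Node { int direction; };

// Copy an array of Node pointers plus the Nodes they point to in one
// allocation laid out as [Node* x count][Node x count], mirroring the
// thd->alloc() trick used in the copy constructor above.
static Node **clone_nodes(Node *const *src, size_t count)
{
  void *buf= malloc(sizeof(Node *) * count + sizeof(Node) * count);
  if (!buf)
    return NULL;
  Node **ptrs= static_cast<Node **>(buf);
  Node *copies= reinterpret_cast<Node *>(ptrs + count);
  for (size_t i= 0; i < count; i++)
  {
    memcpy(&copies[i], src[i], sizeof(Node));  // deep copy the struct
    ptrs[i]= &copies[i];                       // pointer array refers to the copies
  }
  return ptrs;
}

int main()
{
  Node a= { 1 }, b= { -1 };
  Node *orig[]= { &a, &b };
  Node **copy= clone_nodes(orig, 2);
  copy[0]->direction= 42;                               // mutate the copy...
  printf("%d %d\n", a.direction, copy[0]->direction);   // ...the original stays at 1
  free(copy);
  return 0;
}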
@@ -3136,7 +3154,8 @@ bool Item_func_group_concat::add()
  if (always_null)
    return 0;
  copy_fields(tmp_table_param);
  copy_funcs(tmp_table_param->items_to_copy);
  if (copy_funcs(tmp_table_param->items_to_copy, table->in_use))
    return TRUE;

  for (uint i= 0; i < arg_count_field; i++)
  {
@@ -394,10 +394,10 @@ public:
  /* Use this to start writing a new log file */
  void new_file();

  bool write(Log_event* event_info);
  bool write(Log_event* event_info); // binary log write
  bool write(THD *thd, IO_CACHE *cache, Log_event *commit_event, bool incident);
  bool write_incident(THD *thd, bool lock);

  bool write_incident(THD *thd, bool lock);
  int  write_cache(IO_CACHE *cache, bool lock_log, bool flush_and_sync);
  void set_write_error(THD *thd);
  bool check_write_error(THD *thd);
@@ -12723,7 +12723,9 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
  if (!end_of_records)
  {
    copy_fields(&join->tmp_table_param);
    copy_funcs(join->tmp_table_param.items_to_copy);
    if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
      DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */

    if (!join->having || join->having->val_int())
    {
      int error;
@@ -12813,7 +12815,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
      memcpy(table->record[0]+key_part->offset, group->buff, 1);
  }
  init_tmptable_sum_functions(join->sum_funcs);
  copy_funcs(join->tmp_table_param.items_to_copy);
  if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
    DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */
  if ((error=table->file->ha_write_row(table->record[0])))
  {
    if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
@@ -12848,7 +12851,8 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),

  init_tmptable_sum_functions(join->sum_funcs);
  copy_fields(&join->tmp_table_param);          // Groups are copied twice.
  copy_funcs(join->tmp_table_param.items_to_copy);
  if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
    DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */

  if (!(error=table->file->ha_write_row(table->record[0])))
    join->send_records++;                       // New group
@@ -12935,7 +12939,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
    if (idx < (int) join->send_group_parts)
    {
      copy_fields(&join->tmp_table_param);
      copy_funcs(join->tmp_table_param.items_to_copy);
      if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
        DBUG_RETURN(NESTED_LOOP_ERROR);
      if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1]))
        DBUG_RETURN(NESTED_LOOP_ERROR);
      if (join->procedure)
@@ -15807,14 +15812,39 @@ update_sum_func(Item_sum **func_ptr)
  return 0;
}

/** Copy result of functions to record in tmp_table. */
/**
  Copy result of functions to record in tmp_table.

void
copy_funcs(Item **func_ptr)
  Uses the thread pointer to check for errors in
  some of the val_xxx() methods called by the
  save_in_result_field() function.
  TODO: make the Item::val_xxx() return error code

  @param func_ptr  array of the function Items to copy to the tmp table
  @param thd       pointer to the current thread for error checking
  @retval
    FALSE if OK
  @retval
    TRUE  on error
*/

bool
copy_funcs(Item **func_ptr, const THD *thd)
{
  Item *func;
  for (; (func = *func_ptr) ; func_ptr++)
  {
    func->save_in_result_field(1);
    /*
      Need to check the THD error state because Item::val_xxx() don't
      return error code, but can generate errors
      TODO: change it for a real status check when Item::val_xxx()
      are extended to return status code.
    */
    if (thd->is_error())
      return TRUE;
  }
  return FALSE;
}

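The rewritten copy_funcs() above is the core of the Bug #55565 fix: save_in_result_field() and the underlying Item::val_xxx() calls report failure only through the thread's error state, so the function now polls thd->is_error() after each item and returns TRUE, letting callers such as end_write(), end_update(), end_unique_update(), end_write_group() and the aggregator add() methods abort instead of continuing with a half-copied row. A minimal standalone sketch of that "shared error flag plus boolean wrapper" pattern follows; this is generic illustrative C++, not MySQL code, and the Context type and field functions are invented for the example.

#include <cstdio>
#include <functional>
#include <vector>

// Hypothetical execution context: errors are recorded here instead of
// being returned by the per-field functions, much like THD's error state.
struct Context
{
  bool error_raised= false;
  bool is_error() const { return error_raised; }
};

// A "field function" writes its result somewhere and may flag an error.
using FieldFunc= std::function<void(Context &)>;

// Analogue of the patched copy_funcs(): run every function, but stop and
// report failure as soon as the context has an error recorded.
static bool copy_funcs(const std::vector<FieldFunc> &funcs, Context &ctx)
{
  for (const FieldFunc &f : funcs)
  {
    f(ctx);
    if (ctx.is_error())   // the calls themselves return no status
      return true;        // TRUE on error, as in the new signature
  }
  return false;           // FALSE if OK
}

int main()
{
  Context ctx;
  std::vector<FieldFunc> funcs= {
    [](Context &)  { puts("field 1 copied"); },
    [](Context &c) { puts("field 2 failed"); c.error_raised= true; },
    [](Context &)  { puts("never reached"); },
  };
  if (copy_funcs(funcs, ctx))
    puts("aborting row, propagating the error to the caller");
  return 0;
}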
@@ -606,7 +606,7 @@ bool setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
                       List<Item> &new_list1, List<Item> &new_list2,
                       uint elements, List<Item> &fields);
void copy_fields(TMP_TABLE_PARAM *param);
void copy_funcs(Item **func_ptr);
bool copy_funcs(Item **func_ptr, const THD *thd);
bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
                             int error, bool ignore_last_dupp_error);
uint find_shortest_key(TABLE *table, const key_map *usable_keys);
@@ -204,7 +204,6 @@ typedef struct st_order {
  struct st_order *next;
  Item   **item;                        /* Point at item in select fields */
  Item   *item_ptr;                     /* Storage for initial item */
  Item   **item_copy;                   /* For SPs; the original item ptr */
  int    counter;                       /* position in SELECT list, correct
                                           only if counter_used is true*/
  bool   asc;                           /* true if ascending */