Merge stella.local:/home2/mydev/mysql-5.1-amain
into  stella.local:/home2/mydev/mysql-5.1-axmrg

configure.in:
  Auto merged
sql/ha_partition.cc:
  Auto merged
commit da804f3088
@@ -68,6 +68,7 @@ extern my_error_reporter my_getopt_error_reporter;

extern int handle_options (int *argc, char ***argv,
                           const struct my_option *longopts, my_get_one_option);
extern void my_cleanup_options(const struct my_option *options);
extern void my_print_help(const struct my_option *options);
extern void my_print_variables(const struct my_option *options);
extern void my_getopt_register_get_addr(uchar ** (*func_addr)(const char *, uint,
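The new my_cleanup_options() entry point above pairs with handle_options(): values parsed into GET_STR_ALLOC options are duplicated on the heap during parsing, and the cleanup call releases them again (see fini_one_value() further down in this diff). A minimal usage sketch, not part of this commit; the option name, the opt_logfile variable and the exact my_option field order are illustrative assumptions to be checked against my_getopt.h:

    /* Hypothetical my_getopt client: parse options, use them, then free
       the GET_STR_ALLOC values with the new my_cleanup_options() call. */
    #include <my_global.h>
    #include <my_sys.h>
    #include <my_getopt.h>

    static char *opt_logfile;                 /* filled by GET_STR_ALLOC parsing */

    static struct my_option my_opts[]=
    {
      {"logfile", 'l', "Write the log to this file.",
       (uchar**) &opt_logfile, (uchar**) &opt_logfile, 0,
       GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
      {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
    };

    static my_bool
    get_one_option(int optid __attribute__((unused)),
                   const struct my_option *opt __attribute__((unused)),
                   char *argument __attribute__((unused)))
    {
      return 0;                               /* accept every recognized option */
    }

    int main(int argc, char **argv)
    {
      int error;
      MY_INIT(argv[0]);
      if ((error= handle_options(&argc, &argv, my_opts, get_one_option)))
        return error;
      /* ... use opt_logfile ... */
      my_cleanup_options(my_opts);            /* frees the duplicated GET_STR_ALLOC value */
      return 0;
    }
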
@@ -307,6 +307,13 @@ UNIQUE USING BTREE(c1)
) ENGINE= MEMORY DEFAULT CHARSET= utf8;
INSERT INTO t1 VALUES('1'), ('2');
DROP TABLE t1;
CREATE TABLE t1 (a INT, KEY USING BTREE(a)) ENGINE=MEMORY;
INSERT INTO t1 VALUES(1),(2),(2);
DELETE FROM t1 WHERE a=2;
SELECT * FROM t1;
a
1
DROP TABLE t1;
End of 4.1 tests
CREATE TABLE t1(val INT, KEY USING BTREE(val)) ENGINE=memory;
INSERT INTO t1 VALUES(0);
@@ -879,4 +879,9 @@ CHECK TABLE tm1;
Table Op Msg_type Msg_text
test.tm1 check status OK
DROP TABLE tm1, t1, t2;
CREATE TABLE t1(c1 INT);
CREATE TABLE t2 (c1 INT) ENGINE=MERGE UNION=(t1) INSERT_METHOD=FIRST;
CREATE TABLE IF NOT EXISTS t1 SELECT * FROM t2;
ERROR HY000: You can't specify target table 't1' for update in FROM clause
DROP TABLE t1, t2;
End of 5.0 tests
@@ -1259,6 +1259,10 @@ INSERT INTO t1 SELECT a + 8, b FROM t1;
ALTER TABLE t1 ADD PARTITION (PARTITION p1 VALUES LESS THAN (64));
ALTER TABLE t1 DROP PARTITION p1;
DROP TABLE t1;
create table t (s1 int) engine=myisam partition by key (s1);
create trigger t_ad after delete on t for each row insert into t values (old.s1);
insert into t values (1);
drop table t;
USE mysql;
SET GLOBAL general_log = 0;
ALTER TABLE general_log ENGINE = MyISAM;
@@ -129,3 +129,10 @@ insert into t1 (time, first_name, last_name) values ('2007-02-07', 'Q', 'Robert'
SELECT * FROM t1 WHERE first_name='Andy' OR last_name='Jake';
id time first_name last_name
drop table t1;
CREATE TABLE t1 (a DOUBLE NOT NULL, KEY(a)) ENGINE=InnoDB
PARTITION BY KEY(a) PARTITIONS 10;
INSERT INTO t1 VALUES(1),(2);
SELECT COUNT(*) FROM t1;
COUNT(*)
2
DROP TABLE t1;
@@ -213,6 +213,15 @@ CREATE TABLE t1 (
INSERT INTO t1 VALUES('1'), ('2');
DROP TABLE t1;

#
# BUG#30590 - delete from memory table with composite btree primary key
#
CREATE TABLE t1 (a INT, KEY USING BTREE(a)) ENGINE=MEMORY;
INSERT INTO t1 VALUES(1),(2),(2);
DELETE FROM t1 WHERE a=2;
SELECT * FROM t1;
DROP TABLE t1;

--echo End of 4.1 tests

#
@@ -511,4 +511,18 @@ SELECT * FROM tm1;
CHECK TABLE tm1;
DROP TABLE tm1, t1, t2;

#
# Bug#15522 - create ... select and with merge tables
#
# This was fixed together with Bug#20662 (Infinite loop in CREATE TABLE
# IF NOT EXISTS ... SELECT with locked tables).
# The new behavior for MERGE tables is consistent with the
# CREATE TABLE SELECT behavior for ordinary tables.
#
CREATE TABLE t1(c1 INT);
CREATE TABLE t2 (c1 INT) ENGINE=MERGE UNION=(t1) INSERT_METHOD=FIRST;
--error ER_UPDATE_TABLE_USED
CREATE TABLE IF NOT EXISTS t1 SELECT * FROM t2;
DROP TABLE t1, t2;

--echo End of 5.0 tests
@@ -1480,6 +1480,15 @@ ALTER TABLE t1 DROP PARTITION p1;

DROP TABLE t1;

#
# Bug #30484: Partitions: crash with self-referencing trigger
#

create table t (s1 int) engine=myisam partition by key (s1);
create trigger t_ad after delete on t for each row insert into t values (old.s1);
insert into t values (1);
drop table t;

#
# Bug #27816: Log tables ran with partitions crashes the server when logging
# is enabled.
@@ -134,3 +134,11 @@ SELECT * FROM t1 WHERE first_name='Andy' OR last_name='Jake';

drop table t1;

#
# BUG#30583 - Partition on DOUBLE key + INNODB + count(*) == crash
#
CREATE TABLE t1 (a DOUBLE NOT NULL, KEY(a)) ENGINE=InnoDB
PARTITION BY KEY(a) PARTITIONS 10;
INSERT INTO t1 VALUES(1),(2);
SELECT COUNT(*) FROM t1;
DROP TABLE t1;
@@ -20,6 +20,9 @@
#include <mysys_err.h>
#include <my_getopt.h>

typedef void (*init_func_p)(const struct my_option *option, uchar* *variable,
                            longlong value);

static void default_reporter(enum loglevel level, const char *format, ...);
my_error_reporter my_getopt_error_reporter= &default_reporter;
@@ -33,7 +36,12 @@ static longlong getopt_ll(char *arg, const struct my_option *optp, int *err);
static ulonglong getopt_ull(char *arg, const struct my_option *optp,
                            int *err);
static double getopt_double(char *arg, const struct my_option *optp, int *err);
static void init_variables(const struct my_option *options);
static void init_variables(const struct my_option *options,
                           init_func_p init_one_value);
static void init_one_value(const struct my_option *option, uchar* *variable,
                           longlong value);
static void fini_one_value(const struct my_option *option, uchar* *variable,
                           longlong value);
static int setval(const struct my_option *opts, uchar* *value, char *argument,
                  my_bool set_maximum_value);
static char *check_struct_option(char *cur_arg, char *key_name);
@@ -117,7 +125,7 @@ int handle_options(int *argc, char ***argv,
  DBUG_ASSERT(argv && *argv);
  (*argc)--; /* Skip the program name */
  (*argv)++; /* --- || ---- */
  init_variables(longopts);
  init_variables(longopts, init_one_value);

  for (pos= *argv, pos_end=pos+ *argc; pos != pos_end ; pos++)
  {
@@ -906,6 +914,37 @@ static void init_one_value(const struct my_option *option, uchar* *variable,
}


/*
  Init one value to it's default values

  SYNOPSIS
    init_one_value()
    option              Option to initialize
    value               Pointer to variable
*/

static void fini_one_value(const struct my_option *option, uchar* *variable,
                           longlong value __attribute__ ((unused)))
{
  DBUG_ENTER("fini_one_value");
  switch ((option->var_type & GET_TYPE_MASK)) {
  case GET_STR_ALLOC:
    my_free((*(char**) variable), MYF(MY_ALLOW_ZERO_PTR));
    *((char**) variable)= NULL;
    break;
  default: /* dummy default to avoid compiler warnings */
    break;
  }
  DBUG_VOID_RETURN;
}


void my_cleanup_options(const struct my_option *options)
{
  init_variables(options, fini_one_value);
}


/*
  initialize all variables to their default values
@@ -919,7 +958,8 @@ static void init_one_value(const struct my_option *option, uchar* *variable,
  for a value and initialize.
*/

static void init_variables(const struct my_option *options)
static void init_variables(const struct my_option *options,
                           init_func_p init_one_value)
{
  DBUG_ENTER("init_variables");
  for (; options->name; options++)
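Taken together, the my_getopt.c changes above turn the old single-purpose init_variables() into a generic walk over the option table that applies a callback to each option value, so one traversal now serves both initialization (init_one_value) and the new cleanup path (my_cleanup_options() invoking fini_one_value). A stand-alone sketch of that callback-traversal pattern, with purely illustrative names and types rather than the real my_option machinery:

    /* Generic option-table walk: the same loop installs defaults or frees them,
       depending on which per-option callback is passed in. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct opt { const char *name; char **value; const char *def; };
    typedef void (*opt_func)(const struct opt *o);

    static void init_one(const struct opt *o) { *o->value= strdup(o->def); }
    static void fini_one(const struct opt *o) { free(*o->value); *o->value= NULL; }

    static void for_each_opt(const struct opt *opts, opt_func func)
    {
      for (; opts->name; opts++)            /* same idiom as init_variables() */
        func(opts);
    }

    static char *datadir, *logfile;
    static struct opt opts[]=
    {
      { "datadir", &datadir, "/var/lib/db" },
      { "logfile", &logfile, "db.log" },
      { NULL, NULL, NULL }
    };

    int main(void)
    {
      for_each_opt(opts, init_one);         /* cf. init_variables(opts, init_one_value) */
      printf("%s %s\n", datadir, logfile);
      for_each_opt(opts, fini_one);         /* cf. my_cleanup_options(opts) */
      return 0;
    }
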
@@ -3391,6 +3391,22 @@ int ha_partition::index_init(uint inx, bool sorted)
  */
  if (m_lock_type == F_WRLCK)
    bitmap_union(table->read_set, &m_part_info->full_part_field_set);
  else if (sorted && m_table_flags & HA_PARTIAL_COLUMN_READ)
  {
    /*
      An ordered scan is requested and necessary fields aren't in read_set.
      This may happen e.g. with SELECT COUNT(*) FROM t1. We must ensure
      that all fields of current key are included into read_set, as
      partitioning requires them for sorting
      (see ha_partition::handle_ordered_index_scan).

      TODO: handle COUNT(*) queries via unordered scan.
    */
    uint i;
    for (i= 0; i < m_curr_key_info->key_parts; i++)
      bitmap_set_bit(table->read_set,
                     m_curr_key_info->key_part[i].field->field_index);
  }
  file= m_file;
  do
  {
@@ -4540,6 +4556,8 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
  4) Parameters only used by temporary tables for query processing
  5) Parameters only used by MyISAM internally
  6) Parameters not used at all
  7) Parameters only used by federated tables for query processing
  8) Parameters only used by NDB

  The partition handler need to handle category 1), 2) and 3).
@@ -4806,6 +4824,15 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
  HA_EXTRA_INSERT_WITH_UPDATE:
    Inform handler that an "INSERT...ON DUPLICATE KEY UPDATE" will be
    executed. This condition is unset by HA_EXTRA_NO_IGNORE_DUP_KEY.

  8) Parameters only used by NDB
  ------------------------------
  HA_EXTRA_DELETE_CANNOT_BATCH:
  HA_EXTRA_UPDATE_CANNOT_BATCH:
    Inform handler that delete_row()/update_row() cannot batch deletes/updates
    and should perform them immediately. This may be needed when table has
    AFTER DELETE/UPDATE triggers which access to subject table.
    These flags are reset by the handler::extra(HA_EXTRA_RESET) call.
*/

int ha_partition::extra(enum ha_extra_function operation)
@@ -4890,6 +4917,13 @@ int ha_partition::extra(enum ha_extra_function operation)
  /* Category 7), used by federated handlers */
  case HA_EXTRA_INSERT_WITH_UPDATE:
    DBUG_RETURN(loop_extra(operation));
  /* Category 8) Parameters only used by NDB */
  case HA_EXTRA_DELETE_CANNOT_BATCH:
  case HA_EXTRA_UPDATE_CANNOT_BATCH:
  {
    /* Currently only NDB use the *_CANNOT_BATCH */
    break;
  }
  default:
  {
    /* Temporary crash to discover what is wrong */
@@ -3053,7 +3053,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
  MEM_ROOT *mem_root= alloc_root_inited(&tmp->mem_root) ?
                      &tmp->mem_root : &plugin_mem_root;
  st_mysql_sys_var **opt;
  my_option *opts;
  my_option *opts= NULL;
  char *p, *varname;
  int error;
  st_mysql_sys_var *o;
@@ -3092,7 +3092,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
    {
      sql_print_error("Parsing options for plugin '%s' failed.",
                      tmp->name.str);
      DBUG_RETURN(error);
      goto err;
    }
  }
@@ -3102,6 +3102,8 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
    *enabled= TRUE;
  }

  error= 1;

  if (*enabled)
  {
    for (opt= tmp->plugin->system_vars; opt && *opt; opt++)
@@ -3140,7 +3142,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
      {
        sql_print_error("Plugin '%s' has conflicting system variables",
                        tmp->name.str);
        DBUG_RETURN(1);
        goto err;
      }
      tmp->system_vars= chain.first;
    }
@@ -3150,7 +3152,10 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
  if (enabled_saved && global_system_variables.log_warnings)
    sql_print_information("Plugin '%s' disabled by command line option",
                          tmp->name.str);
  DBUG_RETURN(1);
err:
  if (opts)
    my_cleanup_options(opts);
  DBUG_RETURN(error);
}
@@ -969,6 +969,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
      }
      parser_name.str= (char*) next_chunk;
      parser_name.length= strlen((char*) next_chunk);
      next_chunk+= parser_name.length + 1;
      keyinfo->parser= my_plugin_lock_by_name(NULL, &parser_name,
                                              MYSQL_FTPARSER_PLUGIN);
      if (! keyinfo->parser)
@@ -72,10 +72,7 @@ int hp_rb_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
  int res;

  if (flag)
  {
    info->last_pos= NULL; /* For heap_rnext/heap_rprev */
    info->lastkey_len= 0;
  }

  custom_arg.keyseg= keyinfo->seg;
  custom_arg.key_length= hp_rb_make_key(keyinfo, info->recbuf, record, recpos);
@@ -35,6 +35,17 @@ int heap_rfirst(HP_INFO *info, uchar *record, int inx)
                            sizeof(uchar*));
    info->current_ptr = pos;
    memcpy(record, pos, (size_t)share->reclength);
    /*
      If we're performing index_first on a table that was taken from
      table cache, info->lastkey_len is initialized to previous query.
      Thus we set info->lastkey_len to proper value for subsequent
      heap_rnext() calls.
      This is needed for DELETE queries only, otherwise this variable is
      not used.
      Note that the same workaround may be needed for heap_rlast(), but
      for now heap_rlast() is never used for DELETE queries.
    */
    info->lastkey_len= 0;
    info->update = HA_STATE_AKTIV;
  }
  else
@@ -33,11 +33,40 @@ int heap_rnext(HP_INFO *info, uchar *record)
  heap_rb_param custom_arg;

  if (info->last_pos)
  {
    /*
      We enter this branch for non-DELETE queries after heap_rkey()
      or heap_rfirst(). As last key position (info->last_pos) is available,
      we only need to climb the tree using tree_search_next().
    */
    pos = tree_search_next(&keyinfo->rb_tree, &info->last_pos,
                           offsetof(TREE_ELEMENT, left),
                           offsetof(TREE_ELEMENT, right));
  }
  else if (!info->lastkey_len)
  {
    /*
      We enter this branch only for DELETE queries after heap_rfirst(). E.g.
      DELETE FROM t1 WHERE a<10. As last key position is not available
      (last key is removed by heap_delete()), we must restart search as it
      is done in heap_rfirst().

      It should be safe to handle this situation without this branch. That is
      branch below should find smallest element in a tree as lastkey_len is
      zero. tree_search_edge() is a kind of optimisation here as it should be
      faster than tree_search_key().
    */
    pos= tree_search_edge(&keyinfo->rb_tree, info->parents,
                          &info->last_pos, offsetof(TREE_ELEMENT, left));
  }
  else
  {
    /*
      We enter this branch only for DELETE queries after heap_rkey(). E.g.
      DELETE FROM t1 WHERE a=10. As last key position is not available
      (last key is removed by heap_delete()), we must restart search as it
      is done in heap_rkey().
    */
    custom_arg.keyseg = keyinfo->seg;
    custom_arg.key_length = info->lastkey_len;
    custom_arg.search_flag = SEARCH_SAME | SEARCH_FIND;