Merge dator5.(none):/home/pappa/clean-mysql-5.1-new-ndb
into dator5.(none):/home/pappa/push_clone
sql/sql_show.cc:
  Auto merged
storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp:
  Auto merged
mysql-test/t/disabled.def:
  manual merge
commit c8e4fe09b0
@@ -17,7 +17,7 @@ drop table t1;
CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b))
ENGINE = NDB
PARTITION BY KEY (c);
ERROR HY000: A PRIMARY KEY need to include all fields in the partition function
ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function
CREATE TABLE t1 (a int, b int, c int, PRIMARY KEY(a,b))
ENGINE = NDB
PARTITION BY KEY (a);
@@ -1,4 +1,60 @@
drop table if exists t1;
create table t1 (s1 char(2) character set utf8)
partition by list (case when s1 > 'cz' then 1 else 2 end)
(partition p1 values in (1),
partition p2 values in (2));
drop table t1;
create table t1 (a int)
partition by key(a)
partitions 0.2+e1;
ERROR 42000: Only normal integers allowed as number here near '0.2+e1' at line 3
create table t1 (a int)
partition by key(a)
partitions -1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '-1' at line 3
create table t1 (a int)
partition by key(a)
partitions 1.5;
ERROR 42000: Only normal integers allowed as number here near '1.5' at line 3
create table t1 (a int)
partition by key(a)
partitions 1e+300;
ERROR 42000: Only normal integers allowed as number here near '1e+300' at line 3
create table t1 (a int)
engine = innodb
partition by key (a);
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 InnoDB 10 Compact 2 8192 16384 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned
insert into t1 values (0), (1), (2), (3);
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 InnoDB 10 Compact 4 4096 16384 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned
drop table t1;
create table t1 (a int auto_increment primary key)
engine = innodb
partition by key (a);
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 InnoDB 10 Compact 2 8192 16384 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL partitioned
insert into t1 values (NULL), (NULL), (NULL), (NULL);
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 InnoDB 10 Compact 4 4096 16384 0 0 0 5 NULL NULL NULL latin1_swedish_ci NULL partitioned
insert into t1 values (NULL), (NULL), (NULL), (NULL);
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 InnoDB 10 Compact 8 2048 16384 0 0 0 9 NULL NULL NULL latin1_swedish_ci NULL partitioned
drop table t1;
create table t1 (a int)
partition by key (a)
(partition p0 DATA DIRECTORY 'part-data' INDEX DIRECTORY 'part-data');
ERROR 42000: Incorrect table name 'part-data'
create table t1 (a int)
partition by key (a)
(partition p0,
partition p1 DATA DIRECTORY 'part-data' INDEX DIRECTORY 'part-data');
ERROR 42000: Incorrect table name 'part-data'
create table t1 (a int)
partition by list (a)
(partition p0 values in (1));
@@ -731,7 +787,7 @@ ERROR HY000: Cannot create temporary table with partitions
create table t1 (a int, b int) partition by list (a)
(partition p1 values in (1), partition p2 values in (2));
alter table t1 add primary key (b);
ERROR HY000: A PRIMARY KEY need to include all fields in the partition function
ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -750,11 +806,6 @@ t2 CREATE TABLE `t2` (
PRIMARY KEY (`a`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='no comment' /*!50100 PARTITION BY KEY (a) */
drop table t2;
create table t1 (s1 char(2) character set utf8)
partition by list (case when s1 > 'cz' then 1 else 2 end)
(partition p1 values in (1),
partition p2 values in (2));
drop table t1;
create table t1 (f1 int) partition by hash (f1) as select 1;
drop table t1;
prepare stmt1 from 'create table t1 (s1 int) partition by hash (s1)';
@@ -1117,4 +1168,32 @@ hello/master-data/tmpinx/t1#P#p1#SP#subpart11.MYI
hello/master-data/tmpinx/t1#P#p2#SP#subpart20.MYI
hello/master-data/tmpinx/t1#P#p2#SP#subpart21.MYI
drop table t1;
create table t1 (a bigint unsigned not null, primary key(a))
engine = myisam
partition by key (a)
partitions 10;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` bigint(20) unsigned NOT NULL,
PRIMARY KEY (`a`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) PARTITIONS 10 */
insert into t1 values (18446744073709551615), (0xFFFFFFFFFFFFFFFE),
(18446744073709551613), (18446744073709551612);
select * from t1;
a
18446744073709551612
18446744073709551613
18446744073709551614
18446744073709551615
select * from t1 where a = 18446744073709551615;
a
18446744073709551615
delete from t1 where a = 18446744073709551615;
select * from t1;
a
18446744073709551612
18446744073709551613
18446744073709551614
drop table t1;
End of 5.1 tests
@@ -1,4 +1,14 @@
DROP TABLE IF EXISTS t1;
create table t1 (a int)
partition by range (a)
subpartition by key (a)
(partition p0 values less than (10) (subpartition sp00, subpartition sp01),
partition p1 values less than (20) (subpartition sp10, subpartition sp11));
alter table t1 reorganize partition p0 into
(partition p0 values less than (10) (subpartition sp00,
subpartition sp01, subpartition sp02));
ERROR HY000: Wrong number of subpartitions defined, mismatch with previous setting
drop table t1;
CREATE TABLE t1 (f_date DATE, f_varchar VARCHAR(30))
PARTITION BY HASH(CAST(YEAR(f_date) AS SIGNED INTEGER)) PARTITIONS 2;
SHOW CREATE TABLE t1;
@@ -1,4 +1,14 @@
drop table if exists t1;
create table t1 (a date)
engine = innodb
partition by range (year(a))
(partition p0 values less than (2006),
partition p1 values less than (2007));
explain partitions select * from t1
where a between '2006-01-01' and '2007-06-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p1 ALL NULL NULL NULL NULL 2 Using where
drop table t1;
create table t1 (a int unsigned)
partition by range (a)
(partition pnull values less than (0),
@@ -13,14 +13,13 @@
#events_stress : BUG#17619 2006-02-21 andrey Race conditions
#events : BUG#17619 2006-02-21 andrey Race conditions
#events_scheduling : BUG#19170 2006-04-26 andrey Test case of 19170 fails on some platforms. Has to be checked.
#im_instance_conf : Bug#20294 2006-06-06 monty Instance manager test im_instance_conf fails randomly
im_instance_conf : Bug#20294 2006-06-06 monty Instance manager test im_instance_conf fails randomly
im_options : Bug#20294 2006-07-24 stewart Instance manager test im_instance_conf fails randomly
#im_life_cycle : Bug#20368 2006-06-10 alik im_life_cycle test fails
ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
#ndb_binlog_ignore_db : BUG#21279 2006-07-25 ingo Randomly throws a warning
ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed
partition : BUG#21658 2006-08-16 Partition test fails, --ps-protocol
partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table
ps : BUG#21524 2006-08-08 pgalbraith 'ps' test fails in --ps-protocol test AMD64 bit
ps_7ndb : BUG#18950 2006-02-16 jmiller create table like does not obtain LOCK_open
@@ -44,5 +43,8 @@ rpl_sp_effects : BUG#19862 2006-06-15 mkindahl
#rpl_truncate_7ndb : BUG#21298 2006-07-27 msvensson
crash_commit_before : 2006-08-02 msvensson
rpl_ndb_dd_advance : BUG#18679 2006-07-28 jimw (Test fails randomly)
func_group : BUG#21924 2006-08-30 reggie
func_in : BUG#21925 2006-08-30 reggie
partition_mgm_err2 : BUG#19107 2006-08-30 reggie
ndb_binlog_discover : bug#21806 2006-08-24
ndb_autodiscover3 : bug#21806
@@ -9,6 +9,74 @@
drop table if exists t1;
--enable_warnings

#
# Bug#14367: Partitions: crash if utf8 column
#
create table t1 (s1 char(2) character set utf8)
partition by list (case when s1 > 'cz' then 1 else 2 end)
(partition p1 values in (1),
partition p2 values in (2));
drop table t1;

#
# Bug 15890: Strange number of partitions accepted
#
-- error 1064
create table t1 (a int)
partition by key(a)
partitions 0.2+e1;
-- error 1064
create table t1 (a int)
partition by key(a)
partitions -1;
-- error 1064
create table t1 (a int)
partition by key(a)
partitions 1.5;
-- error 1064
create table t1 (a int)
partition by key(a)
partitions 1e+300;

#
# Bug 21173: SHOW TABLE STATUS crashes server in InnoDB
#
create table t1 (a int)
engine = innodb
partition by key (a);
show table status;
insert into t1 values (0), (1), (2), (3);
show table status;
drop table t1;

create table t1 (a int auto_increment primary key)
engine = innodb
partition by key (a);
show table status;
insert into t1 values (NULL), (NULL), (NULL), (NULL);
show table status;
insert into t1 values (NULL), (NULL), (NULL), (NULL);
show table status;
drop table t1;

#
# Bug 21350: Data Directory problems
#
-- error 1103
create table t1 (a int)
partition by key (a)
(partition p0 DATA DIRECTORY 'part-data' INDEX DIRECTORY 'part-data');

#
# Insert a test that manages to create the first partition and fails with
# the second, ensure that we clean up afterwards in a proper manner.
#
--error 1103
create table t1 (a int)
partition by key (a)
(partition p0,
partition p1 DATA DIRECTORY 'part-data' INDEX DIRECTORY 'part-data');

#
# Bug 19309 Partitions: Crash if double procedural alter
#
@@ -880,15 +948,6 @@ show create table t2;

drop table t2;

#
# Bug#14367: Partitions: crash if utf8 column
#
create table t1 (s1 char(2) character set utf8)
partition by list (case when s1 > 'cz' then 1 else 2 end)
(partition p1 values in (1),
partition p2 values in (2));
drop table t1;

#
# Bug#15336 Partitions: crash if create table as select
#
@@ -1300,4 +1359,22 @@ eval ALTER TABLE t1 REORGANIZE PARTITION p0 INTO
drop table t1;
--exec rmdir $MYSQLTEST_VARDIR/master-data/tmpdata || true
--exec rmdir $MYSQLTEST_VARDIR/master-data/tmpinx || true

#
# Bug 21388: Bigint fails to find record
#
create table t1 (a bigint unsigned not null, primary key(a))
engine = myisam
partition by key (a)
partitions 10;

show create table t1;
insert into t1 values (18446744073709551615), (0xFFFFFFFFFFFFFFFE),
(18446744073709551613), (18446744073709551612);
select * from t1;
select * from t1 where a = 18446744073709551615;
delete from t1 where a = 18446744073709551615;
select * from t1;
drop table t1;

--echo End of 5.1 tests
@@ -2,6 +2,23 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

#
# Bug 21143: mysqld hang when error in number of subparts in
# REORGANIZE command
#
create table t1 (a int)
partition by range (a)
subpartition by key (a)
(partition p0 values less than (10) (subpartition sp00, subpartition sp01),
partition p1 values less than (20) (subpartition sp10, subpartition sp11));

-- error ER_PARTITION_WRONG_NO_SUBPART_ERROR
alter table t1 reorganize partition p0 into
(partition p0 values less than (10) (subpartition sp00,
subpartition sp01, subpartition sp02));
drop table t1;

CREATE TABLE t1 (f_date DATE, f_varchar VARCHAR(30))
PARTITION BY HASH(CAST(YEAR(f_date) AS SIGNED INTEGER)) PARTITIONS 2;
SHOW CREATE TABLE t1;
@@ -9,6 +9,18 @@
drop table if exists t1;
--enable_warnings

#
# Bug 21339: Crash in Explain Partitions
#
create table t1 (a date)
engine = innodb
partition by range (year(a))
(partition p0 values less than (2006),
partition p1 values less than (2007));
explain partitions select * from t1
where a between '2006-01-01' and '2007-06-01';
drop table t1;

#
# More checks for partition pruning
#
@@ -686,3 +698,4 @@ EXPLAIN PARTITIONS SELECT * from t1
WHERE (a >= '2004-07-01' AND a <= '2004-09-30') OR
(a >= '2005-07-01' AND a <= '2005-09-30');
DROP TABLE t1;
@@ -1134,7 +1134,9 @@ int ha_partition::prepare_new_partition(TABLE *table,
bool open_flag= FALSE;
DBUG_ENTER("prepare_new_partition");

set_up_table_before_create(table, part_name, create_info, 0, p_elem);
if ((error= set_up_table_before_create(table, part_name, create_info,
0, p_elem)))
goto error;
if ((error= file->create(part_name, table, create_info)))
goto error;
create_flag= TRUE;
@@ -1343,9 +1345,9 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
ones used to be.
*/
first= FALSE;
DBUG_ASSERT(i + m_reorged_parts <= m_file_tot_parts);
DBUG_ASSERT(((i*no_subparts) + m_reorged_parts) <= m_file_tot_parts);
memcpy((void*)m_reorged_file, &m_file[i*no_subparts],
sizeof(handler*)*m_reorged_parts*no_subparts);
sizeof(handler*)*m_reorged_parts);
}
} while (++i < no_parts);
}
@@ -1579,6 +1581,17 @@ void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
}


void ha_partition::change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
{
handler **file_array= m_file;
table= table_arg;
table_share= share;
do
{
(*file_array)->change_table_ptr(table_arg, share);
} while (*(++file_array));
}

/*
Change comments specific to handler
@@ -1633,7 +1646,7 @@ uint ha_partition::del_ren_cre_table(const char *from,
char from_buff[FN_REFLEN], to_buff[FN_REFLEN];
char *name_buffer_ptr;
uint i;
handler **file;
handler **file, **abort_file;
DBUG_ENTER("del_ren_cre_table()");

if (get_from_handler_file(from, current_thd->mem_root))
@@ -1657,8 +1670,10 @@ uint ha_partition::del_ren_cre_table(const char *from,
error= (*file)->delete_table((const char*) from_buff);
else
{
set_up_table_before_create(table_arg, from_buff, create_info, i, NULL);
error= (*file)->create(from_buff, table_arg, create_info);
if ((error= set_up_table_before_create(table_arg, from_buff,
create_info, i, NULL)) ||
((error= (*file)->create(from_buff, table_arg, create_info))))
goto create_error;
}
name_buffer_ptr= strend(name_buffer_ptr) + 1;
if (error)
@@ -1666,6 +1681,16 @@ uint ha_partition::del_ren_cre_table(const char *from,
i++;
} while (*(++file));
DBUG_RETURN(save_error);
create_error:
name_buffer_ptr= m_name_buffer_ptr;
for (abort_file= file, file= m_file; file < abort_file; file++)
{
create_partition_name(from_buff, from, name_buffer_ptr, NORMAL_PART_NAME,
FALSE);
VOID((*file)->delete_table((const char*) from_buff));
name_buffer_ptr= strend(name_buffer_ptr) + 1;
}
DBUG_RETURN(error);
}

/*
@@ -1720,7 +1745,8 @@ partition_element *ha_partition::find_partition_element(uint part_id)
part_id Partition id of partition to set-up

RETURN VALUE
NONE
TRUE Error
FALSE Success

DESCRIPTION
Set up
@@ -1730,31 +1756,40 @@ partition_element *ha_partition::find_partition_element(uint part_id)
4) Data file name on partition
*/

void ha_partition::set_up_table_before_create(TABLE *table,
const char *partition_name_with_path,
HA_CREATE_INFO *info,
uint part_id,
partition_element *part_elem)
int ha_partition::set_up_table_before_create(TABLE *table,
const char *partition_name_with_path,
HA_CREATE_INFO *info,
uint part_id,
partition_element *part_elem)
{
int error= 0;
const char *partition_name;
THD *thd= current_thd;
DBUG_ENTER("set_up_table_before_create");

if (!part_elem)
{
part_elem= find_partition_element(part_id);
if (!part_elem)
return; // Fatal error
DBUG_RETURN(1); // Fatal error
}
table->s->max_rows= part_elem->part_max_rows;
table->s->min_rows= part_elem->part_min_rows;
const char *partition_name= strrchr(partition_name_with_path, FN_LIBCHAR);
if (part_elem->index_file_name)
append_file_to_dir(current_thd,
(const char**)&part_elem->index_file_name,
partition_name+1);
if (part_elem->data_file_name)
append_file_to_dir(current_thd,
(const char**)&part_elem->data_file_name,
partition_name+1);
partition_name= strrchr(partition_name_with_path, FN_LIBCHAR);
if ((part_elem->index_file_name &&
(error= append_file_to_dir(thd,
(const char**)&part_elem->index_file_name,
partition_name+1))) ||
(part_elem->data_file_name &&
(error= append_file_to_dir(thd,
(const char**)&part_elem->data_file_name,
partition_name+1))))
{
DBUG_RETURN(error);
}
info->index_file_name= part_elem->index_file_name;
info->data_file_name= part_elem->data_file_name;
DBUG_RETURN(0);
}
@@ -4183,9 +4218,19 @@ void ha_partition::info(uint flag)
ulonglong nb_reserved_values;
DBUG_PRINT("info", ("HA_STATUS_AUTO"));
/* we don't want to reserve any values, it's pure information */
get_auto_increment(0, 0, 0, &stats.auto_increment_value,
&nb_reserved_values);
release_auto_increment();

if (table->found_next_number_field)
{
/*
Can only call get_auto_increment for tables that actually
have auto_increment columns, otherwise there will be
problems in handlers that don't expect get_auto_increment
for non-autoincrement tables.
*/
get_auto_increment(0, 0, 0, &stats.auto_increment_value,
&nb_reserved_values);
release_auto_increment();
}
}
if (flag & HA_STATUS_VARIABLE)
{
@@ -5145,13 +5190,12 @@ void ha_partition::print_error(int error, myf errflag)
DBUG_ENTER("ha_partition::print_error");

/* Should probably look for my own errors first */
/* monty: needs to be called for the last used partition ! */
DBUG_PRINT("enter", ("error: %d", error));

if (error == HA_ERR_NO_PARTITION_FOUND)
m_part_info->print_no_partition_found(table);
else
m_file[0]->print_error(error, errflag);
m_file[m_last_part]->print_error(error, errflag);
DBUG_VOID_RETURN;
}

@@ -5161,8 +5205,7 @@ bool ha_partition::get_error_message(int error, String *buf)
DBUG_ENTER("ha_partition::get_error_message");

/* Should probably look for my own errors first */
/* monty: needs to be called for the last used partition ! */
DBUG_RETURN(m_file[0]->get_error_message(error, buf));
DBUG_RETURN(m_file[m_last_part]->get_error_message(error, buf));
}

@@ -5363,7 +5406,6 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
if (increment) // If not check for values
*nb_reserved_values= (last_value == ULONGLONG_MAX) ?
ULONGLONG_MAX : ((last_value - *first_value) / increment);

DBUG_VOID_RETURN;
}
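The ha_partition.cc hunks above make set_up_table_before_create() return an error code and add a create_error label to del_ren_cre_table() that deletes whatever partition files were already created before a later partition failed; this is what the "clean up afterwards in a proper manner" test above exercises. A minimal standalone sketch of that rollback-on-partial-failure pattern, using invented helpers (create_part_file, delete_part_file) rather than the server's handler API:

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical stand-ins for the per-partition create/delete calls.
static int create_part_file(const std::string &name)
{
  // Simulate a failure on one partition, as in the DATA DIRECTORY test above.
  if (name == "t1#P#p1")
    return 1;
  std::printf("created %s\n", name.c_str());
  return 0;
}

static void delete_part_file(const std::string &name)
{
  std::printf("deleted %s\n", name.c_str());
}

// Create every partition; on the first error, delete the ones that were
// already created (mirroring the intent of the new create_error label)
// and report the error to the caller.
static int create_all_parts(const std::vector<std::string> &names)
{
  for (size_t created= 0; created < names.size(); created++)
  {
    int error= create_part_file(names[created]);
    if (error)
    {
      for (size_t i= 0; i < created; i++)   // roll back earlier successes
        delete_part_file(names[i]);
      return error;
    }
  }
  return 0;
}

int main()
{
  return create_all_parts({"t1#P#p0", "t1#P#p1", "t1#P#p2"});
}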
@@ -199,6 +199,7 @@ public:
*no_parts= m_tot_parts;
DBUG_RETURN(0);
}
virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
private:
int prepare_for_delete();
int copy_partitions(ulonglong *copied, ulonglong *deleted);
@@ -222,11 +223,11 @@ private:
bool new_handlers_from_part_info(MEM_ROOT *mem_root);
bool create_handlers(MEM_ROOT *mem_root);
void clear_handler_file();
void set_up_table_before_create(TABLE *table_arg,
const char *partition_name_with_path,
HA_CREATE_INFO *info,
uint part_id,
partition_element *p_elem);
int set_up_table_before_create(TABLE *table_arg,
const char *partition_name_with_path,
HA_CREATE_INFO *info,
uint part_id,
partition_element *p_elem);
partition_element *find_partition_element(uint part_id);

public:
@@ -985,7 +985,7 @@ public:
virtual void print_error(int error, myf errflag);
virtual bool get_error_message(int error, String *buf);
uint get_dup_key(int error);
void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
{
table= table_arg;
table_share= share;
@@ -1535,7 +1535,7 @@ bool agg_item_charsets(DTCollation &coll, const char *fname,
been created in prepare. In this case register the change for
rollback.
*/
if (arena)
if (thd->is_stmt_prepare())
*arg= conv;
else
thd->change_item_tree(arg, conv);
@@ -5702,9 +5702,7 @@ ER_BLOB_FIELD_IN_PART_FUNC_ERROR
ger "In der Partitionierungsfunktion sind BLOB-Spalten nicht erlaubt"
swe "Ett BLOB-fält är inte tillåtet i partitioneringsfunktioner"
ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF
eng "A %-.64s need to include all fields in the partition function"
ger "Ein %-.64s muss alle Spalten der Partitionierungsfunktion umfassen"
swe "En %-.64s behöver inkludera alla fält i partitioneringsfunktionen för denna lagringsmotor"
eng "A %-.64s must include all columns in the table's partitioning function"
ER_NO_PARTS_ERROR
eng "Number of %-.64s = 0 is not an allowed value"
ger "Eine Anzahl von %-.64s = 0 ist kein erlaubter Wert"
@@ -5954,6 +5952,8 @@ ER_BAD_LOG_ENGINE
eng "One can use only CSV and MyISAM engines for the log tables"
ER_CANT_DROP_LOG_TABLE
eng "Cannot drop log table if log is enabled"
ER_ONLY_INTEGERS_ALLOWED
eng "Only normal integers allowed as number here"
ER_USERNAME
eng "user name"
ER_HOSTNAME
@@ -868,6 +868,7 @@ int check_signed_flag(partition_info *part_info)
bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
bool is_sub_part, bool is_field_to_be_setup)
{
MEM_ROOT new_mem_root;
partition_info *part_info= table->part_info;
uint dir_length, home_dir_length;
bool result= TRUE;
@@ -917,7 +918,25 @@ bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
func_expr->walk(&Item::change_context_processor, 0, (byte*) context);
save_where= thd->where;
thd->where= "partition function";
/*
In execution we must avoid the use of thd->change_item_tree since
we might release memory before statement is completed. We do this
by temporarily setting the stmt_arena->mem_root to be the mem_root
of the table object, this also ensures that any memory allocated
during fix_fields will not be released at end of execution of this
statement. Thus the item tree will remain valid also in subsequent
executions of this table object. We do however not at the moment
support allocations during execution of val_int so any item class
that does this during val_int must be disallowed as partition
function.
SEE Bug #21658
*/
/*
This is a tricky call to prepare for since it can have a large number
of interesting side effects, both desirable and undesirable.
*/
error= func_expr->fix_fields(thd, (Item**)0);

context->table_list= save_table_list;
context->first_name_resolution_table= save_first_table;
context->last_name_resolution_table= save_last_table;
@@ -1402,7 +1421,6 @@ bool fix_partition_func(THD *thd, TABLE *table,
bool result= TRUE;
partition_info *part_info= table->part_info;
enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
Item *thd_free_list= thd->free_list;
DBUG_ENTER("fix_partition_func");

if (part_info->fixed)
@@ -1422,7 +1440,6 @@ bool fix_partition_func(THD *thd, TABLE *table,
DBUG_RETURN(TRUE);
}
}
thd->free_list= part_info->item_free_list;
if (part_info->is_sub_partitioned())
{
DBUG_ASSERT(part_info->subpart_type == HASH_PARTITION);
@@ -1530,7 +1547,6 @@ bool fix_partition_func(THD *thd, TABLE *table,
set_up_range_analysis_info(part_info);
result= FALSE;
end:
thd->free_list= thd_free_list;
thd->mark_used_columns= save_mark_used_columns;
DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
DBUG_RETURN(result);
@@ -2573,10 +2589,13 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
}
else
{
if (part_func_value == range_array[loc_part_id])
loc_part_id += test(include_endpoint);
else if (part_func_value > range_array[loc_part_id])
loc_part_id++;
if (loc_part_id < max_partition)
{
if (part_func_value == range_array[loc_part_id])
loc_part_id += test(include_endpoint);
else if (part_func_value > range_array[loc_part_id])
loc_part_id++;
}
loc_part_id++;
}
DBUG_RETURN(loc_part_id);
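The comment added to fix_fields_part_func() above describes the approach this commit wires up in sql/table.cc further below: run the partition-function parsing and fix_fields with the table object's mem_root as the active allocator, so the fixed item tree stays valid across later executions and its free_list can be released when the table object is closed. A toy sketch of that allocator-swap idea, assuming invented Arena/ArenaSwap/Table types rather than the server's Query_arena and THR_MALLOC machinery:

#include <cstdio>
#include <memory>
#include <vector>

// Toy stand-ins: a container "arena" that owns allocations, and a
// thread-local pointer playing the role of the current statement arena.
struct Arena
{
  std::vector<std::unique_ptr<int>> items;   // pretend these are Items
  int *alloc(int v)
  {
    items.push_back(std::make_unique<int>(v));
    return items.back().get();
  }
};

static thread_local Arena *current_arena= nullptr;

// RAII swap: while alive, allocations go to the table-owned arena,
// loosely mirroring set_n_backup_active_arena / restore_active_arena.
struct ArenaSwap
{
  Arena *saved;
  explicit ArenaSwap(Arena *table_arena) : saved(current_arena)
  { current_arena= table_arena; }
  ~ArenaSwap() { current_arena= saved; }
};

struct Table
{
  Arena part_func_arena;          // lives as long as the table object
  int *fixed_part_expr= nullptr;
};

static void fix_partition_expr(Table &t)
{
  ArenaSwap guard(&t.part_func_arena);          // statement arena set aside
  t.fixed_part_expr= current_arena->alloc(42);  // survives end of statement
}

int main()
{
  Arena statement_arena;
  current_arena= &statement_arena;
  Table t;
  fix_partition_expr(t);
  statement_arena.items.clear();                // statement memory released
  std::printf("%d\n", *t.fixed_part_expr);      // still valid: owned by the table
  return 0;
}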
@@ -3365,7 +3384,6 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
TABLE* table, bool is_create_table_ind,
handlerton *default_db_type)
{
Item *thd_free_list= thd->free_list;
bool result= TRUE;
partition_info *part_info;
CHARSET_INFO *old_character_set_client= thd->variables.character_set_client;
@@ -3393,7 +3411,6 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
Thus we move away the current list temporarily and start a new list that
we then save in the partition info structure.
*/
thd->free_list= NULL;
lex.part_info= new partition_info();/* Indicates MYSQLparse from this place */
if (!lex.part_info)
{
@@ -3405,7 +3422,7 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
DBUG_PRINT("info", ("Parse: %s", part_buf));
if (MYSQLparse((void*)thd) || thd->is_fatal_error)
{
free_items(thd->free_list);
thd->free_items();
goto end;
}
/*
@@ -3463,23 +3480,16 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
just to ensure we don't get into strange situations with the
item objects.
*/
free_items(thd->free_list);
thd->free_items();
part_info= thd->work_part_info;
thd->free_list= NULL;
table->s->version= 0UL;
}
}
table->part_info= part_info;
table->file->set_part_info(part_info);
if (part_info->default_engine_type == NULL)
{
if (!part_info->default_engine_type)
part_info->default_engine_type= default_db_type;
}
else
{
DBUG_ASSERT(part_info->default_engine_type == default_db_type);
}
part_info->item_free_list= thd->free_list;
DBUG_ASSERT(part_info->default_engine_type == default_db_type);

{
/*
@@ -3501,8 +3511,7 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
!((subpart_func_string= thd->alloc(subpart_func_len)))))
{
mem_alloc_error(part_func_len);
free_items(thd->free_list);
part_info->item_free_list= 0;
thd->free_items();
goto end;
}
if (part_func_len)
@@ -3517,7 +3526,6 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
result= FALSE;
end:
lex_end(thd->lex);
thd->free_list= thd_free_list;
thd->lex= old_lex;
thd->variables.character_set_client= old_character_set_client;
DBUG_RETURN(result);
@@ -4392,6 +4400,13 @@ state of p1.
my_error(ER_REORG_HASH_ONLY_ON_SAME_NO, MYF(0));
DBUG_RETURN(TRUE);
}
if (tab_part_info->is_sub_partitioned() &&
alt_part_info->no_subparts &&
alt_part_info->no_subparts != tab_part_info->no_subparts)
{
my_error(ER_PARTITION_WRONG_NO_SUBPART_ERROR, MYF(0));
DBUG_RETURN(TRUE);
}
check_total_partitions= tab_part_info->no_parts + no_parts_new;
check_total_partitions-= no_parts_reorged;
if (check_total_partitions > MAX_PARTITIONS)
@@ -5149,7 +5149,7 @@ static my_bool run_hton_fill_schema_files(THD *thd, st_plugin_int *plugin,
struct run_hton_fill_schema_files_args *args=
(run_hton_fill_schema_files_args *) arg;
handlerton *hton= (handlerton *)plugin->data;
if(hton->fill_files_table)
if(hton->fill_files_table && hton->state == SHOW_OPTION_YES)
hton->fill_files_table(thd, args->tables, args->cond);
return false;
}
@@ -780,10 +780,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
ev_alter_on_schedule_completion opt_ev_rename_to opt_ev_sql_stmt

%type <ulong_num>
ulong_num merge_insert_types
ulong_num real_ulong_num merge_insert_types

%type <ulonglong_number>
ulonglong_num size_number
ulonglong_num real_ulonglong_num size_number

%type <p_elem_value>
part_bit_expr
@@ -3125,7 +3125,7 @@ opt_ts_redo_buffer_size:
};

opt_ts_nodegroup:
NODEGROUP_SYM opt_equal ulong_num
NODEGROUP_SYM opt_equal real_ulong_num
{
LEX *lex= Lex;
if (lex->alter_tablespace_info->nodegroup_id != UNDEF_NODEGROUP)
@@ -3184,7 +3184,7 @@ ts_wait:
};

size_number:
ulong_num { $$= $1;}
real_ulong_num { $$= $1;}
| IDENT
{
ulonglong number, test_number;
@@ -3423,7 +3423,7 @@ sub_part_func:

opt_no_parts:
/* empty */ {}
| PARTITIONS_SYM ulong_num
| PARTITIONS_SYM real_ulong_num
{
uint no_parts= $2;
LEX *lex= Lex;
@@ -3487,7 +3487,7 @@ part_func_expr:

opt_no_subparts:
/* empty */ {}
| SUBPARTITIONS_SYM ulong_num
| SUBPARTITIONS_SYM real_ulong_num
{
uint no_parts= $2;
LEX *lex= Lex;
@@ -3827,11 +3827,11 @@ opt_part_option:
lex->part_info->curr_part_elem->engine_type= $4;
lex->part_info->default_engine_type= $4;
}
| NODEGROUP_SYM opt_equal ulong_num
| NODEGROUP_SYM opt_equal real_ulong_num
{ Lex->part_info->curr_part_elem->nodegroup_id= $3; }
| MAX_ROWS opt_equal ulonglong_num
| MAX_ROWS opt_equal real_ulonglong_num
{ Lex->part_info->curr_part_elem->part_max_rows= $3; }
| MIN_ROWS opt_equal ulonglong_num
| MIN_ROWS opt_equal real_ulonglong_num
{ Lex->part_info->curr_part_elem->part_min_rows= $3; }
| DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
{ Lex->part_info->curr_part_elem->data_file_name= $4.str; }
@@ -4961,7 +4961,7 @@ alter_commands:
lex->check_opt.init();
}
opt_mi_repair_type
| COALESCE PARTITION_SYM opt_no_write_to_binlog ulong_num
| COALESCE PARTITION_SYM opt_no_write_to_binlog real_ulong_num
{
LEX *lex= Lex;
lex->alter_info.flags|= ALTER_COALESCE_PARTITION;
@@ -5009,7 +5009,7 @@ add_part_extra:
LEX *lex= Lex;
lex->part_info->no_parts= lex->part_info->partitions.elements;
}
| PARTITIONS_SYM ulong_num
| PARTITIONS_SYM real_ulong_num
{
LEX *lex= Lex;
lex->part_info->no_parts= $2;
@@ -7548,7 +7548,15 @@ ulong_num:
| ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
| DECIMAL_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
| FLOAT_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
;
;

real_ulong_num:
NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
| HEX_NUM { $$= (ulong) strtol($1.str, (char**) 0, 16); }
| LONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
| ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
| dec_num_error { YYABORT; }
;

ulonglong_num:
NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
@@ -7558,6 +7566,23 @@ ulonglong_num:
| FLOAT_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
;

real_ulonglong_num:
NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
| ULONGLONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
| LONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
| dec_num_error { YYABORT; }
;

dec_num_error:
dec_num
{ yyerror(ER(ER_ONLY_INTEGERS_ALLOWED)); }
;

dec_num:
DECIMAL_NUM
| FLOAT_NUM
;

procedure_clause:
/* empty */
| PROCEDURE ident /* Procedure name */
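The grammar hunks above introduce real_ulong_num and real_ulonglong_num so that places such as PARTITIONS, SUBPARTITIONS, NODEGROUP, MAX_ROWS and MIN_ROWS reject DECIMAL_NUM and FLOAT_NUM tokens through dec_num_error (raising ER_ONLY_INTEGERS_ALLOWED) instead of passing them through my_strtoll10 as the old ulong_num rule does. A standalone sketch of the same acceptance rule, using a hypothetical is_plain_integer() helper rather than the server's lexer:

#include <cctype>
#include <cstdio>
#include <string>

// Accept only a plain unsigned integer literal (roughly what the new
// real_ulong_num rule allows); anything with '.', an exponent or a sign
// is rejected here, e.g. "1.5", "1e+300", "0.2+e1", "-1".
static bool is_plain_integer(const std::string &token)
{
  if (token.empty())
    return false;
  for (char c : token)
    if (!std::isdigit(static_cast<unsigned char>(c)))
      return false;
  return true;
}

int main()
{
  const char *samples[]= {"10", "1.5", "1e+300", "-1", "0.2+e1"};
  for (const char *s : samples)
    std::printf("PARTITIONS %s -> %s\n", s,
                is_plain_integer(s) ? "accepted"
                                    : "rejected (only plain integers allowed)");
  return 0;
}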
sql/table.cc
@@ -1471,11 +1471,23 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (share->partition_info_len)
{
MEM_ROOT **root_ptr, *old_root;
/*
In this execution we must avoid calling thd->change_item_tree since
we might release memory before statement is completed. We do this
by changing to a new statement arena. As part of this arena we also
set the memory root to be the memory root of the table since we
call the parser and fix_fields which both can allocate memory for
item objects. We keep the arena to ensure that we can release the
free_list when closing the table object.
SEE Bug #21658
*/

Query_arena *backup_stmt_arena_ptr= thd->stmt_arena;
Query_arena backup_arena;
Query_arena part_func_arena(&outparam->mem_root, Query_arena::INITIALIZED);
thd->set_n_backup_active_arena(&part_func_arena, &backup_arena);
thd->stmt_arena= &part_func_arena;
bool tmp;
root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
old_root= *root_ptr;
*root_ptr= &outparam->mem_root;

tmp= mysql_unpack_partition(thd, share->partition_info,
share->partition_info_len,
@@ -1487,7 +1499,10 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
DBUG_PRINT("info", ("autopartitioned: %u", share->auto_partitioned));
if (!tmp)
tmp= fix_partition_func(thd, outparam, is_create_table);
*root_ptr= old_root;
thd->stmt_arena= backup_stmt_arena_ptr;
thd->restore_active_arena(&part_func_arena, &backup_arena);
if (!tmp)
outparam->part_info->item_free_list= part_func_arena.free_list;
if (tmp)
{
if (is_create_table)