Patch for push of wl1354 Partitioning

This commit is contained in:
unknown 2005-07-18 13:31:02 +02:00
parent 22545f4777
commit cd483c5520
99 changed files with 14672 additions and 594 deletions

View File

@@ -128,8 +128,9 @@ AC_DEFUN([MYSQL_CHECK_NDBCLUSTER], [
ndb_mgmclient_libs=
case "$ndbcluster" in
yes )
AC_MSG_RESULT([Using NDB Cluster])
AC_MSG_RESULT([Using NDB Cluster and Partitioning])
AC_DEFINE([HAVE_NDBCLUSTER_DB], [1], [Using Ndb Cluster DB])
AC_DEFINE([HAVE_PARTITION_DB], [1], [Builds Partition DB])
have_ndbcluster="yes"
ndbcluster_includes="-I\$(top_builddir)/storage/ndb/include -I\$(top_builddir)/storage/ndb/include/ndbapi"
ndbcluster_libs="\$(top_builddir)/storage/ndb/src/.libs/libndbclient.a"

View File

@@ -0,0 +1,30 @@
dnl ---------------------------------------------------------------------------
dnl Macro: MYSQL_CHECK_PARTITIONDB
dnl Sets HAVE_PARTITION_DB if --with-partition is used
dnl ---------------------------------------------------------------------------
AC_DEFUN([MYSQL_CHECK_PARTITIONDB], [
AC_ARG_WITH([partition],
[
--with-partition
Enable the Partition Storage Engine],
[partitiondb="$withval"],
[partitiondb=no])
AC_MSG_CHECKING([for partition])
case "$partitiondb" in
yes )
AC_DEFINE([HAVE_PARTITION_DB], [1], [Builds Partition DB])
AC_MSG_RESULT([yes])
[partitiondb=yes]
;;
* )
AC_MSG_RESULT([no])
[partitiondb=no]
;;
esac
])
dnl ---------------------------------------------------------------------------
dnl END OF MYSQL_CHECK_PARTITIONDB SECTION
dnl ---------------------------------------------------------------------------
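(Editor's note: a minimal sketch, not part of the patch, of how a
configure-time define such as HAVE_PARTITION_DB produced by the macro
above is typically consumed in C source.)

/* Illustrative only: behaviour switches on the configure result. */
#include <stdio.h>

int main(void)
{
#ifdef HAVE_PARTITION_DB
  /* Compiled when ./configure --with-partition was given. */
  printf("partition storage engine support is built in\n");
#else
  printf("partition storage engine support is not built in\n");
#endif
  return 0;
}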

View File

@@ -2437,6 +2437,7 @@ MYSQL_CHECK_CSVDB
MYSQL_CHECK_BLACKHOLEDB
MYSQL_CHECK_NDBCLUSTER
MYSQL_CHECK_FEDERATED
MYSQL_CHECK_PARTITIONDB
# If we have threads generate some library functions and test programs
sql_server_dirs=

View File

@@ -388,6 +388,7 @@ enum data_file_type {
#define EQ_RANGE 32
#define NULL_RANGE 64
#define GEOM_FLAG 128
#define SKIP_RANGE 256
typedef struct st_key_range
{

View File

@@ -101,6 +101,94 @@
#define unlikely(x) __builtin_expect((x),0)
/*
The macros below are useful in optimising places where it has been
discovered that cache misses stall the process and where a prefetch
of the cache line can improve matters. The underlying builtin is
available in GCC 3.1.1 and later versions.
PREFETCH_READ says that addr is going to be used for reading and that
it should be kept in caches for a while if possible.
PREFETCH_WRITE additionally says that the item to be cached is likely
to be updated.
The *LOCALITY variants are available mostly for experimentation
purposes and should only be used if they are verified to improve matters.
For more detail see the GCC manual.
*/
#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
#define PREFETCH_READ(addr) __builtin_prefetch(addr, 0, 3)
#define PREFETCH_WRITE(addr) \
__builtin_prefetch(addr, 1, 3)
#define PREFETCH_READ_LOCALITY(addr, locality) \
__builtin_prefetch(addr, 0, locality)
#define PREFETCH_WRITE_LOCALITY(addr, locality) \
__builtin_prefetch(addr, 1, locality)
#else
#define PREFETCH_READ(addr)
#define PREFETCH_READ_LOCALITY(addr, locality)
#define PREFETCH_WRITE(addr)
#define PREFETCH_WRITE_LOCALITY(addr, locality)
#endif
/*
The following macro is used to ensure that code used in most SQL
statements, and definitely in hot parts of the SQL processing, is
kept in a code segment of its own. This has the advantage that common
code is less likely to overlap in the CPU caches, which can be a cause
of big performance problems.
Routines should be put in this category with care, and when they are,
one should also strive to move as much as possible of the error
handling (or other uncommon code of the routine) into a separate
method, to avoid moving too much code into this code segment.
It is very easy to use: simply add HOT_METHOD at the end of the
function declaration.
For more detail see the GCC manual (the attribute is available in
GCC 2.95 and later).
*/
#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95)
#define HOT_METHOD \
__attribute__ ((section ("hot_code_section")))
#else
#define HOT_METHOD
#endif
/*
The following macro is used to ensure that popular global variables
are located next to each other, to avoid having them contend for the
same cache lines.
It is very easy to use: simply add HOT_DATA at the end of the
declaration of the variable. The variable must be initialised because
of the way the linker works, so a declaration using HOT_DATA should
look like:
uint global_hot_data HOT_DATA = 0;
For more detail see the GCC manual (the attribute is available in
GCC 2.95 and later).
*/
#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95)
#define HOT_DATA \
__attribute__ ((section ("hot_data_section")))
#else
#define HOT_DATA
#endif
/*
The following macros are used to control inlining a bit more than
usual: they ensure that inlining always or never occurs, independent
of compilation mode.
For more detail see the GCC manual (the attributes are available in
GCC 3.1.1 and later).
*/
#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
#define ALWAYS_INLINE __attribute__ ((always_inline))
#define NEVER_INLINE __attribute__ ((noinline))
#else
#define ALWAYS_INLINE
#define NEVER_INLINE
#endif
/* Fix problem with S_ISLNK() on Linux */
#if defined(TARGET_OS_LINUX)
#undef _GNU_SOURCE
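(Editor's note: the following is a hedged usage sketch, not part of
the patch, pulling together the PREFETCH_*, HOT_METHOD and HOT_DATA
macros introduced above. The function name scan_buffer and the
64-byte prefetch stride are assumptions made for illustration.)

#include <stddef.h>

/* Fallbacks so the sketch compiles stand-alone; in the server these
   macros come from my_global.h as patched above. */
#ifndef HOT_DATA
#define HOT_DATA
#define HOT_METHOD
#define PREFETCH_READ(addr)
#endif

/* Popular global placed in the hot data section; note that it must
   be initialised, as the comment above explains. */
unsigned int global_hot_data HOT_DATA = 0;

/* Frequently executed routine placed in the hot code section. */
int scan_buffer(const char *buf, size_t len) HOT_METHOD;

int scan_buffer(const char *buf, size_t len)
{
  size_t i;
  int hits = 0;
  for (i = 0; i < len; i++)
  {
    /* Hint that a cache line ahead of the scan will soon be read. */
    if (i + 64 < len)
      PREFETCH_READ(buf + i + 64);
    if (buf[i] == 0)
      hits++;
  }
  return hits;
}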

View File

@@ -88,6 +88,8 @@ enum enum_server_command
#define GROUP_FLAG 32768 /* Intern: Group field */
#define UNIQUE_FLAG 65536 /* Intern: Used by sql_yacc */
#define BINCMP_FLAG 131072 /* Intern: Used by sql_yacc */
#define GET_FIXED_FIELDS_FLAG (1 << 18) /* Used to get fields in item tree */
#define FIELD_IN_PART_FUNC_FLAG (1 << 19) /* Field part of partition func */
#define REFRESH_GRANT 1 /* Refresh grant tables */
#define REFRESH_LOG 2 /* Start on new log file */
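(Editor's note: a small illustrative sketch, not part of the patch, of
how bit-mask flags like the two added above are combined and tested;
the variable field_flags is invented for the example.)

#include <stdio.h>

#define GET_FIXED_FIELDS_FLAG (1 << 18)
#define FIELD_IN_PART_FUNC_FLAG (1 << 19)

int main(void)
{
  unsigned int field_flags = 0;

  field_flags |= FIELD_IN_PART_FUNC_FLAG;      /* set the flag  */
  if (field_flags & FIELD_IN_PART_FUNC_FLAG)   /* test the flag */
    printf("field is used in the partition function\n");
  field_flags &= ~GET_FIXED_FIELDS_FLAG;       /* clear a flag  */
  return 0;
}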

View File

@@ -41,6 +41,9 @@ typedef struct st_queue {
#define queue_element(queue,index) ((queue)->root[index+1])
#define queue_end(queue) ((queue)->root[(queue)->elements])
#define queue_replaced(queue) _downheap(queue,1)
#define queue_set_cmp_arg(queue, set_arg) (queue)->first_cmp_arg= set_arg
#define queue_set_max_at_top(queue, set_arg) \
(queue)->max_at_top= set_arg ? (-1 ^ 1) : 0
typedef int (*queue_compare)(void *,byte *, byte *);
int init_queue(QUEUE *queue,uint max_elements,uint offset_to_key,
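(Editor's note: a hedged aside, not part of the patch, on the
(-1 ^ 1) constant stored by queue_set_max_at_top above. The value is
-2, and XOR-ing a two's complement comparator result with -2 flips
its sign without a branch, which is presumably how the queue code
turns a min-heap comparison into a max-heap one.)

#include <stdio.h>

int main(void)
{
  int flip = -1 ^ 1;   /* == -2 on two's complement machines */
  int results[] = { -3, -1, 0, 1, 2 };
  size_t i;

  for (i = 0; i < sizeof(results) / sizeof(results[0]); i++)
    printf("%2d ^ %d = %2d\n", results[i], flip, results[i] ^ flip);
  /* Prints 3, 1, -2, -1, -4: every sign is inverted. */
  return 0;
}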

View File

@@ -0,0 +1,4 @@
-- require r/have_partition.require
disable_query_log;
show variables like "have_partition_engine";
enable_query_log;

View File

@@ -0,0 +1,2 @@
Variable_name Value
have_partition_engine YES

View File

@@ -0,0 +1,71 @@
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b,c))
ENGINE = NDB
PARTITION BY KEY (a,b);
insert into t1 values (1,1,1,1);
select * from t1;
a b c d
1 1 1 1
update t1 set d = 2 where a = 1 and b = 1 and c = 1;
select * from t1;
a b c d
1 1 1 2
delete from t1;
select * from t1;
a b c d
drop table t1;
CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b))
ENGINE = NDB
PARTITION BY KEY (c);
ERROR HY000: A PRIMARY KEY need to include all fields in the partition function
CREATE TABLE t1 (a int, b int, c int, PRIMARY KEY(a,b))
ENGINE = NDB
PARTITION BY KEY (a);
insert into t1 values
(1,1,3),(1,2,3),(1,3,3),(1,4,3),(1,5,3),(1,6,3),
(1,7,3),(1,8,3),(1,9,3),(1,10,3),(1,11,3),(1,12,3);
select * from t1 order by b;
a b c
1 1 3
1 2 3
1 3 3
1 4 3
1 5 3
1 6 3
1 7 3
1 8 3
1 9 3
1 10 3
1 11 3
1 12 3
DROP TABLE t1;
CREATE TABLE t1 (a INT, b CHAR(10) COLLATE latin1_bin, c INT, d INT,
PRIMARY KEY USING HASH (a,b,c))
ENGINE=NDB
DEFAULT CHARSET=latin1
PARTITION BY KEY (b);
insert into t1 values (1,"a",1,1),(2,"a",1,1),(3,"a",1,1);
-- t1 --
Fragment type: 5
K Value: 6
Min load factor: 78
Max load factor: 80
Temporary table: no
Number of attributes: 4
Number of primary keys: 3
Length of frm data: 301
TableStatus: Retrieved
-- Attributes --
a Int PRIMARY KEY
b Char(10;latin1_bin) PRIMARY KEY DISTRIBUTION KEY
c Int PRIMARY KEY
d Int NULL
-- Indexes --
PRIMARY KEY(a, b, c) - UniqueHashIndex
NDBT_ProgramExit: 0 - OK
DROP TABLE t1;

View File

@@ -0,0 +1,105 @@
drop table if exists t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b),
index (a))
engine = ndb
partition by range (a)
partitions 3
(partition x1 values less than (5),
partition x2 values less than (10),
partition x3 values less than (20));
INSERT into t1 values (1, 1, 1);
INSERT into t1 values (6, 1, 1);
INSERT into t1 values (10, 1, 1);
INSERT into t1 values (15, 1, 1);
select * from t1 order by a;
a b c
1 1 1
6 1 1
10 1 1
15 1 1
select * from t1 where a=1 order by a;
a b c
1 1 1
select * from t1 where a=15 and b=1 order by a;
a b c
15 1 1
select * from t1 where a=21 and b=1 order by a;
a b c
select * from t1 where a=21 order by a;
a b c
select * from t1 where a in (1,6,10,21) order by a;
a b c
1 1 1
6 1 1
10 1 1
select * from t1 where b=1 and a in (1,6,10,21) order by a;
a b c
1 1 1
6 1 1
10 1 1
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(b),
unique (a))
engine = ndb
partition by range (b)
partitions 3
(partition x1 values less than (5),
partition x2 values less than (10),
partition x3 values less than (20));
INSERT into t1 values (1, 1, 1);
INSERT into t1 values (2, 6, 1);
INSERT into t1 values (3, 10, 1);
INSERT into t1 values (4, 15, 1);
select * from t1 order by a;
a b c
1 1 1
2 6 1
3 10 1
4 15 1
UPDATE t1 set a = 5 WHERE b = 15;
select * from t1 order by a;
a b c
1 1 1
2 6 1
3 10 1
5 15 1
UPDATE t1 set a = 6 WHERE a = 5;
select * from t1 order by a;
a b c
1 1 1
2 6 1
3 10 1
6 15 1
select * from t1 where b=1 order by b;
a b c
1 1 1
select * from t1 where b=15 and a=1 order by b;
a b c
select * from t1 where b=21 and a=1 order by b;
a b c
select * from t1 where b=21 order by b;
a b c
select * from t1 where b in (1,6,10,21) order by b;
a b c
1 1 1
2 6 1
3 10 1
select * from t1 where a in (1,2,5,6) order by b;
a b c
1 1 1
2 6 1
6 15 1
select * from t1 where a=1 and b in (1,6,10,21) order by b;
a b c
1 1 1
DELETE from t1 WHERE b = 6;
DELETE from t1 WHERE a = 6;
drop table t1;

View File

@@ -0,0 +1,355 @@
drop table if exists t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a);
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a, b);
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition x1, partition x2, partition x3);
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition x1 nodegroup 0,
partition x2 nodegroup 1,
partition x3 nodegroup 2);
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition x1 engine myisam,
partition x2 engine myisam,
partition x3 engine myisam);
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 3
(partition x1 values in (1,2,9,4) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (b*a)
partitions 3
(partition x1 values in (1,2,9,4) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (b*a)
(partition x1 values in (1) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
drop table t1;
partition by list (a)
partitions 3
(partition x1 values in (1,2,9,4) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
ERROR 42000: Partitioning can not be used stand-alone in query near 'partition by list (a)
partitions 3
(partition x1 values in (1,2,9,4) tablespace ' at line 1
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2;
ERROR HY000: For LIST partitions each partition must be defined
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (sin(a))
partitions 3
(partition x1 values in (1,2,9,4) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
ERROR HY000: The PARTITION function returns the wrong type
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a+2)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '+2)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
part' at line 6
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3)' at line 8
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a,d)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
ERROR HY000: Field in list of fields for partition function not found in table
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a + d)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
ERROR 42S22: Unknown column 'd' in 'partition function'
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (sin(a))
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
ERROR HY000: The PARTITION function returns the wrong type
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition x1, partition x2);
ERROR 42000: Wrong number of partitions defined, mismatch with previous setting near ')' at line 8
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (rand(a))
partitions 2
(partition x1, partition x2);
ERROR 42000: Constant/Random expression in (sub)partitioning function is not allowed near ')
partitions 2
(partition x1, partition x2)' at line 6
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (rand(a))
partitions 2
(partition x1 values less than (0), partition x2 values less than (2));
ERROR 42000: Constant/Random expression in (sub)partitioning function is not allowed near ')
partitions 2
(partition x1 values less than (0), partition x2 values less than' at line 6
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (rand(a))
partitions 2
(partition x1 values in (1), partition x2 values in (2));
ERROR 42000: Constant/Random expression in (sub)partitioning function is not allowed near ')
partitions 2
(partition x1 values in (1), partition x2 values in (2))' at line 6
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a)
partitions 2
(partition x1 values less than (4),
partition x2 values less than (5));
ERROR HY000: Only RANGE PARTITIONING can use VALUES LESS THAN in partition definition
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a)
partitions 2
(partition x1 values in (4),
partition x2 values in (5));
ERROR HY000: Only LIST PARTITIONING can use VALUES IN in partition definition
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a)
partitions 2
(partition x1 values in (4,6),
partition x2 values in (5,7));
ERROR HY000: Only LIST PARTITIONING can use VALUES IN in partition definition
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by key (b);
ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by key (a, b);
ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by hash (a+b);
ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by key (b);
ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by key (a, b);
ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by hash (a+b);
ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by hash (rand(a+b));
ERROR 42000: Constant/Random expression in (sub)partitioning function is not allowed near ')' at line 7
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by hash (sin(a+b))
(partition x1 (subpartition x11, subpartition x12),
partition x2 (subpartition x21, subpartition x22));
ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by key (a+b)
(partition x1 values less than (1) (subpartition x11, subpartition x12),
partition x2 values less than (2) (subpartition x21, subpartition x22));
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '+b)
(partition x1 values less than (1) (subpartition x11, subpartition x12),
par' at line 7
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by key (a,d)
(partition x1 values less than (1) (subpartition x11, subpartition x12),
partition x2 values less than (2) (subpartition x21, subpartition x22));
ERROR HY000: Field in list of fields for partition function not found in table
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by hash (3+4);
ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+d)
(partition x1 values less than (1) (subpartition x11, subpartition x12),
partition x2 values less than (2) (subpartition x21, subpartition x22));
ERROR 42S22: Unknown column 'd' in 'partition function'

View File

@@ -0,0 +1,66 @@
drop table if exists t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a + 2)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
insert into t1 values (1,1,1);
insert into t1 values (2,1,1);
insert into t1 values (3,1,1);
insert into t1 values (4,1,1);
insert into t1 values (5,1,1);
select * from t1;
a b c
1 1 1
4 1 1
2 1 1
5 1 1
3 1 1
update t1 set c=3 where b=1;
select * from t1;
a b c
1 1 3
4 1 3
2 1 3
5 1 3
3 1 3
select b from t1 where a=3;
b
1
select b,c from t1 where a=1 AND b=1;
b c
1 3
delete from t1 where a=1;
delete from t1 where c=3;
select * from t1;
a b c
ALTER TABLE t1
partition by hash (a + 3)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
select * from t1;
a b c
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a)
(partition x1);
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
(partition x1);
drop table t1;

View File

@@ -0,0 +1,342 @@
drop table if exists t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null)
partition by list(a)
partitions 2
(partition x123 values in (1,5,6),
partition x234 values in (4,7,8));
INSERT into t1 VALUES (1,1,1);
INSERT into t1 VALUES (2,1,1);
ERROR HY000: Got error 1 from storage engine
INSERT into t1 VALUES (3,1,1);
ERROR HY000: Got error 1 from storage engine
INSERT into t1 VALUES (4,1,1);
INSERT into t1 VALUES (5,1,1);
INSERT into t1 VALUES (6,1,1);
INSERT into t1 VALUES (7,1,1);
INSERT into t1 VALUES (8,1,1);
INSERT into t1 VALUES (9,1,1);
ERROR HY000: Got error 1 from storage engine
INSERT into t1 VALUES (1,2,1);
INSERT into t1 VALUES (1,3,1);
INSERT into t1 VALUES (1,4,1);
INSERT into t1 VALUES (7,2,1);
INSERT into t1 VALUES (7,3,1);
INSERT into t1 VALUES (7,4,1);
SELECT * from t1;
a b c
1 1 1
5 1 1
6 1 1
1 2 1
1 3 1
1 4 1
4 1 1
7 1 1
8 1 1
7 2 1
7 3 1
7 4 1
SELECT * from t1 WHERE a=1;
a b c
1 1 1
1 2 1
1 3 1
1 4 1
SELECT * from t1 WHERE a=7;
a b c
7 1 1
7 2 1
7 3 1
7 4 1
SELECT * from t1 WHERE b=2;
a b c
1 2 1
7 2 1
UPDATE t1 SET a=8 WHERE a=7 AND b=3;
SELECT * from t1;
a b c
1 1 1
5 1 1
6 1 1
1 2 1
1 3 1
1 4 1
4 1 1
7 1 1
8 1 1
7 2 1
8 3 1
7 4 1
UPDATE t1 SET a=8 WHERE a=5 AND b=1;
SELECT * from t1;
a b c
1 1 1
6 1 1
1 2 1
1 3 1
1 4 1
4 1 1
7 1 1
8 1 1
7 2 1
8 3 1
7 4 1
8 1 1
DELETE from t1 WHERE a=8;
SELECT * from t1;
a b c
1 1 1
6 1 1
1 2 1
1 3 1
1 4 1
4 1 1
7 1 1
7 2 1
7 4 1
DELETE from t1 WHERE a=2;
SELECT * from t1;
a b c
1 1 1
6 1 1
1 2 1
1 3 1
1 4 1
4 1 1
7 1 1
7 2 1
7 4 1
DELETE from t1 WHERE a=5 OR a=6;
SELECT * from t1;
a b c
1 1 1
1 2 1
1 3 1
1 4 1
4 1 1
7 1 1
7 2 1
7 4 1
ALTER TABLE t1
partition by list(a)
partitions 2
(partition x123 values in (1,5,6),
partition x234 values in (4,7,8));
SELECT * from t1;
a b c
1 1 1
1 2 1
1 3 1
1 4 1
4 1 1
7 1 1
7 2 1
7 4 1
INSERT into t1 VALUES (6,2,1);
INSERT into t1 VALUES (2,2,1);
ERROR HY000: Got error 1 from storage engine
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by list (a)
subpartition by hash (a+b)
( partition x1 values in (1,2,3)
( subpartition x11 nodegroup 0,
subpartition x12 nodegroup 1),
partition x2 values in (4,5,6)
( subpartition x21 nodegroup 0,
subpartition x22 nodegroup 1)
);
INSERT into t1 VALUES (1,1,1);
INSERT into t1 VALUES (4,1,1);
INSERT into t1 VALUES (7,1,1);
ERROR HY000: Got error 1 from storage engine
UPDATE t1 SET a=5 WHERE a=1;
SELECT * from t1;
a b c
5 1 1
4 1 1
UPDATE t1 SET a=6 WHERE a=4;
SELECT * from t1;
a b c
5 1 1
6 1 1
DELETE from t1 WHERE a=6;
SELECT * from t1;
a b c
5 1 1
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by list (a)
subpartition by hash (a+b)
subpartitions 3
( partition x1 values in (1,2,4)
( subpartition x11 nodegroup 0,
subpartition x12 nodegroup 1),
partition x2 values in (3,5,6)
( subpartition x21 nodegroup 0,
subpartition x22 nodegroup 1)
);
ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '),
partition x2 values in (3,5,6)
( subpartition x21 nodegroup 0,
subpartition x' at line 11
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by list (a)
subpartition by hash (a+b)
( partition x1 values in (1)
( subpartition x11 nodegroup 0,
subpartition xextra,
subpartition x12 nodegroup 1),
partition x2 values in (2)
( subpartition x21 nodegroup 0,
subpartition x22 nodegroup 1)
);
ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near ')
)' at line 14
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by list (a+b)
( partition x1
( subpartition x11 engine myisam,
subpartition x12 engine myisam),
partition x2
( subpartition x21 engine myisam,
subpartition x22 engine myisam)
);
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'list (a+b)
( partition x1
( subpartition x11 engine myisam,
subpartition x12 eng' at line 7
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by list (a+b)
( partition x1
( subpartition x11 engine myisam values in (0),
subpartition x12 engine myisam values in (1)),
partition x2
( subpartition x21 engine myisam values in (0),
subpartition x22 engine myisam values in (1))
);
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'list (a+b)
( partition x1
( subpartition x11 engine myisam values in (0),
subpar' at line 7
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
(partition x1 values in (1,2,9,4) tablespace ts1);
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a);
ERROR HY000: For LIST partitions each partition must be defined
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (3+4)
partitions 2
(partition x1 values in (4) tablespace ts1,
partition x2 values in (8) tablespace ts2);
ERROR HY000: Constant/Random expression in (sub)partitioning function is not allowed
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a+d)
partitions 2
(partition x1 values in (4) tablespace ts1,
partition x2 values in (8) tablespace ts2);
ERROR 42S22: Unknown column 'd' in 'partition function'
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values in (4),
partition x2);
ERROR HY000: LIST PARTITIONING requires definition of VALUES IN for each partition
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values in (4),
partition x2 values less than (5));
ERROR HY000: Only RANGE PARTITIONING can use VALUES LESS THAN in partition definition
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values in (4,6),
partition x2);
ERROR HY000: LIST PARTITIONING requires definition of VALUES IN for each partition
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values in (4, 12+9),
partition x2 values in (3, 21));
ERROR HY000: Multiple definition of same constant in list partitioning
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values in (4.0, 12+8),
partition x2 values in (3, 21));
ERROR HY000: VALUES IN value must be of same type as partition function
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values in 4,
partition x2 values in (5));
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '4,
partition x2 values in (5))' at line 8

View File

@@ -0,0 +1,733 @@
drop table if exists t1;
CREATE TABLE t1 (
a int not null,
b int not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 order by b;
a b
1 1
35 2
30 4
2 5
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b int unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b tinyint not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b tinyint unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b smallint not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b smallint unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b mediumint not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b mediumint unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b bigint unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b bigint not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b bigint not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b float not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b double not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b double unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b float unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b double precision not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b double precision unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b decimal not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b char(10) not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > 0 order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b varchar(10) not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b varchar(10) not null,
primary key(a),
index (b(5)))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b varchar(10) binary not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b tinytext not null,
primary key(a),
index (b(10)))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b text not null,
primary key(a),
index (b(10)))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b mediumtext not null,
primary key(a),
index (b(10)))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b longtext not null,
primary key(a),
index (b(10)))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b enum('1','2', '4', '5') not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b >= '1' order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b set('1','2', '4', '5') not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b >= '1' order by b;
a b
1 1
35 2
30 4
2 5
drop table t1;
CREATE TABLE t1 (
a int not null,
b date not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '2001-01-01');
INSERT into t1 values (2, '2005-01-01');
INSERT into t1 values (30, '2004-01-01');
INSERT into t1 values (35, '2002-01-01');
select * from t1 force index (b) where b > '2000-01-01' order by b;
a b
1 2001-01-01
35 2002-01-01
30 2004-01-01
2 2005-01-01
drop table t1;
CREATE TABLE t1 (
a int not null,
b datetime not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '2001-01-01 00:00:00');
INSERT into t1 values (2, '2005-01-01 00:00:00');
INSERT into t1 values (30, '2004-01-01 00:00:00');
INSERT into t1 values (35, '2002-01-01 00:00:00');
select * from t1 force index (b) where b > '2000-01-01 00:00:00' order by b;
a b
1 2001-01-01 00:00:00
35 2002-01-01 00:00:00
30 2004-01-01 00:00:00
2 2005-01-01 00:00:00
drop table t1;
CREATE TABLE t1 (
a int not null,
b timestamp not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '2001-01-01 00:00:00');
INSERT into t1 values (2, '2005-01-01 00:00:00');
INSERT into t1 values (30, '2004-01-01 00:00:00');
INSERT into t1 values (35, '2002-01-01 00:00:00');
select * from t1 force index (b) where b > '2000-01-01 00:00:00' order by b;
a b
1 2001-01-01 00:00:00
35 2002-01-01 00:00:00
30 2004-01-01 00:00:00
2 2005-01-01 00:00:00
drop table t1;
CREATE TABLE t1 (
a int not null,
b time not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, '01:00:00');
INSERT into t1 values (2, '05:00:00');
INSERT into t1 values (30, '04:00:00');
INSERT into t1 values (35, '02:00:00');
select * from t1 force index (b) where b > '00:00:00' order by b;
a b
1 01:00:00
35 02:00:00
30 04:00:00
2 05:00:00
drop table t1;
CREATE TABLE t1 (
a int not null,
b year not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 2001);
INSERT into t1 values (2, 2005);
INSERT into t1 values (30, 2004);
INSERT into t1 values (35, 2002);
select * from t1 force index (b) where b > 2000 order by b;
a b
1 2001
35 2002
30 2004
2 2005
drop table t1;
CREATE TABLE t1 (
a int not null,
b bit(5) not null,
c int,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, b'00001', NULL);
INSERT into t1 values (2, b'00101', 2);
INSERT into t1 values (30, b'00100', 2);
INSERT into t1 values (35, b'00010', NULL);
select a from t1 force index (b) where b > b'00000' order by b;
a
1
35
30
2
drop table t1;
CREATE TABLE t1 (
a int not null,
b bit(15) not null,
c int,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, b'000000000000001', NULL);
INSERT into t1 values (2, b'001010000000101', 2);
INSERT into t1 values (30, b'001000000000100', 2);
INSERT into t1 values (35, b'000100000000010', NULL);
select a from t1 force index (b) where b > b'000000000000000' order by b;
a
1
35
30
2
drop table t1;
CREATE TABLE t1 (
a int not null,
b int,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (5, NULL);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
INSERT into t1 values (40, NULL);
select * from t1 force index (b) where b < 10 OR b IS NULL order by b;
a b
5 NULL
40 NULL
1 1
35 2
30 4
2 5
drop table t1;

View File

@@ -0,0 +1,455 @@
drop table if exists t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than maxvalue tablespace ts3);
INSERT into t1 values (1, 1, 1);
INSERT into t1 values (6, 1, 1);
INSERT into t1 values (10, 1, 1);
INSERT into t1 values (15, 1, 1);
select * from t1;
a b c
1 1 1
6 1 1
10 1 1
15 1 1
ALTER TABLE t1
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than maxvalue tablespace ts3);
select * from t1;
a b c
1 1 1
6 1 1
10 1 1
15 1 1
drop table if exists t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null)
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than maxvalue tablespace ts3);
INSERT into t1 values (1, 1, 1);
INSERT into t1 values (6, 1, 1);
INSERT into t1 values (10, 1, 1);
INSERT into t1 values (15, 1, 1);
select * from t1;
a b c
1 1 1
6 1 1
10 1 1
15 1 1
ALTER TABLE t1
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than maxvalue tablespace ts3);
select * from t1;
a b c
1 1 1
6 1 1
10 1 1
15 1 1
drop table if exists t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than (15) tablespace ts3);
INSERT into t1 values (1, 1, 1);
INSERT into t1 values (6, 1, 1);
INSERT into t1 values (10, 1, 1);
INSERT into t1 values (15, 1, 1);
ERROR HY000: Got error 1 from storage engine
select * from t1;
a b c
1 1 1
6 1 1
10 1 1
ALTER TABLE t1
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than (15) tablespace ts3);
select * from t1;
a b c
1 1 1
6 1 1
10 1 1
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
(partition x1 values less than (1));
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a);
ERROR HY000: For RANGE partitions each partition must be defined
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a+d)
partitions 2
(partition x1 values less than (4) tablespace ts1,
partition x2 values less than (8) tablespace ts2);
ERROR 42S22: Unknown column 'd' in 'partition function'
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values less than (4.0) tablespace ts1,
partition x2 values less than (8) tablespace ts2);
ERROR HY000: VALUES LESS THAN value must be of same type as partition function
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (3+4)
partitions 2
(partition x1 values less than (4) tablespace ts1,
partition x2 values less than (8) tablespace ts2);
ERROR HY000: Constant/Random expression in (sub)partitioning function is not allowed
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values less than (4),
partition x2);
ERROR HY000: RANGE PARTITIONING requires definition of VALUES LESS THAN for each partition
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values in (4),
partition x2);
ERROR HY000: Only LIST PARTITIONING can use VALUES IN in partition definition
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values in (4),
partition x2 values less than (5));
ERROR HY000: Only LIST PARTITIONING can use VALUES IN in partition definition
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values less than 4,
partition x2 values less than (5));
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '4,
partition x2 values less than (5))' at line 8
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values less than maxvalue,
partition x2 values less than (5));
ERROR 42000: MAXVALUE can only be used in last partition definition near '))' at line 9
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values less than maxvalue,
partition x2 values less than maxvalue);
ERROR 42000: MAXVALUE can only be used in last partition definition near 'maxvalue)' at line 9
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values less than (4),
partition x2 values less than (3));
ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (sin(a))
partitions 2
(partition x1 values less than (4),
partition x2 values less than (5));
ERROR HY000: The PARTITION function returns the wrong type
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11,
subpartition x12),
partition x2 values less than (5)
( subpartition x21,
subpartition x22)
);
SELECT * from t1;
a b c
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 engine myisam nodegroup 0,
subpartition x12 tablespace t2 engine myisam nodegroup 1),
partition x2 values less than (5)
( subpartition x21 tablespace t1 engine myisam nodegroup 0,
subpartition x22 tablespace t2 engine myisam nodegroup 1)
);
SELECT * from t1;
a b c
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 nodegroup 0,
subpartition x12 tablespace t2 nodegroup 1),
partition x2 values less than (5)
( subpartition x21 tablespace t1 nodegroup 0,
subpartition x22 tablespace t2 nodegroup 1)
);
SELECT * from t1;
a b c
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 engine myisam nodegroup 0,
subpartition x12 engine myisam nodegroup 1),
partition x2 values less than (5)
( subpartition x21 engine myisam nodegroup 0,
subpartition x22 engine myisam nodegroup 1)
);
INSERT into t1 VALUES (1,1,1);
INSERT into t1 VALUES (4,1,1);
INSERT into t1 VALUES (5,1,1);
ERROR HY000: Got error 1 from storage engine
SELECT * from t1;
a b c
1 1 1
4 1 1
ALTER TABLE t1
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 engine myisam nodegroup 0,
subpartition x12 engine myisam nodegroup 1),
partition x2 values less than (5)
( subpartition x21 engine myisam nodegroup 0,
subpartition x22 engine myisam nodegroup 1)
);
SELECT * from t1;
a b c
1 1 1
4 1 1
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 engine myisam,
subpartition x12 tablespace t2 engine myisam),
partition x2 values less than (5)
( subpartition x21 tablespace t1 engine myisam,
subpartition x22 tablespace t2 engine myisam)
);
INSERT into t1 VALUES (1,1,1);
INSERT into t1 VALUES (4,1,1);
INSERT into t1 VALUES (5,1,1);
ERROR HY000: Got error 1 from storage engine
SELECT * from t1;
a b c
1 1 1
4 1 1
ALTER TABLE t1
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 engine myisam,
subpartition x12 tablespace t2 engine myisam),
partition x2 values less than (5)
( subpartition x21 tablespace t1 engine myisam,
subpartition x22 tablespace t2 engine myisam)
);
SELECT * from t1;
a b c
1 1 1
4 1 1
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1,
subpartition x12 tablespace t2),
partition x2 values less than (5)
( subpartition x21 tablespace t1,
subpartition x22 tablespace t2)
);
INSERT into t1 VALUES (1,1,1);
INSERT into t1 VALUES (4,1,1);
INSERT into t1 VALUES (5,1,1);
ERROR HY000: Got error 1 from storage engine
SELECT * from t1;
a b c
1 1 1
4 1 1
ALTER TABLE t1
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 engine myisam,
subpartition x12 tablespace t2 engine myisam),
partition x2 values less than (5)
( subpartition x21 tablespace t1 engine myisam,
subpartition x22 tablespace t2 engine myisam)
);
SELECT * from t1;
a b c
1 1 1
4 1 1
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 engine myisam,
subpartition x12 engine myisam),
partition x2 values less than (5)
( subpartition x21 engine myisam,
subpartition x22 engine myisam)
);
INSERT into t1 VALUES (1,1,1);
INSERT into t1 VALUES (4,1,1);
INSERT into t1 VALUES (5,1,1);
ERROR HY000: Got error 1 from storage engine
SELECT * from t1;
a b c
1 1 1
4 1 1
ALTER TABLE t1
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 engine myisam,
subpartition x12 engine myisam),
partition x2 values less than (5)
( subpartition x21 engine myisam,
subpartition x22 engine myisam)
);
SELECT * from t1;
a b c
1 1 1
4 1 1
drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a+b)
subpartition by key (a)
( partition x1
( subpartition x11 engine myisam,
subpartition x12 engine myisam),
partition x2
( subpartition x21 engine myisam,
subpartition x22 engine myisam)
);
ERROR HY000: RANGE PARTITIONING requires definition of VALUES LESS THAN for each partition
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by range (a+b)
( partition x1
( subpartition x11 engine myisam values less than (0),
subpartition x12 engine myisam values less than (1)),
partition x2
( subpartition x21 engine myisam values less than (0),
subpartition x22 engine myisam values less than (1))
);
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'range (a+b)
( partition x1
( subpartition x11 engine myisam values less than (0)' at line 7

View File

@@ -0,0 +1,58 @@
-- source include/have_ndb.inc
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
#
# Basic syntax test
#
# Support for partition key verified
CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b,c))
ENGINE = NDB
PARTITION BY KEY (a,b);
insert into t1 values (1,1,1,1);
select * from t1;
update t1 set d = 2 where a = 1 and b = 1 and c = 1;
select * from t1;
delete from t1;
select * from t1;
drop table t1;
# only support for partition key on primary key
--error 1453
CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b))
ENGINE = NDB
PARTITION BY KEY (c);
CREATE TABLE t1 (a int, b int, c int, PRIMARY KEY(a,b))
ENGINE = NDB
PARTITION BY KEY (a);
insert into t1 values
(1,1,3),(1,2,3),(1,3,3),(1,4,3),(1,5,3),(1,6,3),
(1,7,3),(1,8,3),(1,9,3),(1,10,3),(1,11,3),(1,12,3);
select * from t1 order by b;
DROP TABLE t1;
#
# Test partition and char support
#
CREATE TABLE t1 (a INT, b CHAR(10) COLLATE latin1_bin, c INT, d INT,
PRIMARY KEY USING HASH (a,b,c))
ENGINE=NDB
DEFAULT CHARSET=latin1
PARTITION BY KEY (b);
insert into t1 values (1,"a",1,1),(2,"a",1,1),(3,"a",1,1);
# should show only one attribute with DISTRIBUTION KEY
--exec $NDB_TOOLS_DIR/ndb_desc --no-defaults -d test t1 | sed 's/Version: [0-9]*//'
DROP TABLE t1;


@ -0,0 +1,86 @@
-- source include/have_ndb.inc
#--disable_abort_on_error
#
# Simple test for the partition storage engine
# Focuses on range partitioning tests
#
#-- source include/have_partition.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# Partition by range, basic
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b),
index (a))
engine = ndb
partition by range (a)
partitions 3
(partition x1 values less than (5),
partition x2 values less than (10),
partition x3 values less than (20));
# Simple insert and verify test
INSERT into t1 values (1, 1, 1);
INSERT into t1 values (6, 1, 1);
INSERT into t1 values (10, 1, 1);
INSERT into t1 values (15, 1, 1);
select * from t1 order by a;
select * from t1 where a=1 order by a;
select * from t1 where a=15 and b=1 order by a;
select * from t1 where a=21 and b=1 order by a;
select * from t1 where a=21 order by a;
select * from t1 where a in (1,6,10,21) order by a;
select * from t1 where b=1 and a in (1,6,10,21) order by a;
drop table t1;
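The a=... and a IN (...) probes above exercise partition pruning: with RANGE (a), an equality or IN predicate on a should confine the read to the partitions whose interval can contain the value. A hypothetical way to make the chosen partitions visible, assuming an EXPLAIN PARTITIONS extension exists in this tree (it is not part of this patch), run before the drop:
explain partitions select * from t1 where a in (1,6,10) order by a;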
#
# Partition by range, basic
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(b),
unique (a))
engine = ndb
partition by range (b)
partitions 3
(partition x1 values less than (5),
partition x2 values less than (10),
partition x3 values less than (20));
# Simple insert and verify test
INSERT into t1 values (1, 1, 1);
INSERT into t1 values (2, 6, 1);
INSERT into t1 values (3, 10, 1);
INSERT into t1 values (4, 15, 1);
select * from t1 order by a;
UPDATE t1 set a = 5 WHERE b = 15;
select * from t1 order by a;
UPDATE t1 set a = 6 WHERE a = 5;
select * from t1 order by a;
select * from t1 where b=1 order by b;
select * from t1 where b=15 and a=1 order by b;
select * from t1 where b=21 and a=1 order by b;
select * from t1 where b=21 order by b;
select * from t1 where b in (1,6,10,21) order by b;
select * from t1 where a in (1,2,5,6) order by b;
select * from t1 where a=1 and b in (1,6,10,21) order by b;
DELETE from t1 WHERE b = 6;
DELETE from t1 WHERE a = 6;
drop table t1;

mysql-test/t/partition.test Normal file

@ -0,0 +1,494 @@
#--disable_abort_on_error
#
# Simple test for the partition storage engine
# Taken from the select test
#
-- source include/have_partition.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# Partition by key no partition defined => OK
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a);
drop table t1;
#
# Partition by key no partition, list of fields
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a, b);
drop table t1;
#
# Partition by key specified 3 partitions and defined 3 => ok
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition x1, partition x2, partition x3);
drop table t1;
#
# Partition by key specifying nodegroup
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition x1 nodegroup 0,
partition x2 nodegroup 1,
partition x3 nodegroup 2);
drop table t1;
#
# Partition by key specifying engine
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition x1 engine myisam,
partition x2 engine myisam,
partition x3 engine myisam);
drop table t1;
#
# Partition by key specifying tablespace
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
drop table t1;
#
# Partition by list, basic
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 3
(partition x1 values in (1,2,9,4) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
drop table t1;
#
# Partition by list, list function
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (b*a)
partitions 3
(partition x1 values in (1,2,9,4) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
drop table t1;
#
# Partition by list, list function, no spec of #partitions
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (b*a)
(partition x1 values in (1) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
drop table t1;
#
# Stand-alone partition clause => syntax error
#
--error 1064
partition by list (a)
partitions 3
(partition x1 values in (1,2,9,4) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
#
# Partition by list, number of partitions defined, no partition defined
#
--error 1441
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2;
#
# Partition by list, wrong result type
#
--error 1440
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (sin(a))
partitions 3
(partition x1 values in (1,2,9,4) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
#
# Partition by key, partition function not allowed
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a+2)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
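What the parser objects to here is the argument form: PARTITION BY KEY takes a comma-separated list of column names, while arbitrary integer expressions belong to PARTITION BY HASH. A sketch of the two accepted spellings (tablespace clauses dropped for brevity):
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition x1, partition x2, partition x3);
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a+2)
partitions 3
(partition x1, partition x2, partition x3);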
#
# Partition by key, no partition name
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
#
# Partition by key, invalid field in field list
#
--error 1437
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a,d)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
#
# Partition by hash, invalid field in function
#
--error 1054
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a + d)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
#
# Partition by hash, invalid result type
#
--error 1440
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (sin(a))
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
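The partition function must evaluate to an integer, and sin(a) yields a real number. A sketch of an accepted variant of the same table, with the expression chosen for illustration only:
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a*b)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);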
#
# Partition by key specified 3 partitions but only defined 2 => error
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
partitions 3
(partition x1, partition x2);
#
# Partition by hash, rand() not allowed as partition function => error
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (rand(a))
partitions 2
(partition x1, partition x2);
#
# Partition by range, rand() not allowed as partition function => error
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (rand(a))
partitions 2
(partition x1 values less than (0), partition x2 values less than (2));
#
# Partition by list, rand() not allowed as partition function => error
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (rand(a))
partitions 2
(partition x1 values in (1), partition x2 values in (2));
#
# Partition by hash, values less than error
#
--error 1430
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a)
partitions 2
(partition x1 values less than (4),
partition x2 values less than (5));
#
# Partition by hash, values in error
#
--error 1430
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a)
partitions 2
(partition x1 values in (4),
partition x2 values in (5));
#
# Partition by hash, values in error
#
--error 1430
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a)
partitions 2
(partition x1 values in (4,6),
partition x2 values in (5,7));
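Rows in a HASH-partitioned table place themselves through the partition function (essentially its value modulo the partition count), so neither VALUES LESS THAN nor VALUES IN is accepted. A sketch of the corrected definition:
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a)
partitions 2
(partition x1, partition x2);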
#
# Subpartition by key, no partitions defined, single field
#
--error 1449
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by key (b);
#
# Subpartition by key, no partitions defined, list of fields
#
--error 1449
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by key (a, b);
#
# Subpartition by hash, no partitions defined
#
--error 1449
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by hash (a+b);
#
# Subpartition by hash, no partitions defined, wrong subpartition function
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by hash (rand(a+b));
#
# Subpartition by hash, wrong subpartition function
#
--error 1449
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by hash (sin(a+b))
(partition x1 (subpartition x11, subpartition x12),
partition x2 (subpartition x21, subpartition x22));
#
# Subpartition by key with an expression instead of a field list => error
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by key (a+b)
(partition x1 values less than (1) (subpartition x11, subpartition x12),
partition x2 values less than (2) (subpartition x21, subpartition x22));
#
# Subpartition by key, invalid field in field list
#
--error 1437
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by key (a,d)
(partition x1 values less than (1) (subpartition x11, subpartition x12),
partition x2 values less than (2) (subpartition x21, subpartition x22));
#
# Subpartition by hash, constant subpartition function => error
#
--error 1449
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by hash (3+4);
#
# Subpartition by hash, invalid field in subpartition function
#
--error 1054
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+d)
(partition x1 values less than (1) (subpartition x11, subpartition x12),
partition x2 values less than (2) (subpartition x21, subpartition x22));


@ -0,0 +1,77 @@
#--disable_abort_on_error
#
# Simple test for the partition storage engine
# Taken from the select test
#
-- source include/have_partition.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# Partition by hash, basic
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a + 2)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
insert into t1 values (1,1,1);
insert into t1 values (2,1,1);
insert into t1 values (3,1,1);
insert into t1 values (4,1,1);
insert into t1 values (5,1,1);
select * from t1;
update t1 set c=3 where b=1;
select * from t1;
select b from t1 where a=3;
select b,c from t1 where a=1 AND b=1;
delete from t1 where a=1;
delete from t1 where c=3;
select * from t1;
ALTER TABLE t1
partition by hash (a + 3)
partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
select * from t1;
drop table t1;
#
# Partition by hash, only one partition
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by hash (a)
(partition x1);
drop table t1;
#
# Partition by key, only one partition
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by key (a)
(partition x1);
drop table t1;


@ -0,0 +1,316 @@
#--disable_abort_on_error
#
# Simple test for the partition storage engine
# testing list partitioning
#
-- source include/have_partition.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# Test ordinary list partitioning that it works ok
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null)
partition by list(a)
partitions 2
(partition x123 values in (1,5,6),
partition x234 values in (4,7,8));
INSERT into t1 VALUES (1,1,1);
--error 1030
INSERT into t1 VALUES (2,1,1);
--error 1030
INSERT into t1 VALUES (3,1,1);
INSERT into t1 VALUES (4,1,1);
INSERT into t1 VALUES (5,1,1);
INSERT into t1 VALUES (6,1,1);
INSERT into t1 VALUES (7,1,1);
INSERT into t1 VALUES (8,1,1);
--error 1030
INSERT into t1 VALUES (9,1,1);
INSERT into t1 VALUES (1,2,1);
INSERT into t1 VALUES (1,3,1);
INSERT into t1 VALUES (1,4,1);
INSERT into t1 VALUES (7,2,1);
INSERT into t1 VALUES (7,3,1);
INSERT into t1 VALUES (7,4,1);
SELECT * from t1;
SELECT * from t1 WHERE a=1;
SELECT * from t1 WHERE a=7;
SELECT * from t1 WHERE b=2;
UPDATE t1 SET a=8 WHERE a=7 AND b=3;
SELECT * from t1;
UPDATE t1 SET a=8 WHERE a=5 AND b=1;
SELECT * from t1;
DELETE from t1 WHERE a=8;
SELECT * from t1;
DELETE from t1 WHERE a=2;
SELECT * from t1;
DELETE from t1 WHERE a=5 OR a=6;
SELECT * from t1;
ALTER TABLE t1
partition by list(a)
partitions 2
(partition x123 values in (1,5,6),
partition x234 values in (4,7,8));
SELECT * from t1;
INSERT into t1 VALUES (6,2,1);
--error 1030
INSERT into t1 VALUES (2,2,1);
drop table t1;
#
# Subpartition by hash, two partitions and two subpartitions
# Defined node group
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by list (a)
subpartition by hash (a+b)
( partition x1 values in (1,2,3)
( subpartition x11 nodegroup 0,
subpartition x12 nodegroup 1),
partition x2 values in (4,5,6)
( subpartition x21 nodegroup 0,
subpartition x22 nodegroup 1)
);
INSERT into t1 VALUES (1,1,1);
INSERT into t1 VALUES (4,1,1);
--error 1030
INSERT into t1 VALUES (7,1,1);
UPDATE t1 SET a=5 WHERE a=1;
SELECT * from t1;
UPDATE t1 SET a=6 WHERE a=4;
SELECT * from t1;
DELETE from t1 WHERE a=6;
SELECT * from t1;
drop table t1;
#
# Subpartition by hash, wrong number of subpartitions
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by list (a)
subpartition by hash (a+b)
subpartitions 3
( partition x1 values in (1,2,4)
( subpartition x11 nodegroup 0,
subpartition x12 nodegroup 1),
partition x2 values in (3,5,6)
( subpartition x21 nodegroup 0,
subpartition x22 nodegroup 1)
);
#
# Subpartition by hash, wrong number of subpartitions
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by list (a)
subpartition by hash (a+b)
( partition x1 values in (1)
( subpartition x11 nodegroup 0,
subpartition xextra,
subpartition x12 nodegroup 1),
partition x2 values in (2)
( subpartition x21 nodegroup 0,
subpartition x22 nodegroup 1)
);
#
# Subpartition by list => error
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by list (a+b)
( partition x1
( subpartition x11 engine myisam,
subpartition x12 engine myisam),
partition x2
( subpartition x21 engine myisam,
subpartition x22 engine myisam)
);
#
# Subpartition by list => error
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by list (a+b)
( partition x1
( subpartition x11 engine myisam values in (0),
subpartition x12 engine myisam values in (1)),
partition x2
( subpartition x21 engine myisam values in (0),
subpartition x22 engine myisam values in (1))
);
#
# Partition by list, only one partition => ok
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
(partition x1 values in (1,2,9,4) tablespace ts1);
drop table t1;
#
# Partition by list, no partition => error
#
--error 1441
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a);
#
# Partition by list, constant partition function not allowed
#
--error 1435
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (3+4)
partitions 2
(partition x1 values in (4) tablespace ts1,
partition x2 values in (8) tablespace ts2);
#
# Partition by list, invalid field in function
#
--error 1054
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a+d)
partitions 2
(partition x1 values in (4) tablespace ts1,
partition x2 values in (8) tablespace ts2);
#
# Partition by list, no values in definition
#
--error 1429
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values in (4),
partition x2);
#
# Partition by list, values less than error
#
--error 1430
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values in (4),
partition x2 values less than (5));
#
# Partition by list, no values in definition
#
--error 1429
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values in (4,6),
partition x2);
#
# Partition by list, duplicate values
#
--error 1444
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values in (4, 12+9),
partition x2 values in (3, 21));
#
# Partition by list, wrong constant result type (not INT)
#
--error 1443
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values in (4.0, 12+8),
partition x2 values in (3, 21));
#
# Partition by list, missing parenthesis
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by list (a)
partitions 2
(partition x1 values in 4,
partition x2 values in (5));


@ -0,0 +1,828 @@
#--disable_abort_on_error
#
# Simple test for the partition storage engine
# Focuses on tests of ordered index read
#
-- source include/have_partition.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# Ordered index read, int type
#
CREATE TABLE t1 (
a int not null,
b int not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 order by b;
select * from t1 force index (b) where b > 0 order by b;
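Every block in this file repeats this probe over a different column type: the four rows straddle both range partitions, and the forced index scan checks that the handler merges the per-partition ordered index reads back into one globally ordered stream. A sketch of how to inspect the chosen plan (assuming the optimizer honours the hint here; output not part of this patch):
explain select * from t1 force index (b) where b > 0 order by b;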
drop table t1;
#
# Ordered index read, unsigned int type
#
CREATE TABLE t1 (
a int not null,
b int unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, tiny int type
#
CREATE TABLE t1 (
a int not null,
b tinyint not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, unsigned tinyint type
#
CREATE TABLE t1 (
a int not null,
b tinyint unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, smallint type
#
CREATE TABLE t1 (
a int not null,
b smallint not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, unsigned smallint type
#
CREATE TABLE t1 (
a int not null,
b smallint unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, mediumint type
#
CREATE TABLE t1 (
a int not null,
b mediumint not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, unsigned mediumint type
#
CREATE TABLE t1 (
a int not null,
b mediumint unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, unsigned bigint type
#
CREATE TABLE t1 (
a int not null,
b bigint unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, bigint type
#
CREATE TABLE t1 (
a int not null,
b bigint not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, float type
#
CREATE TABLE t1 (
a int not null,
b float not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, double type
#
CREATE TABLE t1 (
a int not null,
b double not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, unsigned double type
#
CREATE TABLE t1 (
a int not null,
b double unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, unsigned float type
#
CREATE TABLE t1 (
a int not null,
b float unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, double precision type
#
CREATE TABLE t1 (
a int not null,
b double precision not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, unsigned double precision type
#
CREATE TABLE t1 (
a int not null,
b double precision unsigned not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, decimal type
#
CREATE TABLE t1 (
a int not null,
b decimal not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, char type
#
CREATE TABLE t1 (
a int not null,
b char(10) not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > 0 order by b;
drop table t1;
#
# Ordered index read, varchar type
#
CREATE TABLE t1 (
a int not null,
b varchar(10) not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
drop table t1;
#
# Ordered index read, varchar type limited index size
#
CREATE TABLE t1 (
a int not null,
b varchar(10) not null,
primary key(a),
index (b(5)))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
drop table t1;
#
# Ordered index read, varchar binary type
#
CREATE TABLE t1 (
a int not null,
b varchar(10) binary not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
drop table t1;
#
# Ordered index read, tinytext type
#
CREATE TABLE t1 (
a int not null,
b tinytext not null,
primary key(a),
index (b(10)))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
drop table t1;
#
# Ordered index read, text type
#
CREATE TABLE t1 (
a int not null,
b text not null,
primary key(a),
index (b(10)))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
drop table t1;
#
# Ordered index read, mediumtext type
#
CREATE TABLE t1 (
a int not null,
b mediumtext not null,
primary key(a),
index (b(10)))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
drop table t1;
#
# Ordered index read, longtext type
#
CREATE TABLE t1 (
a int not null,
b longtext not null,
primary key(a),
index (b(10)))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b > '0' order by b;
drop table t1;
#
# Ordered index read, enum type
#
CREATE TABLE t1 (
a int not null,
b enum('1','2', '4', '5') not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b >= '1' order by b;
drop table t1;
#
# Ordered index read, set type
#
CREATE TABLE t1 (
a int not null,
b set('1','2', '4', '5') not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '1');
INSERT into t1 values (2, '5');
INSERT into t1 values (30, '4');
INSERT into t1 values (35, '2');
select * from t1 force index (b) where b >= '1' order by b;
drop table t1;
#
# Ordered index read, date type
#
CREATE TABLE t1 (
a int not null,
b date not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '2001-01-01');
INSERT into t1 values (2, '2005-01-01');
INSERT into t1 values (30, '2004-01-01');
INSERT into t1 values (35, '2002-01-01');
select * from t1 force index (b) where b > '2000-01-01' order by b;
drop table t1;
#
# Ordered index read, datetime type
#
CREATE TABLE t1 (
a int not null,
b datetime not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '2001-01-01 00:00:00');
INSERT into t1 values (2, '2005-01-01 00:00:00');
INSERT into t1 values (30, '2004-01-01 00:00:00');
INSERT into t1 values (35, '2002-01-01 00:00:00');
select * from t1 force index (b) where b > '2000-01-01 00:00:00' order by b;
drop table t1;
#
# Ordered index read, timestamp type
#
CREATE TABLE t1 (
a int not null,
b timestamp not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '2001-01-01 00:00:00');
INSERT into t1 values (2, '2005-01-01 00:00:00');
INSERT into t1 values (30, '2004-01-01 00:00:00');
INSERT into t1 values (35, '2002-01-01 00:00:00');
select * from t1 force index (b) where b > '2000-01-01 00:00:00' order by b;
drop table t1;
#
# Ordered index read, time type
#
CREATE TABLE t1 (
a int not null,
b time not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, '01:00:00');
INSERT into t1 values (2, '05:00:00');
INSERT into t1 values (30, '04:00:00');
INSERT into t1 values (35, '02:00:00');
select * from t1 force index (b) where b > '00:00:00' order by b;
drop table t1;
#
# Ordered index read, year type
#
CREATE TABLE t1 (
a int not null,
b year not null,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 2001);
INSERT into t1 values (2, 2005);
INSERT into t1 values (30, 2004);
INSERT into t1 values (35, 2002);
select * from t1 force index (b) where b > 2000 order by b;
drop table t1;
#
# Ordered index read, bit(5) type
#
CREATE TABLE t1 (
a int not null,
b bit(5) not null,
c int,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, b'00001', NULL);
INSERT into t1 values (2, b'00101', 2);
INSERT into t1 values (30, b'00100', 2);
INSERT into t1 values (35, b'00010', NULL);
select a from t1 force index (b) where b > b'00000' order by b;
drop table t1;
#
# Ordered index read, bit(15) type
#
CREATE TABLE t1 (
a int not null,
b bit(15) not null,
c int,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, b'000000000000001', NULL);
INSERT into t1 values (2, b'001010000000101', 2);
INSERT into t1 values (30, b'001000000000100', 2);
INSERT into t1 values (35, b'000100000000010', NULL);
select a from t1 force index (b) where b > b'000000000000000' order by b;
drop table t1;
#
# Ordered index read, NULL values
#
CREATE TABLE t1 (
a int not null,
b int,
primary key(a),
index (b))
partition by range (a)
partitions 2
(partition x1 values less than (25),
partition x2 values less than (100));
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (5, NULL);
INSERT into t1 values (2, 5);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
INSERT into t1 values (40, NULL);
select * from t1 force index (b) where b < 10 OR b IS NULL order by b;
drop table t1;


@ -0,0 +1,560 @@
#--disable_abort_on_error
#
# Simple test for the partition storage engine
# Focuses on range partitioning tests
#
-- source include/have_partition.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# Partition by range, basic
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than maxvalue tablespace ts3);
# Simple insert and verify test
INSERT into t1 values (1, 1, 1);
INSERT into t1 values (6, 1, 1);
INSERT into t1 values (10, 1, 1);
INSERT into t1 values (15, 1, 1);
select * from t1;
ALTER TABLE t1
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than maxvalue tablespace ts3);
select * from t1;
drop table if exists t1;
#
# Partition by range, basic
# No primary key
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null)
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than maxvalue tablespace ts3);
# Simple insert and verify test
INSERT into t1 values (1, 1, 1);
INSERT into t1 values (6, 1, 1);
INSERT into t1 values (10, 1, 1);
INSERT into t1 values (15, 1, 1);
select * from t1;
ALTER TABLE t1
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than maxvalue tablespace ts3);
select * from t1;
drop table if exists t1;
#
# Partition by range, basic
# No max value used
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than (15) tablespace ts3);
# Simple insert and verify test
INSERT into t1 values (1, 1, 1);
INSERT into t1 values (6, 1, 1);
INSERT into t1 values (10, 1, 1);
--error 1030
INSERT into t1 values (15, 1, 1);
select * from t1;
ALTER TABLE t1
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than (15) tablespace ts3);
select * from t1;
drop table t1;
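The difference from the first block is the missing catch-all: with a final VALUES LESS THAN MAXVALUE partition every key finds a home, whereas here a=15 falls outside all three ranges (the bound (15) is exclusive) and the insert fails. A sketch of the catch-all form:
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
(partition x1 values less than (15),
partition x2 values less than maxvalue);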
#
# Partition by range, only one partition
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
(partition x1 values less than (1));
drop table t1;
#
# Partition by range, no partition => error
#
--error 1441
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a);
#
# Partition by range, invalid field in function
#
--error 1054
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a+d)
partitions 2
(partition x1 values less than (4) tablespace ts1,
partition x2 values less than (8) tablespace ts2);
#
# Partition by range, inconsistent partition function and constants
#
--error 1443
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values less than (4.0) tablespace ts1,
partition x2 values less than (8) tablespace ts2);
#
# Partition by range, constant partition function not allowed
#
--error 1435
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (3+4)
partitions 2
(partition x1 values less than (4) tablespace ts1,
partition x2 values less than (8) tablespace ts2);
#
# Partition by range, no values less than definition
#
--error 1429
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values less than (4),
partition x2);
#
# Partition by range, no values in definition allowed
#
--error 1430
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values in (4),
partition x2);
#
# Partition by range, values in error
#
--error 1430
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values in (4),
partition x2 values less than (5));
#
# Partition by range, missing parenthesis
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values less than 4,
partition x2 values less than (5));
#
# Partition by range, maxvalue in wrong place
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values less than maxvalue,
partition x2 values less than (5));
#
# Partition by range, maxvalue in several places
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values less than maxvalue,
partition x2 values less than maxvalue);
#
# Partition by range, not increasing ranges
#
--error 1442
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (a)
partitions 2
(partition x1 values less than (4),
partition x2 values less than (3));
#
# Partition by range, wrong result type of partition function
#
--error 1440
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key(a,b))
partition by range (sin(a))
partitions 2
(partition x1 values less than (4),
partition x2 values less than (5));
#
# Subpartition by hash, two partitions and two subpartitions
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11,
subpartition x12),
partition x2 values less than (5)
( subpartition x21,
subpartition x22)
);
SELECT * from t1;
drop table t1;
#
# Subpartition by hash, two partitions and two subpartitions
# Defined tablespace, engine and node group
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 engine myisam nodegroup 0,
subpartition x12 tablespace t2 engine myisam nodegroup 1),
partition x2 values less than (5)
( subpartition x21 tablespace t1 engine myisam nodegroup 0,
subpartition x22 tablespace t2 engine myisam nodegroup 1)
);
SELECT * from t1;
drop table t1;
#
# Subpartition by hash, two partitions and two subpartitions
# Defined tablespace, node group
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 nodegroup 0,
subpartition x12 tablespace t2 nodegroup 1),
partition x2 values less than (5)
( subpartition x21 tablespace t1 nodegroup 0,
subpartition x22 tablespace t2 nodegroup 1)
);
SELECT * from t1;
drop table t1;
#
# Subpartition by hash, two partitions and two subpartitions
# Defined engine and node group
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 engine myisam nodegroup 0,
subpartition x12 engine myisam nodegroup 1),
partition x2 values less than (5)
( subpartition x21 engine myisam nodegroup 0,
subpartition x22 engine myisam nodegroup 1)
);
INSERT into t1 VALUES (1,1,1);
INSERT into t1 VALUES (4,1,1);
--error 1030
INSERT into t1 VALUES (5,1,1);
SELECT * from t1;
ALTER TABLE t1
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 engine myisam nodegroup 0,
subpartition x12 engine myisam nodegroup 1),
partition x2 values less than (5)
( subpartition x21 engine myisam nodegroup 0,
subpartition x22 engine myisam nodegroup 1)
);
SELECT * from t1;
drop table t1;
#
# Subpartition by hash, two partitions and two subpartitions
# Defined tablespace, engine
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 engine myisam,
subpartition x12 tablespace t2 engine myisam),
partition x2 values less than (5)
( subpartition x21 tablespace t1 engine myisam,
subpartition x22 tablespace t2 engine myisam)
);
INSERT into t1 VALUES (1,1,1);
INSERT into t1 VALUES (4,1,1);
--error 1030
INSERT into t1 VALUES (5,1,1);
SELECT * from t1;
ALTER TABLE t1
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 engine myisam,
subpartition x12 tablespace t2 engine myisam),
partition x2 values less than (5)
( subpartition x21 tablespace t1 engine myisam,
subpartition x22 tablespace t2 engine myisam)
);
SELECT * from t1;
drop table t1;
#
# Subpartition by hash, two partitions and two subpartitions
# Defined tablespace
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1,
subpartition x12 tablespace t2),
partition x2 values less than (5)
( subpartition x21 tablespace t1,
subpartition x22 tablespace t2)
);
INSERT into t1 VALUES (1,1,1);
INSERT into t1 VALUES (4,1,1);
--error 1030
INSERT into t1 VALUES (5,1,1);
SELECT * from t1;
ALTER TABLE t1
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 engine myisam,
subpartition x12 tablespace t2 engine myisam),
partition x2 values less than (5)
( subpartition x21 tablespace t1 engine myisam,
subpartition x22 tablespace t2 engine myisam)
);
SELECT * from t1;
drop table t1;
#
# Subpartition by hash, two partitions and two subpartitions
# Defined engine
#
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 engine myisam,
subpartition x12 engine myisam),
partition x2 values less than (5)
( subpartition x21 engine myisam,
subpartition x22 engine myisam)
);
INSERT into t1 VALUES (1,1,1);
INSERT into t1 VALUES (4,1,1);
--error 1030
INSERT into t1 VALUES (5,1,1);
SELECT * from t1;
ALTER TABLE t1
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 engine myisam,
subpartition x12 engine myisam),
partition x2 values less than (5)
( subpartition x21 engine myisam,
subpartition x22 engine myisam)
);
SELECT * from t1;
drop table t1;
#
# Partition by range without VALUES LESS THAN for each partition => error
#
--error 1429
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a+b)
subpartition by key (a)
( partition x1
( subpartition x11 engine myisam,
subpartition x12 engine myisam),
partition x2
( subpartition x21 engine myisam,
subpartition x22 engine myisam)
);
#
# Subpartition with range => error
#
--error 1064
CREATE TABLE t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by key (a)
subpartition by range (a+b)
( partition x1
( subpartition x11 engine myisam values less than (0),
subpartition x12 engine myisam values less than (1)),
partition x2
( subpartition x21 engine myisam values less than (0),
subpartition x22 engine myisam values less than (1))
);


@ -84,6 +84,9 @@ FLAGS=$(DEFS) $(INCLUDES) $(CPPFLAGS) $(CFLAGS) @NOINST_LDFLAGS@
test_bitmap$(EXEEXT): my_bitmap.c $(LIBRARIES)
$(LINK) $(FLAGS) -DMAIN ./my_bitmap.c $(LDADD) $(LIBS)
test_priority_queue$(EXEEXT): queues.c $(LIBRARIES)
$(LINK) $(FLAGS) -DMAIN ./queues.c $(LDADD) $(LIBS)
test_thr_alarm$(EXEEXT): thr_alarm.c $(LIBRARIES)
$(CP) $(srcdir)/thr_alarm.c ./test_thr_alarm.c
$(LINK) $(FLAGS) -DMAIN ./test_thr_alarm.c $(LDADD) $(LIBS)


@ -20,20 +20,20 @@
API limitations (or rather, asserted safety assumptions,
to encourage correct programming)
* the size of the used bitmap is less than ~(uint) 0
* it's a multiple of 8 (for efficiency reasons)
* when arguments are a bitmap and a bit number, the number
must be within bitmap size
* bitmap_set_prefix() is an exception - one can use ~0 to set all bits
* when both arguments are bitmaps, they must be of the same size
* bitmap_intersect() is an exception :)
(for Bitmap::intersect(ulonglong map2buff))
If THREAD is defined all bitmap operations except bitmap_init/bitmap_free
are thread-safe.
* the internal size is a set of 32 bit words
* the number of bits specified in creation can be any number > 0
* there are THREAD-safe versions of most calls, named bitmap_lock_*;
many of these are not used and not normally compiled, but the code
for them already exists in an #ifdef'ed part. They can only be used
if THREAD was specified in bitmap_init
TODO:
Make assembler THREAD-safe versions of these using test-and-set instructions
Original version created by Sergei Golubchik 2001 - 2004.
A new version was written and a test program added, and some changes to
the interface were made by Mikael Ronström in 2005, with the assistance
of Tomas Ulin and Mats Kindahl.
*/
#include "mysys_priv.h"
@ -1046,17 +1046,9 @@ int main()
}
/*
This is the way the test part was compiled after a complete tree build with
debug.
gcc -DHAVE_CONFIG_H -I. -I. -I.. -I../include -I. -g -O -DDBUG_ON
-DSAFE_MUTEX -fno-omit-frame-pointer -DHAVE_DARWIN_THREADS
-D_P1003_1B_VISIBLE -DSIGNAL_WITH_VIO_CLOSE -DTEST_BITMAP
-DSIGNALS_DONT_BREAK_READ -DIGNORE_SIGHUP_SIGQUIT -MT
my_bitmap.o -MD -MP -MF ".deps/my_bitmap.Tpo" -c -o my_bitmap.o my_bitmap.c
gcc -o my_bitmap my_bitmap.o -L../mysys -lmysys -L../strings -lmystrings
-L../dbug -ldbug
In directory mysys:
make test_bitmap
will build the bitmap tests and ./test_bitmap will execute it
*/
#endif


@ -1,4 +1,4 @@
/* Copyright (C) 2000 MySQL AB
/* Copyright (C) 2000, 2005 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -17,6 +17,10 @@
/*
Code for general handling of priority queues.
Implementation of queues from "Algorithms in C" by Robert Sedgewick.
An optimisation of _downheap suggested in Exercise 7.51 of "Data
Structures & Algorithms in C++" by Mark Allen Weiss, Second Edition,
was implemented by Mikael Ronström in 2005. The O(N) algorithm for
queue_fix was also implemented.
*/
#include "mysys_priv.h"
@ -214,8 +218,64 @@ void queue_replaced(QUEUE *queue)
}
#endif
/* Fix heap when index has changed */
#ifndef OLD_VERSION
void _downheap(register QUEUE *queue, uint idx)
{
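/*
Two-phase sift-down: the first loop promotes the better child at each
level (only the very first level, guarded by 'first', still compares
the displaced element itself, allowing an early exit), driving the
hole towards the leaves; the second loop then backs the hole up from
the bottom until the displaced element fits. This saves roughly one
comparison per level when elements sink deep, which is the point of
Weiss' Exercise 7.51 cited in the header.
*/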
byte *element;
uint elements,half_queue,offset_to_key, next_index;
bool first= TRUE;
uint start_idx= idx;
offset_to_key=queue->offset_to_key;
element=queue->root[idx];
half_queue=(elements=queue->elements) >> 1;
while (idx <= half_queue)
{
int cmp;
next_index=idx+idx;
if (next_index < elements &&
(queue->compare(queue->first_cmp_arg,
queue->root[next_index]+offset_to_key,
queue->root[next_index+1]+offset_to_key) ^
queue->max_at_top) > 0)
next_index++;
if (first &&
(((cmp=queue->compare(queue->first_cmp_arg,
queue->root[next_index]+offset_to_key,
element+offset_to_key)) == 0) ||
((cmp ^ queue->max_at_top) > 0)))
{
queue->root[idx]= element;
return;
}
queue->root[idx]=queue->root[next_index];
idx=next_index;
first= FALSE;
}
next_index= idx >> 1;
while (next_index > start_idx)
{
if ((queue->compare(queue->first_cmp_arg,
queue->root[next_index]+offset_to_key,
element+offset_to_key) ^
queue->max_at_top) < 0)
break;
queue->root[idx]=queue->root[next_index];
idx=next_index;
next_index= idx >> 1;
}
queue->root[idx]=element;
}
#else
/*
The old _downheap version is kept for comparisons with the benchmark
suite or any new benchmarks one wants to run for comparison.
*/
/* Fix heap when index has changed */
void _downheap(register QUEUE *queue, uint idx)
{
byte *element;
@ -247,20 +307,336 @@ void _downheap(register QUEUE *queue, uint idx)
}
static int queue_fix_cmp(QUEUE *queue, void **a, void **b)
{
return queue->compare(queue->first_cmp_arg,
(byte*) (*a)+queue->offset_to_key,
(byte*) (*b)+queue->offset_to_key);
}
#endif
/*
Fix heap when every element was changed,
actually, it can be done better, in linear time, not in n*log(n)
Fix heap when every element was changed.
*/
void queue_fix(QUEUE *queue)
{
qsort2(queue->root+1,queue->elements, sizeof(void *),
(qsort2_cmp)queue_fix_cmp, queue);
uint i;
for (i= queue->elements >> 1; i > 0; i--)
_downheap(queue, i);
}
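A quick check of the O(N) claim in the header comment: the level h above the leaves holds at most n/2^(h+1) nodes and a sift-down from it costs O(h), so the whole fix costs on the order of sum over h of n*h/2^(h+1) = O(n), against the n*log(n) of the old qsort-based implementation.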
#ifdef MAIN
/*
A test program for the priority queue implementation.
It can also be used to benchmark changes to the implementation.
Build it by doing the following in the mysys directory:
make test_priority_queue
./test_priority_queue
Written by Mikael Ronström, 2005
*/
static uint num_array[1025];
static uint tot_no_parts= 0;
static uint tot_no_loops= 0;
static uint expected_part= 0;
static uint expected_num= 0;
static bool max_ind= 0;
static bool fix_used= 0;
static ulonglong start_time= 0;
static bool is_divisible_by(uint num, uint divisor)
{
uint quotient= num / divisor;
if (quotient * divisor == num)
return TRUE;
return FALSE;
}
void calculate_next()
{
uint part= expected_part, num= expected_num;
uint no_parts= tot_no_parts;
if (max_ind)
{
do
{
while (++part <= no_parts)
{
if (is_divisible_by(num, part) &&
(num <= ((1 << 21) + part)))
{
expected_part= part;
expected_num= num;
return;
}
}
part= 0;
} while (--num);
}
else
{
do
{
while (--part > 0)
{
if (is_divisible_by(num, part))
{
expected_part= part;
expected_num= num;
return;
}
}
part= no_parts + 1;
} while (++num);
}
}
void calculate_end_next(uint part)
{
uint no_parts= tot_no_parts, num;
num_array[part]= 0;
if (max_ind)
{
expected_num= 0;
for (part= no_parts; part > 0 ; part--)
{
if (num_array[part])
{
num= num_array[part] & 0x3FFFFF;
if (num >= expected_num)
{
expected_num= num;
expected_part= part;
}
}
}
if (expected_num == 0)
expected_part= 0;
}
else
{
expected_num= 0xFFFFFFFF;
for (part= 1; part <= no_parts; part++)
{
if (num_array[part])
{
num= num_array[part] & 0x3FFFFF;
if (num <= expected_num)
{
expected_num= num;
expected_part= part;
}
}
}
if (expected_num == 0xFFFFFFFF)
expected_part= 0;
}
return;
}
static int test_compare(void *null_arg, byte *a, byte *b)
{
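/*
Each test element packs two values into one uint: bits 0-21 hold the
number and bits 22 and up hold the partition id. Elements order by
number first; ties break on partition id in reverse, so among equal
numbers the highest partition sorts as the smallest element.
*/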
uint a_num= (*(uint*)a) & 0x3FFFFF;
uint b_num= (*(uint*)b) & 0x3FFFFF;
uint a_part, b_part;
if (a_num > b_num)
return +1;
if (a_num < b_num)
return -1;
a_part= (*(uint*)a) >> 22;
b_part= (*(uint*)b) >> 22;
if (a_part < b_part)
return +1;
if (a_part > b_part)
return -1;
return 0;
}
bool check_num(uint num_part)
{
uint part= num_part >> 22;
uint num= num_part & 0x3FFFFF;
if (part == expected_part)
if (num == expected_num)
return FALSE;
printf("Expect part %u Expect num 0x%x got part %u num 0x%x max_ind %u fix_used %u \n",
expected_part, expected_num, part, num, max_ind, fix_used);
return TRUE;
}
void perform_insert(QUEUE *queue)
{
uint i= 1, no_parts= tot_no_parts;
uint backward_start= 0;
expected_part= 1;
expected_num= 1;
if (max_ind)
backward_start= 1 << 21;
do
{
uint num= (i + backward_start);
if (max_ind)
{
while (!is_divisible_by(num, i))
num--;
if (max_ind && (num > expected_num ||
(num == expected_num && i < expected_part)))
{
expected_num= num;
expected_part= i;
}
}
num_array[i]= num + (i << 22);
if (fix_used)
queue_element(queue, i-1)= (byte*)&num_array[i];
else
queue_insert(queue, (byte*)&num_array[i]);
} while (++i <= no_parts);
if (fix_used)
{
queue->elements= no_parts;
queue_fix(queue);
}
}
bool perform_ins_del(QUEUE *queue, bool max_ind)
{
uint i= 0, no_loops= tot_no_loops, j= tot_no_parts;
do
{
uint num_part= *(uint*)queue_top(queue);
uint part= num_part >> 22;
if (check_num(num_part))
return TRUE;
if (j++ >= no_loops)
{
calculate_end_next(part);
queue_remove(queue, (uint) 0);
}
else
{
calculate_next();
if (max_ind)
num_array[part]-= part;
else
num_array[part]+= part;
queue_top(queue)= (byte*)&num_array[part];
queue_replaced(queue);
}
} while (++i < no_loops);
return FALSE;
}
bool do_test(uint no_parts, uint l_max_ind, bool l_fix_used)
{
QUEUE queue;
bool result;
max_ind= l_max_ind;
fix_used= l_fix_used;
init_queue(&queue, no_parts, 0, max_ind, test_compare, NULL);
tot_no_parts= no_parts;
tot_no_loops= 1024;
perform_insert(&queue);
result= perform_ins_del(&queue, max_ind);
delete_queue(&queue);
if (result)
{
printf("Error\n");
return TRUE;
}
return FALSE;
}
static void start_measurement()
{
start_time= my_getsystime();
}
static void stop_measurement()
{
ulonglong stop_time= my_getsystime();
uint time_in_micros;
stop_time-= start_time;
stop_time/= 10; /* Convert to microseconds */
time_in_micros= (uint)stop_time;
printf("Time expired is %u microseconds \n", time_in_micros);
}
static void benchmark_test()
{
QUEUE queue_real;
QUEUE *queue= &queue_real;
uint i, add;
fix_used= TRUE;
max_ind= FALSE;
tot_no_parts= 1024;
init_queue(queue, tot_no_parts, 0, max_ind, test_compare, NULL);
/*
First benchmark whether queue_fix is faster than using queue_insert
for increasing partition counts up to 128.
*/
for (tot_no_parts= 2, add=2; tot_no_parts < 128;
tot_no_parts+= add, add++)
{
fix_used= TRUE;
printf("Start benchmark queue_fix, tot_no_parts= %u \n", tot_no_parts);
start_measurement();
for (i= 0; i < 128; i++)
{
perform_insert(queue);
queue_remove_all(queue);
}
stop_measurement();
fix_used= FALSE;
printf("Start benchmark queue_insert\n");
start_measurement();
for (i= 0; i < 128; i++)
{
perform_insert(queue);
queue_remove_all(queue);
}
stop_measurement();
}
/*
Now benchmark insertion and deletion of 16400 elements.
Used in consecutive runs this shows whether the optimised _downheap
is faster than the standard implementation.
*/
printf("Start benchmarking _downheap \n");
start_measurement();
perform_insert(queue);
for (i= 0; i < 65536; i++)
{
uint num, part;
num= *(uint*)queue_top(queue);
num+= 16;
part= num >> 22;
num_array[part]= num;
queue_top(queue)= (byte*)&num_array[part];
queue_replaced(queue);
}
for (i= 0; i < 16; i++)
queue_remove(queue, (uint) 0);
queue_remove_all(queue);
stop_measurement();
}
int main()
{
int i, add= 1;
for (i= 1; i < 1024; i+=add, add++)
{
printf("Start test for priority queue of size %u\n", i);
if (do_test(i, 0, 1))
return -1;
if (do_test(i, 1, 1))
return -1;
if (do_test(i, 0, 0))
return -1;
if (do_test(i, 1, 0))
return -1;
}
benchmark_test();
printf("OK\n");
return 0;
}
#endif

View File

@ -63,7 +63,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
parse_file.h sql_view.h sql_trigger.h \
examples/ha_example.h examples/ha_archive.h \
examples/ha_tina.h ha_blackhole.h \
ha_federated.h
ha_federated.h ha_partition.h
mysqld_SOURCES = sql_lex.cc sql_handler.cc \
item.cc item_sum.cc item_buff.cc item_func.cc \
item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \
@ -100,6 +100,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
sp_cache.cc parse_file.cc sql_trigger.cc \
examples/ha_example.cc examples/ha_archive.cc \
examples/ha_tina.cc ha_blackhole.cc \
ha_partition.cc sql_partition.cc \
ha_federated.cc
gen_lex_hash_SOURCES = gen_lex_hash.cc

View File

@ -6311,7 +6311,8 @@ my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
}
int Field_varstring::cmp(const char *a_ptr, const char *b_ptr)
int Field_varstring::cmp_max(const char *a_ptr, const char *b_ptr,
uint max_len)
{
uint a_length, b_length;
int diff;
@ -6326,6 +6327,8 @@ int Field_varstring::cmp(const char *a_ptr, const char *b_ptr)
a_length= uint2korr(a_ptr);
b_length= uint2korr(b_ptr);
}
set_if_smaller(a_length, max_len);
set_if_smaller(b_length, max_len);
diff= field_charset->coll->strnncollsp(field_charset,
(const uchar*) a_ptr+
length_bytes,
@ -6956,13 +6959,16 @@ int Field_blob::cmp(const char *a,uint32 a_length, const char *b,
}
int Field_blob::cmp(const char *a_ptr, const char *b_ptr)
int Field_blob::cmp_max(const char *a_ptr, const char *b_ptr,
uint max_length)
{
char *blob1,*blob2;
memcpy_fixed(&blob1,a_ptr+packlength,sizeof(char*));
memcpy_fixed(&blob2,b_ptr+packlength,sizeof(char*));
return Field_blob::cmp(blob1,get_length(a_ptr),
blob2,get_length(b_ptr));
uint a_len= get_length(a_ptr), b_len= get_length(b_ptr);
set_if_smaller(a_len, max_length);
set_if_smaller(b_len, max_length);
return Field_blob::cmp(blob1,a_len,blob2,b_len);
}
@ -7979,6 +7985,35 @@ my_decimal *Field_bit::val_decimal(my_decimal *deciaml_value)
}
/*
Compare two bit fields using pointers within the record.
SYNOPSIS
cmp_max()
a Pointer to field->ptr in first record
b Pointer to field->ptr in second record
max_len Maximum length used in index
DESCRIPTION
This method is used from key_rec_cmp, which is used by the merge sorts
performed by partitioned index reads and later other similar places.
The a and b pointers must point to the field within a record
(not necessarily within table->record[0])
*/
int Field_bit::cmp_max(const char *a, const char *b, uint max_len)
{
my_ptrdiff_t a_diff= a - ptr;
my_ptrdiff_t b_diff= b - ptr;
if (bit_len)
{
int flag;
uchar bits_a= get_rec_bits(bit_ptr+a_diff, bit_ofs, bit_len);
uchar bits_b= get_rec_bits(bit_ptr+b_diff, bit_ofs, bit_len);
if ((flag= (int) (bits_a - bits_b)))
return flag;
}
return memcmp(a, b, field_length);
}
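The offset arithmetic above is the general technique a Field object, which is anchored at table->record[0], uses to address the same column in any other record buffer: both parts of the bit field sit at fixed offsets within every record, so the record-to-record shift computed against ptr is equally valid for bit_ptr. A reduced standalone illustration (illustrative names, not the MySQL Field API):

#include <cstring>
#include <cstdio>

/*
  A column whose leading bits are stored in one place in the record and
  whose bytes in another, as with Field_bit. The object is bound to
  record 0; the same shift applies to both storage locations because all
  records share one layout.
*/
struct BitishField
{
  unsigned char *ptr;      /* byte part of the column in record 0    */
  unsigned char *bit_ptr;  /* leading bits of the column in record 0 */

  int cmp_max(const unsigned char *a, const unsigned char *b) const
  {
    long a_diff= a - ptr;  /* how far a's record lies from record 0  */
    long b_diff= b - ptr;
    int flag= (int) bit_ptr[a_diff] - (int) bit_ptr[b_diff];
    if (flag)              /* the leading bits decide first          */
      return flag;
    return memcmp(a, b, 1);/* then the byte part                     */
  }
};

int main()
{
  unsigned char recs[8]= {0, 1, 0, 0,   /* record 0: bits=0, byte=1  */
                          1, 0, 0, 0};  /* record 1: bits=1, byte=0  */
  unsigned char *rec0= recs, *rec1= recs + 4;
  BitishField f= { rec0 + 1, rec0 };    /* bound to record 0's layout */
  printf("%d\n", f.cmp_max(rec0 + 1, rec1 + 1)); /* -1: rec0's bits are less */
  return 0;
}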
int Field_bit::key_cmp(const byte *str, uint length)
{
if (bit_len)

View File

@ -87,7 +87,7 @@ public:
utype unireg_check;
uint32 field_length; // Length of field
uint field_index; // field number in fields array
uint16 flags;
uint32 flags;
/* fieldnr is the id of the field (first field = 1) as is also
used in key_part.
*/
@ -154,6 +154,8 @@ public:
virtual enum_field_types type() const =0;
virtual enum_field_types real_type() const { return type(); }
inline int cmp(const char *str) { return cmp(ptr,str); }
virtual int cmp_max(const char *a, const char *b, uint max_len)
{ return cmp(a, b); }
virtual int cmp(const char *,const char *)=0;
virtual int cmp_binary(const char *a,const char *b, uint32 max_length=~0L)
{ return memcmp(a,b,pack_length()); }
@ -1059,7 +1061,11 @@ public:
longlong val_int(void);
String *val_str(String*,String *);
my_decimal *val_decimal(my_decimal *);
int cmp(const char *,const char*);
int cmp_max(const char *, const char *, uint max_length);
int cmp(const char *a,const char*b)
{
return cmp_max(a, b, ~0);
}
void sort_string(char *buff,uint length);
void get_key_image(char *buff,uint length, imagetype type);
void set_key_image(char *buff,uint length);
@ -1115,7 +1121,9 @@ public:
longlong val_int(void);
String *val_str(String*,String *);
my_decimal *val_decimal(my_decimal *);
int cmp(const char *,const char*);
int cmp_max(const char *, const char *, uint max_length);
int cmp(const char *a,const char*b)
{ return cmp_max(a, b, ~0); }
int cmp(const char *a, uint32 a_length, const char *b, uint32 b_length);
int cmp_binary(const char *a,const char *b, uint32 max_length=~0L);
int key_cmp(const byte *,const byte*);
@ -1139,6 +1147,10 @@ public:
{
memcpy_fixed(str,ptr+packlength,sizeof(char*));
}
inline void get_ptr(char **str, uint row_offset)
{
memcpy_fixed(str,ptr+packlength+row_offset,sizeof(char*));
}
inline void set_ptr(char *length,char *data)
{
memcpy(ptr,length,packlength);
@ -1307,6 +1319,7 @@ public:
my_decimal *val_decimal(my_decimal *);
int cmp(const char *a, const char *b)
{ return cmp_binary(a, b); }
int cmp_max(const char *a, const char *b, uint max_length);
int key_cmp(const byte *a, const byte *b)
{ return cmp_binary((char *) a, (char *) b); }
int key_cmp(const byte *str, uint length);

View File

@ -1360,7 +1360,7 @@ int ha_berkeley::delete_row(const byte * record)
}
int ha_berkeley::index_init(uint keynr)
int ha_berkeley::index_init(uint keynr, bool sorted)
{
int error;
DBUG_ENTER("ha_berkeley::index_init");
@ -1638,7 +1638,7 @@ int ha_berkeley::rnd_init(bool scan)
{
DBUG_ENTER("rnd_init");
current_row.flags=DB_DBT_REALLOC;
DBUG_RETURN(index_init(primary_key));
DBUG_RETURN(index_init(primary_key, 0));
}
int ha_berkeley::rnd_end()
@ -2146,7 +2146,7 @@ ulonglong ha_berkeley::get_auto_increment()
(void) ha_berkeley::extra(HA_EXTRA_KEYREAD);
/* Set 'active_index' */
ha_berkeley::index_init(table->s->next_number_index);
ha_berkeley::index_init(table->s->next_number_index, 0);
if (!table->s->next_number_key_offset)
{ // Autoincrement at key-start
@ -2485,7 +2485,7 @@ void ha_berkeley::get_status()
if (!(share->status & STATUS_PRIMARY_KEY_INIT))
{
(void) extra(HA_EXTRA_KEYREAD);
index_init(primary_key);
index_init(primary_key, 0);
if (!index_last(table->record[1]))
share->auto_ident=uint5korr(current_ident);
index_end();

View File

@ -98,7 +98,7 @@ class ha_berkeley: public handler
const char **bas_ext() const;
ulong table_flags(void) const { return int_table_flags; }
uint max_supported_keys() const { return MAX_KEY-1; }
uint extra_rec_buf_length() { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
uint extra_rec_buf_length() const { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
ha_rows estimate_rows_upper_bound();
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
bool has_transactions() { return 1;}
@ -109,7 +109,7 @@ class ha_berkeley: public handler
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
int delete_row(const byte * buf);
int index_init(uint index);
int index_init(uint index, bool sorted);
int index_end();
int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);

View File

@ -1512,7 +1512,7 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
}
/* Initialized at each key walk (called multiple times unlike rnd_init()) */
int ha_federated::index_init(uint keynr)
int ha_federated::index_init(uint keynr, bool sorted)
{
DBUG_ENTER("ha_federated::index_init");
DBUG_PRINT("info",

View File

@ -154,7 +154,7 @@ public:
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
int delete_row(const byte * buf);
int index_init(uint keynr);
int index_init(uint keynr, bool sorted);
int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
int index_read_idx(byte * buf, uint idx, const byte * key,

View File

@ -3595,7 +3595,8 @@ int
ha_innobase::index_init(
/*====================*/
/* out: 0 or error number */
uint keynr) /* in: key (index) number */
uint keynr, /* in: key (index) number */
bool sorted) /* in: 1 if result MUST be sorted according to index */
{
int error = 0;
DBUG_ENTER("index_init");
@ -6646,7 +6647,7 @@ ha_innobase::innobase_read_and_init_auto_inc(
}
(void) extra(HA_EXTRA_KEYREAD);
index_init(table->s->next_number_index);
index_init(table->s->next_number_index, 1);
/* Starting from 5.0.9, we use a consistent read to read the auto-inc
column maximum value. This eliminates the spurious deadlocks caused

View File

@ -136,7 +136,7 @@ class ha_innobase: public handler
int delete_row(const byte * buf);
void unlock_row();
int index_init(uint index);
int index_init(uint index, bool sorted);
int index_end();
int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);

View File

@ -34,6 +34,7 @@
// options from mysqld.cc
extern my_bool opt_ndb_optimized_node_selection;
extern my_bool opt_ndb_linear_hash;
extern const char *opt_ndbcluster_connectstring;
// Default value for parallelism
@ -99,6 +100,7 @@ static HASH ndbcluster_open_tables;
static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length,
my_bool not_used __attribute__((unused)));
static void ndb_set_fragmentation(NDBTAB & tab, TABLE *table, uint pk_len);
static NDB_SHARE *get_share(const char *table_name);
static void free_share(NDB_SHARE *share);
@ -861,11 +863,9 @@ bool ha_ndbcluster::uses_blob_value()
{
uint no_fields= table->s->fields;
int i;
THD *thd= current_thd;
// They always put blobs at the end..
for (i= no_fields - 1; i >= 0; i--)
{
Field *field= table->field[i];
if ((m_write_op && ha_get_bit_in_write_set(i+1)) ||
(!m_write_op && ha_get_bit_in_read_set(i+1)))
{
@ -1292,8 +1292,6 @@ inline
int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
{
uint i;
THD *thd= current_thd;
DBUG_ENTER("define_read_attrs");
// Define attributes to read
@ -1333,7 +1331,8 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
Read one record from NDB using primary key
*/
int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
uint32 part_id)
{
uint no_fields= table->s->fields;
NdbConnection *trans= m_active_trans;
@ -1351,6 +1350,8 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
op->readTuple(lm) != 0)
ERR_RETURN(trans->getNdbError());
if (m_use_partition_function)
op->setPartitionId(part_id);
if (table->s->primary_key == MAX_KEY)
{
// This table has no primary key, use "hidden" primary key
@ -1388,12 +1389,12 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
Read one complementing record from NDB using primary key from old_data
*/
int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data,
uint32 old_part_id)
{
uint no_fields= table->s->fields, i;
NdbTransaction *trans= m_active_trans;
NdbOperation *op;
THD *thd= current_thd;
DBUG_ENTER("complemented_pk_read");
m_write_op= FALSE;
@ -1411,6 +1412,10 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
int res;
if ((res= set_primary_key_from_record(op, old_data)))
ERR_RETURN(trans->getNdbError());
if (m_use_partition_function)
op->setPartitionId(old_part_id);
// Read all unreferenced non-key field(s)
for (i= 0; i < no_fields; i++)
{
@ -1469,6 +1474,17 @@ int ha_ndbcluster::peek_row(const byte *record)
if ((res= set_primary_key_from_record(op, record)))
ERR_RETURN(trans->getNdbError());
if (m_use_partition_function)
{
uint32 part_id;
int error;
if ((error= m_part_info->get_partition_id(m_part_info, &part_id)))
{
DBUG_RETURN(error);
}
op->setPartitionId(part_id);
}
if (execute_no_commit_ie(this,trans) != 0)
{
table->status= STATUS_NOT_FOUND;
@ -1807,7 +1823,8 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
const key_range *end_key,
bool sorted, bool descending, byte* buf)
bool sorted, bool descending,
byte* buf, part_id_range *part_spec)
{
int res;
bool restart;
@ -1833,11 +1850,17 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
(const NDBTAB *) m_table)) ||
op->readTuples(lm, 0, parallelism, sorted, descending))
ERR_RETURN(trans->getNdbError());
if (m_use_partition_function && part_spec != NULL &&
part_spec->start_part == part_spec->end_part)
op->setPartitionId(part_spec->start_part);
m_active_cursor= op;
} else {
restart= TRUE;
op= (NdbIndexScanOperation*)m_active_cursor;
if (m_use_partition_function && part_spec != NULL &&
part_spec->start_part == part_spec->end_part)
op->setPartitionId(part_spec->start_part);
DBUG_ASSERT(op->getSorted() == sorted);
DBUG_ASSERT(op->getLockMode() ==
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type));
@ -1937,6 +1960,17 @@ int ha_ndbcluster::write_row(byte *record)
if (res != 0)
ERR_RETURN(trans->getNdbError());
if (m_use_partition_function)
{
uint32 part_id;
int error;
if ((error= m_part_info->get_partition_id(m_part_info, &part_id)))
{
DBUG_RETURN(error);
}
op->setPartitionId(part_id);
}
if (table->s->primary_key == MAX_KEY)
{
// Table has hidden primary key
@ -2094,6 +2128,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
NdbScanOperation* cursor= m_active_cursor;
NdbOperation *op;
uint i;
uint32 old_part_id= 0, new_part_id= 0;
int error;
DBUG_ENTER("update_row");
m_write_op= TRUE;
@ -2104,15 +2140,23 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
}
if (m_use_partition_function &&
(error= get_parts_for_update(old_data, new_data, table->record[0],
m_part_info, &old_part_id, &new_part_id)))
{
DBUG_RETURN(error);
}
/* Check for update of primary key for special handling */
if ((table->s->primary_key != MAX_KEY) &&
(key_cmp(table->s->primary_key, old_data, new_data)))
(key_cmp(table->s->primary_key, old_data, new_data)) ||
(old_part_id != new_part_id))
{
int read_res, insert_res, delete_res, undo_res;
DBUG_PRINT("info", ("primary key update, doing pk read+delete+insert"));
// Get all old fields, since we optimize away fields not in query
read_res= complemented_pk_read(old_data, new_data);
read_res= complemented_pk_read(old_data, new_data, old_part_id);
if (read_res)
{
DBUG_PRINT("info", ("pk read failed"));
@ -2168,6 +2212,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
m_ops_pending++;
if (uses_blob_value())
m_blobs_pending= TRUE;
if (m_use_partition_function)
cursor->setPartitionId(new_part_id);
}
else
{
@ -2175,6 +2221,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
op->updateTuple() != 0)
ERR_RETURN(trans->getNdbError());
if (m_use_partition_function)
op->setPartitionId(new_part_id);
if (table->s->primary_key == MAX_KEY)
{
// This table has no primary key, use "hidden" primary key
@ -2230,12 +2278,21 @@ int ha_ndbcluster::delete_row(const byte *record)
NdbTransaction *trans= m_active_trans;
NdbScanOperation* cursor= m_active_cursor;
NdbOperation *op;
uint32 part_id;
int error;
DBUG_ENTER("delete_row");
m_write_op= TRUE;
statistic_increment(thd->status_var.ha_delete_count,&LOCK_status);
m_rows_changed++;
if (m_use_partition_function &&
(error= get_part_for_delete(record, table->record[0], m_part_info,
&part_id)))
{
DBUG_RETURN(error);
}
if (cursor)
{
/*
@ -2250,6 +2307,9 @@ int ha_ndbcluster::delete_row(const byte *record)
ERR_RETURN(trans->getNdbError());
m_ops_pending++;
if (m_use_partition_function)
cursor->setPartitionId(part_id);
no_uncommitted_rows_update(-1);
if (!m_primary_key_update)
@ -2263,6 +2323,9 @@ int ha_ndbcluster::delete_row(const byte *record)
op->deleteTuple() != 0)
ERR_RETURN(trans->getNdbError());
if (m_use_partition_function)
op->setPartitionId(part_id);
no_uncommitted_rows_update(-1);
if (table->s->primary_key == MAX_KEY)
@ -2388,8 +2451,6 @@ void ha_ndbcluster::print_results()
DBUG_ENTER("print_results");
#ifndef DBUG_OFF
const NDBTAB *tab= (const NDBTAB*) m_table;
if (!_db_on_)
DBUG_VOID_RETURN;
@ -2444,11 +2505,13 @@ print_value:
}
int ha_ndbcluster::index_init(uint index)
int ha_ndbcluster::index_init(uint index, bool sorted)
{
DBUG_ENTER("ha_ndbcluster::index_init");
DBUG_PRINT("enter", ("index: %u", index));
DBUG_RETURN(handler::index_init(index));
DBUG_PRINT("enter", ("index: %u sorted: %d", index, sorted));
active_index= index;
m_sorted= sorted;
DBUG_RETURN(0);
}
@ -2485,56 +2548,16 @@ int ha_ndbcluster::index_read(byte *buf,
const byte *key, uint key_len,
enum ha_rkey_function find_flag)
{
key_range start_key;
bool descending= FALSE;
DBUG_ENTER("ha_ndbcluster::index_read");
DBUG_PRINT("enter", ("active_index: %u, key_len: %u, find_flag: %d",
active_index, key_len, find_flag));
int error;
ndb_index_type type= get_index_type(active_index);
const KEY* key_info= table->key_info+active_index;
m_write_op= FALSE;
switch (type){
case PRIMARY_KEY_ORDERED_INDEX:
case PRIMARY_KEY_INDEX:
if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len)
{
if (m_active_cursor && (error= close_scan()))
DBUG_RETURN(error);
DBUG_RETURN(pk_read(key, key_len, buf));
}
else if (type == PRIMARY_KEY_INDEX)
{
DBUG_RETURN(1);
}
break;
case UNIQUE_ORDERED_INDEX:
case UNIQUE_INDEX:
if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len &&
!check_null_in_key(key_info, key, key_len))
{
if (m_active_cursor && (error= close_scan()))
DBUG_RETURN(error);
DBUG_RETURN(unique_index_read(key, key_len, buf));
}
else if (type == UNIQUE_INDEX)
{
DBUG_RETURN(1);
}
break;
case ORDERED_INDEX:
break;
default:
case UNDEFINED_INDEX:
DBUG_ASSERT(FALSE);
DBUG_RETURN(1);
break;
}
key_range start_key;
start_key.key= key;
start_key.length= key_len;
start_key.flag= find_flag;
bool descending= FALSE;
descending= FALSE;
switch (find_flag) {
case HA_READ_KEY_OR_PREV:
case HA_READ_BEFORE_KEY:
@ -2545,8 +2568,8 @@ int ha_ndbcluster::index_read(byte *buf,
default:
break;
}
error= ordered_index_scan(&start_key, 0, TRUE, descending, buf);
DBUG_RETURN(error == HA_ERR_END_OF_FILE ? HA_ERR_KEY_NOT_FOUND : error);
DBUG_RETURN(read_range_first_to_buf(&start_key, 0, descending,
m_sorted, buf));
}
@ -2557,7 +2580,7 @@ int ha_ndbcluster::index_read_idx(byte *buf, uint index_no,
statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status);
DBUG_ENTER("ha_ndbcluster::index_read_idx");
DBUG_PRINT("enter", ("index_no: %u, key_len: %u", index_no, key_len));
index_init(index_no);
index_init(index_no, 0);
DBUG_RETURN(index_read(buf, key, key_len, find_flag));
}
@ -2588,7 +2611,7 @@ int ha_ndbcluster::index_first(byte *buf)
// Start the ordered index scan and fetch the first row
// Only HA_READ_ORDER indexes get called by index_first
DBUG_RETURN(ordered_index_scan(0, 0, TRUE, FALSE, buf));
DBUG_RETURN(ordered_index_scan(0, 0, TRUE, FALSE, buf, NULL));
}
@ -2596,7 +2619,7 @@ int ha_ndbcluster::index_last(byte *buf)
{
DBUG_ENTER("ha_ndbcluster::index_last");
statistic_increment(current_thd->status_var.ha_read_last_count,&LOCK_status);
DBUG_RETURN(ordered_index_scan(0, 0, TRUE, TRUE, buf));
DBUG_RETURN(ordered_index_scan(0, 0, TRUE, TRUE, buf, NULL));
}
int ha_ndbcluster::index_read_last(byte * buf, const byte * key, uint key_len)
@ -2605,67 +2628,76 @@ int ha_ndbcluster::index_read_last(byte * buf, const byte * key, uint key_len)
DBUG_RETURN(index_read(buf, key, key_len, HA_READ_PREFIX_LAST));
}
inline
int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
const key_range *end_key,
bool eq_r, bool sorted,
bool desc, bool sorted,
byte* buf)
{
KEY* key_info;
int error= 1;
part_id_range part_spec;
ndb_index_type type= get_index_type(active_index);
const KEY* key_info= table->key_info+active_index;
int error;
DBUG_ENTER("ha_ndbcluster::read_range_first_to_buf");
DBUG_PRINT("info", ("eq_r: %d, sorted: %d", eq_r, sorted));
DBUG_PRINT("info", ("desc: %d, sorted: %d", desc, sorted));
switch (get_index_type(active_index)){
if (m_use_partition_function)
{
get_partition_set(table, buf, active_index, start_key, &part_spec);
if (part_spec.start_part > part_spec.end_part)
{
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
else if (part_spec.start_part == part_spec.end_part)
{
/*
Only one partition needs to be scanned; if sorted output was
requested we can drop that requirement, since the output of a single
ordered partition index is already sorted.
*/
sorted= FALSE;
}
}
m_write_op= FALSE;
switch (type){
case PRIMARY_KEY_ORDERED_INDEX:
case PRIMARY_KEY_INDEX:
key_info= table->key_info + active_index;
if (start_key &&
start_key->length == key_info->key_length &&
start_key->flag == HA_READ_KEY_EXACT)
{
if (m_active_cursor && (error= close_scan()))
DBUG_RETURN(error);
error= pk_read(start_key->key, start_key->length, buf);
DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
DBUG_RETURN(pk_read(start_key->key, start_key->length, buf,
part_spec.start_part));
}
break;
case UNIQUE_ORDERED_INDEX:
case UNIQUE_INDEX:
key_info= table->key_info + active_index;
if (start_key && start_key->length == key_info->key_length &&
start_key->flag == HA_READ_KEY_EXACT &&
!check_null_in_key(key_info, start_key->key, start_key->length))
{
if (m_active_cursor && (error= close_scan()))
DBUG_RETURN(error);
error= unique_index_read(start_key->key, start_key->length, buf);
DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
DBUG_RETURN(unique_index_read(start_key->key, start_key->length, buf));
}
break;
default:
break;
}
// Start the ordered index scan and fetch the first row
error= ordered_index_scan(start_key, end_key, sorted, FALSE, buf);
DBUG_RETURN(error);
DBUG_RETURN(ordered_index_scan(start_key, end_key, sorted, desc, buf,
&part_spec));
}
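The pruning logic at the top of read_range_first_to_buf thus reduces to three cases: an empty partition set ends the scan immediately, a single partition lets us drop any sort requirement, and anything else proceeds as a multi-partition scan. A hedged standalone rendering of just that decision (PartSpec is an illustrative stand-in for part_id_range):

#include <cstdio>

struct PartSpec { unsigned start_part, end_part; };

/* Returns false when no partition can contain the key (the caller maps
   this to HA_ERR_END_OF_FILE); clears 'sorted' when one partition
   suffices, since one ordered partition index is already sorted */
static bool prune_for_scan(const PartSpec &spec, bool &sorted)
{
  if (spec.start_part > spec.end_part)
    return false;
  if (spec.start_part == spec.end_part)
    sorted= false;
  return true;
}

int main()
{
  bool sorted= true;
  PartSpec none= {1, 0}, one= {2, 2}, many= {0, 3};
  printf("%d\n", prune_for_scan(none, sorted));                 /* 0: skip */
  printf("%d %d\n", prune_for_scan(one, sorted), (int) sorted); /* 1 0     */
  sorted= true;
  printf("%d %d\n", prune_for_scan(many, sorted), (int) sorted);/* 1 1     */
  return 0;
}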
int ha_ndbcluster::read_range_first(const key_range *start_key,
const key_range *end_key,
bool eq_r, bool sorted)
{
byte* buf= table->record[0];
DBUG_ENTER("ha_ndbcluster::read_range_first");
m_write_op= FALSE;
DBUG_RETURN(read_range_first_to_buf(start_key,
end_key,
eq_r,
sorted,
buf));
DBUG_RETURN(read_range_first_to_buf(start_key, end_key, FALSE,
sorted, buf));
}
int ha_ndbcluster::read_range_next()
@ -2691,7 +2723,7 @@ int ha_ndbcluster::rnd_init(bool scan)
DBUG_RETURN(-1);
}
}
index_init(table->s->primary_key);
index_init(table->s->primary_key, 0);
DBUG_RETURN(0);
}
@ -2758,7 +2790,20 @@ int ha_ndbcluster::rnd_pos(byte *buf, byte *pos)
&LOCK_status);
// The primary key for the record is stored in pos
// Perform a pk_read using primary key "index"
DBUG_RETURN(pk_read(pos, ref_length, buf));
{
part_id_range part_spec;
if (m_use_partition_function)
{
key_range key_spec;
KEY *key_info= table->key_info + active_index;
key_spec.key= pos;
key_spec.length= ref_length;
key_spec.flag= HA_READ_KEY_EXACT;
get_full_part_id_from_key(table, buf, key_info, &key_spec, &part_spec);
DBUG_ASSERT(part_spec.start_part == part_spec.end_part);
}
DBUG_RETURN(pk_read(pos, ref_length, buf, part_spec.start_part));
}
}
@ -2904,6 +2949,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
m_use_write= FALSE;
m_ignore_dup_key= FALSE;
break;
default:
break;
}
DBUG_RETURN(0);
@ -3691,56 +3738,6 @@ static int create_ndb_column(NDBCOL &col,
return 0;
}
/*
Create a table in NDB Cluster
*/
static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
{
if (form->s->max_rows == (ha_rows) 0) /* default setting, don't set fragmentation */
return;
/**
* get the number of fragments right
*/
uint no_fragments;
{
#if MYSQL_VERSION_ID >= 50000
uint acc_row_size= 25 + /*safety margin*/ 2;
#else
uint acc_row_size= pk_length*4;
/* add acc overhead */
if (pk_length <= 8) /* main page will set the limit */
acc_row_size+= 25 + /*safety margin*/ 2;
else /* overflow page will set the limit */
acc_row_size+= 4 + /*safety margin*/ 4;
#endif
ulonglong acc_fragment_size= 512*1024*1024;
ulonglong max_rows= form->s->max_rows;
#if MYSQL_VERSION_ID >= 50100
no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1;
#else
no_fragments= ((max_rows*acc_row_size)/acc_fragment_size+1
+1/*correct rounding*/)/2;
#endif
}
{
uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
NDBTAB::FragmentType ftype;
if (no_fragments > 2*no_nodes)
{
ftype= NDBTAB::FragAllLarge;
if (no_fragments > 4*no_nodes)
push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
"Ndb might have problems storing the max amount of rows specified");
}
else if (no_fragments > no_nodes)
ftype= NDBTAB::FragAllMedium;
else
ftype= NDBTAB::FragAllSmall;
tab.setFragmentType(ftype);
}
}
int ha_ndbcluster::create(const char *name,
TABLE *form,
HA_CREATE_INFO *info)
@ -3843,7 +3840,22 @@ int ha_ndbcluster::create(const char *name,
}
}
ndb_set_fragmentation(tab, form, pk_length);
// Check partition info
partition_info *part_info= form->s->part_info;
if (part_info)
{
int error;
if ((error= set_up_partition_info(part_info, form, (void*)&tab)))
{
DBUG_RETURN(error);
}
}
else
{
ndb_set_fragmentation(tab, form, pk_length);
}
if ((my_errno= check_ndb_connection()))
DBUG_RETURN(my_errno);
@ -4092,6 +4104,9 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
HA_NEED_READ_RANGE_BUFFER |
HA_CAN_BIT_FIELD),
m_share(0),
m_part_info(NULL),
m_use_partition_function(FALSE),
m_sorted(FALSE),
m_use_write(FALSE),
m_ignore_dup_key(FALSE),
m_primary_key_update(FALSE),
@ -4206,6 +4221,15 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
if (!res)
info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
if (table->s->part_info)
{
m_part_info= table->s->part_info;
if (!(m_part_info->part_type == HASH_PARTITION &&
m_part_info->list_of_part_fields &&
!is_sub_partitioned(m_part_info)))
m_use_partition_function= TRUE;
}
DBUG_RETURN(res);
}
@ -5478,12 +5502,29 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
for (; multi_range_curr<multi_range_end && curr+reclength <= end_of_buffer;
multi_range_curr++)
{
switch (index_type){
part_id_range part_spec;
if (m_use_partition_function)
{
get_partition_set(table, curr, active_index,
&multi_range_curr->start_key,
&part_spec);
if (part_spec.start_part > part_spec.end_part)
{
/*
We can skip this range since its key cannot fall within any
partition
*/
curr += reclength;
multi_range_curr->range_flag |= SKIP_RANGE;
continue;
}
}
switch(index_type){
case PRIMARY_KEY_ORDERED_INDEX:
if (!(multi_range_curr->start_key.length == key_info->key_length &&
multi_range_curr->start_key.flag == HA_READ_KEY_EXACT))
goto range;
/* fall through */
multi_range_curr->start_key.flag == HA_READ_KEY_EXACT))
goto range;
// else fall through
case PRIMARY_KEY_INDEX:
{
multi_range_curr->range_flag |= UNIQUE_RANGE;
@ -5491,7 +5532,9 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
!op->readTuple(lm) &&
!set_primary_key(op, multi_range_curr->start_key.key) &&
!define_read_attrs(curr, op) &&
(op->setAbortOption(AO_IgnoreError), TRUE))
(op->setAbortOption(AO_IgnoreError), TRUE) &&
(!m_use_partition_function ||
(op->setPartitionId(part_spec.start_part), true)))
curr += reclength;
else
ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
@ -5500,11 +5543,11 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
break;
case UNIQUE_ORDERED_INDEX:
if (!(multi_range_curr->start_key.length == key_info->key_length &&
multi_range_curr->start_key.flag == HA_READ_KEY_EXACT &&
!check_null_in_key(key_info, multi_range_curr->start_key.key,
multi_range_curr->start_key.length)))
goto range;
/* fall through */
multi_range_curr->start_key.flag == HA_READ_KEY_EXACT &&
!check_null_in_key(key_info, multi_range_curr->start_key.key,
multi_range_curr->start_key.length)))
goto range;
// else fall through
case UNIQUE_INDEX:
{
multi_range_curr->range_flag |= UNIQUE_RANGE;
@ -5518,8 +5561,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
break;
}
case ORDERED_INDEX:
{
case ORDERED_INDEX: {
range:
multi_range_curr->range_flag &= ~(uint)UNIQUE_RANGE;
if (scanOp == 0)
@ -5594,7 +5636,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
}
#if 0
#define DBUG_MULTI_RANGE(x) printf("read_multi_range_next: case %d\n", x);
#define DBUG_MULTI_RANGE(x) DBUG_PRINT("info", ("read_multi_range_next: case %d\n", x));
#else
#define DBUG_MULTI_RANGE(x)
#endif
@ -5605,6 +5647,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
DBUG_ENTER("ha_ndbcluster::read_multi_range_next");
if (m_disable_multi_read)
{
DBUG_MULTI_RANGE(11);
DBUG_RETURN(handler::read_multi_range_next(multi_range_found_p));
}
@ -5614,10 +5657,16 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
const NdbOperation* op= m_current_multi_operation;
for (;multi_range_curr < m_multi_range_defined; multi_range_curr++)
{
DBUG_MULTI_RANGE(12);
if (multi_range_curr->range_flag & SKIP_RANGE)
continue;
if (multi_range_curr->range_flag & UNIQUE_RANGE)
{
if (op->getNdbError().code == 0)
{
DBUG_MULTI_RANGE(13);
goto found_next;
}
op= m_active_trans->getNextCompletedOperation(op);
m_multi_range_result_ptr += reclength;
@ -5634,6 +5683,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
}
else
{
DBUG_MULTI_RANGE(14);
goto close_scan;
}
}
@ -5667,6 +5717,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
DBUG_ASSERT(range_no == -1);
if ((res= m_multi_cursor->nextResult(true)))
{
DBUG_MULTI_RANGE(15);
goto close_scan;
}
multi_range_curr--; // Will be increased in for-loop
@ -5694,12 +5745,16 @@ close_scan:
}
else
{
DBUG_MULTI_RANGE(9);
DBUG_RETURN(ndb_err(m_active_trans));
}
}
if (multi_range_curr == multi_range_end)
{
DBUG_MULTI_RANGE(16);
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
/**
* Read remaining ranges
@ -6916,6 +6971,8 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
: NULL;
break;
default:
field= NULL; //Keep compiler happy
DBUG_ASSERT(0);
break;
}
switch ((negated) ?
@ -7263,4 +7320,178 @@ ha_ndbcluster::generate_scan_filter(Ndb_cond_stack *ndb_cond_stack,
DBUG_RETURN(0);
}
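The SKIP_RANGE flag used in read_multi_range_first/read_multi_range_next above implements a small two-phase protocol: ranges whose keys cannot fall within any partition are flagged while the batch is defined, and the result iteration later passes over them without touching NDB. A schematic stand-in (flag values and structures here are illustrative, not the server's):

#include <cstdio>

enum { SKIP_RANGE_F= 1 };

struct Range { unsigned flag; int key; };

int main()
{
  Range ranges[3]= { {0, 5}, {0, 42}, {0, 7} };
  /* Phase 1: pretend key 42 prunes down to an empty partition set */
  for (int i= 0; i < 3; i++)
    if (ranges[i].key == 42)
      ranges[i].flag|= SKIP_RANGE_F;
  /* Phase 2: deliver results, silently passing over skipped ranges */
  for (int i= 0; i < 3; i++)
  {
    if (ranges[i].flag & SKIP_RANGE_F)
      continue;
    printf("fetch range %d (key %d)\n", i, ranges[i].key);
  }
  return 0;
}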
/*
Create a table in NDB Cluster
*/
static uint get_no_fragments(ulonglong max_rows)
{
#if MYSQL_VERSION_ID >= 50000
uint acc_row_size= 25 + /*safety margin*/ 2;
#else
uint acc_row_size= pk_length*4;
/* add acc overhead */
if (pk_length <= 8) /* main page will set the limit */
acc_row_size+= 25 + /*safety margin*/ 2;
else /* overflow page will set the limit */
acc_row_size+= 4 + /*safety margin*/ 4;
#endif
ulonglong acc_fragment_size= 512*1024*1024;
#if MYSQL_VERSION_ID >= 50100
return (max_rows*acc_row_size)/acc_fragment_size+1;
#else
return ((max_rows*acc_row_size)/acc_fragment_size+1
+1/*correct rounding*/)/2;
#endif
}
/*
Routine to adjust default number of partitions to always be a multiple
of number of nodes and never more than 4 times the number of nodes.
*/
static bool adjusted_frag_count(uint no_fragments, uint no_nodes,
uint &reported_frags)
{
uint i= 0;
reported_frags= no_nodes;
while (reported_frags < no_fragments && ++i < 4 &&
(reported_frags + no_nodes) < MAX_PARTITIONS)
reported_frags+= no_nodes;
return (reported_frags < no_fragments);
}
int ha_ndbcluster::get_default_no_partitions(ulonglong max_rows)
{
uint reported_frags;
uint no_fragments= get_no_fragments(max_rows);
uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
adjusted_frag_count(no_fragments, no_nodes, reported_frags);
return (int)reported_frags;
}
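To make the default-partition arithmetic concrete, here is a hedged standalone rendering of the two helpers above (same formulas, the MYSQL_VERSION_ID >= 50100 branch; the MAX_PARTITIONS guard is omitted):

#include <cstdio>

typedef unsigned long long ulonglong;

static unsigned get_no_fragments_sketch(ulonglong max_rows)
{
  unsigned acc_row_size= 25 + 2;                  /* incl. safety margin  */
  ulonglong acc_fragment_size= 512ULL*1024*1024;  /* 512 MB per fragment  */
  return (unsigned) ((max_rows * acc_row_size) / acc_fragment_size + 1);
}

static unsigned adjusted_frag_count_sketch(unsigned no_fragments,
                                           unsigned no_nodes)
{
  unsigned i= 0, reported= no_nodes;    /* always a multiple of no_nodes */
  while (reported < no_fragments && ++i < 4)
    reported+= no_nodes;                /* but never more than 4x nodes  */
  return reported;
}

int main()
{
  /* MAX_ROWS = 200 million on a 4-node cluster:
     200e6 * 27 / 512MB + 1 = 11 fragments, rounded up to 12 (3 * 4) */
  unsigned frags= get_no_fragments_sketch(200000000ULL);
  printf("%u -> %u\n", frags, adjusted_frag_count_sketch(frags, 4));
  return 0;
}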
/*
User defined partitioning set-up. We need to check how many fragments the
user wants defined and which node groups to put those into. Later we also
want to attach those partitions to a tablespace.
All the functionality of the partition function, partition limits and so
forth is entirely handled by the MySQL Server. There is one exception to
this rule: for PARTITION BY KEY, NDB handles the hash function, so this
type can be handled transparently also by NDB API programs.
For RANGE, HASH, LIST and subpartitioning, NDB API programs must
implement the function that maps a row to a partition.
*/
uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
TABLE *table,
void *tab_par)
{
DBUG_ENTER("ha_ndbcluster::set_up_partition_info");
ushort node_group[MAX_PARTITIONS];
ulong ng_index= 0, i, j;
NDBTAB *tab= (NDBTAB*)tab_par;
NDBTAB::FragmentType ftype= NDBTAB::UserDefined;
partition_element *part_elem;
if (part_info->part_type == HASH_PARTITION &&
part_info->list_of_part_fields == TRUE)
{
Field **fields= part_info->part_field_array;
if (part_info->linear_hash_ind)
ftype= NDBTAB::DistrKeyLin;
else
ftype= NDBTAB::DistrKeyHash;
for (i= 0; i < part_info->part_field_list.elements; i++)
{
NDBCOL *col= tab->getColumn(fields[i]->fieldnr - 1);
DBUG_PRINT("info",("setting dist key on %s", col->getName()));
col->setPartitionKey(TRUE);
}
}
List_iterator<partition_element> part_it(part_info->partitions);
for (i= 0; i < part_info->no_parts; i++)
{
part_elem= part_it++;
if (!is_sub_partitioned(part_info))
{
node_group[ng_index++]= part_elem->nodegroup_id;
//Here we should insert tablespace id based on tablespace name
}
else
{
List_iterator<partition_element> sub_it(part_elem->subpartitions);
for (j= 0; j < part_info->no_subparts; j++)
{
part_elem= sub_it++;
node_group[ng_index++]= part_elem->nodegroup_id;
//Here we should insert tablespace id based on tablespace name
}
}
}
{
uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
if (ng_index > 4 * no_nodes)
{
DBUG_RETURN(1300);
}
}
tab->setNodeGroupIds(&node_group, ng_index);
tab->setFragmentType(ftype);
DBUG_RETURN(0);
}
/*
This routine sets up fragmentation when the user has only specified
ENGINE = NDB and no user-defined partitioning whatsoever. All values
will thus be based on defaults. We choose Linear Hash or Hash with
perfect spread depending on a session variable defined in MySQL.
*/
static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
{
NDBTAB::FragmentType ftype;
ushort node_group[MAX_PARTITIONS];
uint no_nodes= g_ndb_cluster_connection->no_db_nodes(), no_fragments, i;
DBUG_ENTER("ndb_set_fragmentation");
if (form->s->max_rows == (ha_rows) 0)
{
no_fragments= no_nodes;
}
else
{
/*
Ensure that we get enough fragments to handle all rows and ensure that
the table is fully distributed by keeping the number of fragments a
multiple of the number of nodes.
*/
uint fragments= get_no_fragments(form->s->max_rows);
if (adjusted_frag_count(fragments, no_nodes, no_fragments))
{
push_warning(current_thd,
MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
"Ndb might have problems storing the max amount of rows specified");
}
}
/*
Always start with node group 0 and continue with next node group from
there
*/
node_group[0]= 0;
for (i= 1; i < no_fragments; i++)
node_group[i]= UNDEF_NODEGROUP;
if (opt_ndb_linear_hash)
ftype= NDBTAB::DistrKeyLin;
else
ftype= NDBTAB::DistrKeyHash;
tab.setFragmentType(ftype);
tab.setNodeGroupIds(&node_group, no_fragments);
DBUG_VOID_RETURN;
}
#endif /* HAVE_NDBCLUSTER_DB */

View File

@ -420,7 +420,7 @@ class ha_ndbcluster: public handler
int write_row(byte *buf);
int update_row(const byte *old_data, byte *new_data);
int delete_row(const byte *buf);
int index_init(uint index);
int index_init(uint index, bool sorted);
int index_end();
int index_read(byte *buf, const byte *key, uint key_len,
enum ha_rkey_function find_flag);
@ -462,6 +462,11 @@ class ha_ndbcluster: public handler
const char * table_type() const;
const char ** bas_ext() const;
ulong table_flags(void) const;
ulong partition_flags(void) const
{
return (HA_CAN_PARTITION | HA_CAN_UPDATE_PARTITION_KEY |
HA_CAN_PARTITION_UNIQUE);
}
ulong index_flags(uint idx, uint part, bool all_parts) const;
uint max_supported_record_length() const;
uint max_supported_keys() const;
@ -471,6 +476,7 @@ class ha_ndbcluster: public handler
int rename_table(const char *from, const char *to);
int delete_table(const char *name);
int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
int get_default_no_partitions(ulonglong max_rows);
THR_LOCK_DATA **store_lock(THD *thd,
THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
@ -549,15 +555,21 @@ private:
NDB_INDEX_TYPE get_index_type_from_table(uint index_no) const;
int check_index_fields_not_null(uint index_no);
int pk_read(const byte *key, uint key_len, byte *buf);
int complemented_pk_read(const byte *old_data, byte *new_data);
uint set_up_partition_info(partition_info *part_info,
TABLE *table,
void *tab);
int complemented_pk_read(const byte *old_data, byte *new_data,
uint32 old_part_id);
int pk_read(const byte *key, uint key_len, byte *buf, uint32 part_id);
int ordered_index_scan(const key_range *start_key,
const key_range *end_key,
bool sorted, bool descending, byte* buf,
part_id_range *part_spec);
int full_table_scan(byte * buf);
int peek_row(const byte *record);
int unique_index_read(const byte *key, uint key_len,
byte *buf);
int ordered_index_scan(const key_range *start_key,
const key_range *end_key,
bool sorted, bool descending, byte* buf);
int full_table_scan(byte * buf);
int fetch_next(NdbScanOperation* op);
int next_result(byte *buf);
int define_read_attrs(byte* buf, NdbOperation* op);
@ -637,6 +649,11 @@ private:
// NdbRecAttr has no reference to blob
typedef union { const NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
partition_info *m_part_info;
byte *m_rec0;
Field **m_part_field_array;
bool m_use_partition_function;
bool m_sorted;
bool m_use_write;
bool m_ignore_dup_key;
bool m_primary_key_update;

sql/ha_partition.cc (3162 lines) Normal file

File diff suppressed because it is too large

sql/ha_partition.h (916 lines) Normal file
View File

@ -0,0 +1,916 @@
/* Copyright (C) 2005 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifdef __GNUC__
#pragma interface /* gcc class implementation */
#endif
/*
PARTITION_SHARE is a structure that will be shared among all open handlers.
The partition handler implements the minimum of what you will probably need.
*/
typedef struct st_partition_share
{
char *table_name;
uint table_name_length, use_count;
pthread_mutex_t mutex;
THR_LOCK lock;
} PARTITION_SHARE;
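The share follows the usual one-share-per-open-table pattern: handler instances for the same table find a common PARTITION_SHARE under a global mutex and reference-count it. A schematic of how such a share is typically acquired (illustrative code, not the implementation in ha_partition.cc):

#include <pthread.h>
#include <cstring>
#include <cstdio>

struct Share { char name[64]; unsigned use_count; }; /* illustrative only */

static pthread_mutex_t share_mutex= PTHREAD_MUTEX_INITIALIZER;
static Share shares[8];
static unsigned n_shares= 0;

static Share *get_share_sketch(const char *name)
{
  pthread_mutex_lock(&share_mutex);
  Share *s= 0;
  for (unsigned i= 0; i < n_shares; i++)   /* find existing share */
    if (!strcmp(shares[i].name, name))
      s= &shares[i];
  if (!s && n_shares < 8)                  /* else create it */
  {
    s= &shares[n_shares++];
    strncpy(s->name, name, sizeof(s->name) - 1);
    s->name[sizeof(s->name) - 1]= 0;
    s->use_count= 0;
  }
  if (s)
    s->use_count++;
  pthread_mutex_unlock(&share_mutex);
  return s;
}

int main()
{
  get_share_sketch("t1");
  Share *s= get_share_sketch("t1");
  printf("use_count=%u\n", s->use_count);  /* 2: both opens share one */
  return 0;
}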
#define PARTITION_BYTES_IN_POS 2
class ha_partition :public handler
{
private:
enum partition_index_scan_type
{
partition_index_read= 0,
partition_index_first= 1,
partition_index_last= 2,
partition_no_index_scan= 3
};
/* Data for the partition handler */
char *m_file_buffer; // Buffer with names
char *m_name_buffer_ptr; // Pointer to first partition name
uchar *m_engine_array; // Array of types of the handlers
handler **m_file; // Array of references to handler inst.
partition_info *m_part_info; // local reference to partition
byte *m_start_key_ref; // Reference of start key in current
// index scan info
Field **m_part_field_array; // Part field array locally to save acc
byte *m_ordered_rec_buffer; // Row and key buffer for ord. idx scan
KEY *m_curr_key_info; // Current index
byte *m_rec0; // table->record[0]
QUEUE queue; // Prio queue used by sorted read
/*
Since the partition handler is a handler on top of other handlers, it
is necessary to keep information about what the underlying handler
characteristics are. It is not possible to keep any handler instances
for this since the MySQL Server sometimes allocates handler objects
without freeing them.
*/
u_long m_table_flags;
u_long m_low_byte_first;
uint m_tot_parts; // Total number of partitions;
uint m_last_part; // Last file that we update,write
int m_lock_type; // Remembers type of last
// external_lock
part_id_range m_part_spec; // Which parts to scan
uint m_scan_value; // Value passed in rnd_init
// call
uint m_ref_length; // Length of position in this
// handler object
key_range m_start_key; // index read key range
enum partition_index_scan_type m_index_scan_type;// What type of index
// scan
uint m_top_entry; // Which partition is to
// deliver next result
uint m_rec_length; // Local copy of record length
bool m_ordered; // Ordered/Unordered index scan
bool m_has_transactions; // Can we support transactions
bool m_pkey_is_clustered; // Is primary key clustered
bool m_create_handler; // Handler used to create table
bool m_is_sub_partitioned; // Is subpartitioned
bool m_ordered_scan_ongoing;
bool m_use_bit_array;
/*
We keep track if all underlying handlers are MyISAM since MyISAM has a
great number of extra flags not needed by other handlers.
*/
bool m_myisam; // Are all underlying handlers
// MyISAM
/*
We keep track of InnoDB handlers below since it requires proper setting
of query_id in fields at index_init and index_read calls.
*/
bool m_innodb; // Are all underlying handlers
// InnoDB
/*
When calling extra(HA_EXTRA_CACHE) we do not pass this to the underlying
handlers immediately. Instead we cache it and call the underlying
immediately before starting the scan on the partition. This is to
prevent allocating a READ CACHE for each partition in parallel when
performing a full table scan on MyISAM partitioned table.
This state is cleared by extra(HA_EXTRA_NO_CACHE).
*/
bool m_extra_cache;
uint m_extra_cache_size;
void init_handler_variables();
/*
Variables for lock structures.
*/
THR_LOCK_DATA lock; /* MySQL lock */
PARTITION_SHARE *share; /* Shared lock info */
public:
/*
-------------------------------------------------------------------------
MODULE create/delete handler object
-------------------------------------------------------------------------
Object create/delete methods. The normal one is called when a table object
exists. There is also a method to create the handler object with only
partition information. This is used from mysql_create_table when the
table is to be created and the engine type is deduced to be the
partition handler.
-------------------------------------------------------------------------
*/
ha_partition(TABLE * table);
ha_partition(partition_info * part_info);
~ha_partition();
/*
A partition handler has no characteristics in itself. It only inherits
those from the underlying handlers. Here we set-up those constants to
enable later calls of the methods to retrieve constants from the under-
lying handlers. Returns false if not successful.
*/
int ha_initialise();
/*
-------------------------------------------------------------------------
MODULE meta data changes
-------------------------------------------------------------------------
Meta data routines to CREATE, DROP, RENAME table and often used at
ALTER TABLE (update_create_info used from ALTER TABLE and SHOW ..).
update_table_comment is used in SHOW TABLE commands to provide a
chance for the handler to add any interesting comments to the table
comment beyond the user's own comment.
create_handler_files is called before opening a new handler object
with openfrm to call create. It is used to create any local handler
object needed in opening the object in openfrm
-------------------------------------------------------------------------
*/
virtual int delete_table(const char *from);
virtual int rename_table(const char *from, const char *to);
virtual int create(const char *name, TABLE * form,
HA_CREATE_INFO * create_info);
virtual int create_handler_files(const char *name);
virtual void update_create_info(HA_CREATE_INFO * create_info);
virtual char *update_table_comment(const char *comment);
private:
/*
delete_table, rename_table and create use very similar logic, which
is packed into this routine.
*/
uint del_ren_cre_table(const char *from,
const char *to= NULL,
TABLE * table_arg= NULL,
HA_CREATE_INFO * create_info= NULL);
/*
One method to create the table_name.par file containing the names of the
underlying partitions, their engine and the number of partitions.
And one method to read it in.
*/
bool create_handler_file(const char *name);
bool get_from_handler_file(const char *name);
bool new_handlers_from_part_info();
bool create_handlers();
void clear_handler_file();
void set_up_table_before_create(TABLE * table_arg, HA_CREATE_INFO * info,
uint part_id);
partition_element *find_partition_element(uint part_id);
public:
/*
-------------------------------------------------------------------------
MODULE open/close object
-------------------------------------------------------------------------
Open and close the handler object to ensure that all underlying files
and objects allocated and deallocated for query handling are handled
properly.
-------------------------------------------------------------------------
A handler object is opened as part of its initialisation and before
being used for normal queries (though not always before meta-data changes).
If the object was opened it will also be closed before being deleted.
*/
virtual int open(const char *name, int mode, uint test_if_locked);
virtual int close(void);
/*
-------------------------------------------------------------------------
MODULE start/end statement
-------------------------------------------------------------------------
This module contains methods that are used to understand start/end of
statements, transaction boundaries, and aid for proper concurrency
control.
The partition handler need not implement abort and commit since this
will be handled by any underlying handlers implementing transactions.
There is only one call to each handler type involved per transaction
and these go directly to the handlers supporting transactions
(currently InnoDB, BDB and NDB).
-------------------------------------------------------------------------
*/
virtual THR_LOCK_DATA **store_lock(THD * thd, THR_LOCK_DATA ** to,
enum thr_lock_type lock_type);
virtual int external_lock(THD * thd, int lock_type);
/*
When table is locked a statement is started by calling start_stmt
instead of external_lock
*/
virtual int start_stmt(THD * thd);
/*
Lock count is the number of locked underlying handlers (I assume)
*/
virtual uint lock_count(void) const;
/*
Call to unlock rows not to be updated in transaction
*/
virtual void unlock_row();
/*
-------------------------------------------------------------------------
MODULE change record
-------------------------------------------------------------------------
This part of the handler interface is used to change the records
after INSERT, DELETE, UPDATE and REPLACE method calls, but also by
other special meta-data operations such as ALTER TABLE, LOAD DATA and
TRUNCATE.
-------------------------------------------------------------------------
These methods are used for insert (write_row), update (update_row)
and delete (delete_row). All methods to change data always work on
one row at a time. update_row and delete_row also receive the old
row.
delete_all_rows will delete all rows in the table in one call, as a
special optimisation for DELETE from table;
Bulk inserts are supported if all underlying handlers support them.
start_bulk_insert and end_bulk_insert are called before and after a
number of calls to write_row.
This is not implemented yet, though.
*/
virtual int write_row(byte * buf);
virtual int update_row(const byte * old_data, byte * new_data);
virtual int delete_row(const byte * buf);
virtual int delete_all_rows(void);
virtual void start_bulk_insert(ha_rows rows);
virtual int end_bulk_insert();
/*
-------------------------------------------------------------------------
MODULE full table scan
-------------------------------------------------------------------------
This module is used for the most basic access method for any table
handler. This is to fetch all data through a full table scan. No
indexes are needed to implement this part.
It contains one method to start the scan (rnd_init) that can also be
called multiple times (typical in a nested loop join). Then proceeding
to the next record (rnd_next) and closing the scan (rnd_end).
To remember a record for later access there is a method (position)
and there is a method used to retrieve the record based on the stored
position.
The position can be a file position, a primary key, a ROWID dependent
on the handler below.
-------------------------------------------------------------------------
*/
/*
Unlike index_init(), rnd_init() can be called twice
without rnd_end() in between (this only makes sense if scan=1).
In that case the second call should prepare for a new table scan
(e.g. if rnd_init allocates the cursor, the second call should
position it at the start of the table; there is no need to deallocate
and allocate it again)
*/
virtual int rnd_init(bool scan);
virtual int rnd_end();
virtual int rnd_next(byte * buf);
virtual int rnd_pos(byte * buf, byte * pos);
virtual void position(const byte * record);
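For orientation, the calling protocol this module implements, shown against a toy stand-in (not the handler API itself; a plain int array plays the table):

#include <cstdio>

/* Toy stand-in: rnd_init starts or restarts the scan, rnd_next returns
   0 until the rows run out, position/rnd_pos remember and re-fetch */
struct ToyScan
{
  const int *rows; int n; int cur;
  void rnd_init()               { cur= 0; }          /* restartable   */
  int  rnd_next(int *buf)
  { return (cur < n) ? (*buf= rows[cur++], 0) : 1; }
  int  position() const         { return cur - 1; }  /* last row read */
  void rnd_pos(int *buf, int p) { *buf= rows[p]; }
};

int main()
{
  const int data[]= {10, 20, 30};
  ToyScan t= { data, 3, 0 };
  int buf, saved= 0;
  t.rnd_init();
  while (!t.rnd_next(&buf))
    if (buf == 20)
      saved= t.position();      /* remember this row for later */
  t.rnd_pos(&buf, saved);
  printf("%d\n", buf);          /* prints 20 */
  return 0;
}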
/*
-------------------------------------------------------------------------
MODULE index scan
-------------------------------------------------------------------------
This part of the handler interface is used to perform access through
indexes. The interface is defined as a scan interface but the handler
can also use key lookup if the index is a unique index or a primary
key index.
Index scans are mostly useful for SELECT queries but are an important
part also of UPDATE, DELETE, REPLACE and CREATE TABLE table AS SELECT
and so forth.
Naturally an index is needed for an index scan, and indexes can be
either ordered or hash based. Some ordered indexes can return data in
order, but not necessarily all of them.
There are many flags that define the behavior of indexes in the
various handlers. These methods are found in the optimizer module.
-------------------------------------------------------------------------
index_read is called to start a scan of an index. The find_flag defines
the semantics of the scan. These flags are defined in
include/my_base.h
index_read_idx is the same but also initializes the index before doing
the same thing as index_read. Thus it is similar to index_init followed
by index_read; this is also how we implement it.
index_read/index_read_idx also return the first row. Thus for
key lookups, index_read will be the only call to the handler in
the index scan.
index_init initializes an index before using it and index_end does
any end processing needed.
*/
virtual int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
virtual int index_read_idx(byte * buf, uint idx, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
virtual int index_init(uint idx, bool sorted);
virtual int index_end();
/*
These methods are used to jump to next or previous entry in the index
scan. There are also methods to jump to first and last entry.
*/
virtual int index_next(byte * buf);
virtual int index_prev(byte * buf);
virtual int index_first(byte * buf);
virtual int index_last(byte * buf);
virtual int index_next_same(byte * buf, const byte * key, uint keylen);
virtual int index_read_last(byte * buf, const byte * key, uint keylen);
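The protocol described above, shown against a toy stand-in (again not the handler API; a sorted int array plays the index, and index_read itself delivers the first matching row):

#include <cstdio>

struct ToyIndex
{
  const int *keys; int n; int pos;

  void index_init()                { pos= -1; }
  /* Positions on the first entry >= key and returns it: the scan's
     first row comes back from index_read itself */
  int index_read(int key, int *row)
  {
    for (pos= 0; pos < n; pos++)
      if (keys[pos] >= key)
      { *row= keys[pos]; return 0; }
    return 1;                      /* no match: end of scan */
  }
  int index_next(int *row)
  {
    if (++pos >= n) return 1;
    *row= keys[pos]; return 0;
  }
  void index_end()                 { pos= -1; }
};

int main()
{
  const int data[]= {1, 3, 5, 7};
  ToyIndex idx= { data, 4, -1 };
  int row;
  idx.index_init();
  for (int err= idx.index_read(4, &row); !err; err= idx.index_next(&row))
    printf("%d ", row);            /* prints: 5 7 */
  idx.index_end();
  printf("\n");
  return 0;
}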
/*
read_first_row is a virtual method but is only implemented by
handler.cc; no storage engine has implemented it, so neither
will the partition handler.
virtual int read_first_row(byte *buf, uint primary_key);
*/
/*
We don't implement multi read range yet, will do later.
virtual int read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
KEY_MULTI_RANGE *ranges, uint range_count,
bool sorted, HANDLER_BUFFER *buffer);
virtual int read_multi_range_next(KEY_MULTI_RANGE **found_range_p);
*/
virtual int read_range_first(const key_range * start_key,
const key_range * end_key,
bool eq_range, bool sorted);
virtual int read_range_next();
private:
int common_index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
int common_first_last(byte * buf);
int partition_scan_set_up(byte * buf, bool idx_read_flag);
int handle_unordered_next(byte * buf, bool next_same);
int handle_unordered_scan_next_partition(byte * buf);
byte *queue_buf(uint part_id)
{
return (m_ordered_rec_buffer +
(part_id * (m_rec_length + PARTITION_BYTES_IN_POS)));
}
byte *rec_buf(uint part_id)
{
return (queue_buf(part_id) +
PARTITION_BYTES_IN_POS);
}
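/*
  m_ordered_rec_buffer thus consists of one slot per partition: each
  slot is PARTITION_BYTES_IN_POS bytes holding the partition id,
  immediately followed by a full record of m_rec_length bytes.
  queue_buf addresses the slot, rec_buf the record within it.
*/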
int handle_ordered_index_scan(byte * buf);
int handle_ordered_next(byte * buf, bool next_same);
int handle_ordered_prev(byte * buf);
void return_top_record(byte * buf);
void include_partition_fields_in_used_fields();
public:
/*
-------------------------------------------------------------------------
MODULE information calls
-------------------------------------------------------------------------
These calls are used to inform the handler of specifics of the ongoing
scans and other actions. Most of these are used for optimisation
purposes.
-------------------------------------------------------------------------
*/
virtual void info(uint);
virtual int extra(enum ha_extra_function operation);
virtual int extra_opt(enum ha_extra_function operation, ulong cachesize);
virtual int reset(void);
private:
static const uint NO_CURRENT_PART_ID= 0xFFFFFFFF;
int loop_extra(enum ha_extra_function operation);
void late_extra_cache(uint partition_id);
void late_extra_no_cache(uint partition_id);
void prepare_extra_cache(uint cachesize);
public:
/*
-------------------------------------------------------------------------
MODULE optimiser support
-------------------------------------------------------------------------
-------------------------------------------------------------------------
*/
/*
NOTE !!!!!!
-------------------------------------------------------------------------
-------------------------------------------------------------------------
One important part of the public handler interface that is not depicted in
the methods is the attribute 'records',
which is defined in the base class. It is read directly and is
set by calling info(HA_STATUS_INFO) ?
-------------------------------------------------------------------------
*/
/*
keys_to_use_for_scanning can probably be implemented as the
intersection of all underlying handlers if mixed handlers are used.
This method is used to derive whether an index can be used for
index-only scanning when performing an ORDER BY query.
Only called from one place in sql_select.cc
*/
virtual const key_map *keys_to_use_for_scanning();
/*
Called in test_quick_select to determine if indexes should be used.
*/
virtual double scan_time();
/*
The next method will never be called if you do not implement indexes.
*/
virtual double read_time(uint index, uint ranges, ha_rows rows);
/*
Estimates how many records lie in the given range.
Used by the optimiser to calculate the cost of using a particular index.
*/
virtual ha_rows records_in_range(uint inx, key_range * min_key,
key_range * max_key);
/*
The upper bound of the number of records returned in a scan is the sum
over all underlying handlers.
*/
virtual ha_rows estimate_rows_upper_bound();
/*
table_cache_type is implemented by the underlying handler but all
underlying handlers must have the same implementation for it to work.
*/
virtual uint8 table_cache_type();
/*
-------------------------------------------------------------------------
MODULE print messages
-------------------------------------------------------------------------
This module contains various methods that return text messages for
table types, index types and error messages.
-------------------------------------------------------------------------
*/
/*
The name of the index type that will be used for display.
Here we must ensure that all handlers use the same index type
for each index created.
*/
virtual const char *index_type(uint inx);
/* The name of the table type that will be used for display purposes */
virtual const char *table_type() const
{ return "PARTITION"; }
/*
Handler specific error messages
*/
virtual void print_error(int error, myf errflag);
virtual bool get_error_message(int error, String * buf);
/*
-------------------------------------------------------------------------
MODULE handler characteristics
-------------------------------------------------------------------------
This module contains a number of methods defining limitations and
characteristics of the handler. The partition handler will calculate
these characteristics based on the underlying handlers' characteristics.
-------------------------------------------------------------------------
This is a list of flags that says what the storage engine
implements. The current table flags are documented in handler.h
The partition handler will support whatever the underlying handlers
support except when specifically mentioned below about exceptions
to this rule.
HA_READ_RND_SAME:
Not currently used. (Means that the handler supports the rnd_same() call)
(MyISAM, HEAP)
HA_TABLE_SCAN_ON_INDEX:
Used to avoid scanning full tables on an index. If this flag is set then
the handler always has a primary key (hidden if not defined) and this
index is used for scanning rather than a full table scan in all
situations.
(InnoDB, BDB, Federated)
HA_REC_NOT_IN_SEQ:
This flag is set for handlers that cannot guarantee that the rows are
returned according to incremental positions (0, 1, 2, 3...).
This also means that rnd_next() should return HA_ERR_RECORD_DELETED
if it finds a deleted row.
(MyISAM (not fixed-length row), BDB, HEAP, NDB, InnoDB)
HA_CAN_GEOMETRY:
Can the storage engine handle spatial data.
Used to check that no spatial attributes are declared unless
the storage engine is capable of handling it.
(MyISAM)
HA_FAST_KEY_READ:
Setting this flag indicates that the handler is equally fast in
finding a row by key as by position.
This flag is used in a very special situation in conjunction with
filesort. For further explanation see the intro to init_read_record.
(BDB, HEAP, InnoDB)
HA_NULL_IN_KEY:
Are NULL values allowed in indexes?
If this is not allowed then it is not possible to use an index on a
NULLable field.
(BDB, HEAP, MyISAM, NDB, InnoDB)
HA_DUPP_POS:
Tells that the position of the conflicting duplicate-key record is
stored in table->file->dupp_ref. (insert uses rnd_pos() on this to
find the duplicated row)
(MyISAM)
HA_CAN_INDEX_BLOBS:
Is the storage engine capable of defining an index on a prefix of
a BLOB attribute?
(BDB, Federated, MyISAM, InnoDB)
HA_AUTO_PART_KEY:
Auto increment fields can be part of a multi-part key. For second part
auto-increment keys, the auto_incrementing is done in handler.cc
(BDB, Federated, MyISAM, NDB)
HA_REQUIRE_PRIMARY_KEY:
Can't define a table without primary key (and cannot handle a table
with hidden primary key)
(No handler has this limitation currently)
HA_NOT_EXACT_COUNT:
Does the counter of records after the info call specify an exact
value or not? If it doesn't, this flag is set.
Only MyISAM and HEAP use an exact count.
(MyISAM, HEAP, BDB, InnoDB, NDB, Federated)
HA_CAN_INSERT_DELAYED:
Can the storage engine support delayed inserts.
To start with the partition handler will not support delayed inserts.
Further investigation needed.
(HEAP, MyISAM)
HA_PRIMARY_KEY_IN_READ_INDEX:
This parameter is set when the handler will also return the primary key
when doing read-only-key on another index.
HA_NOT_DELETE_WITH_CACHE:
Seems to be an old MyISAM feature that is no longer used. No handler
has it defined but it is checked in init_read_record.
Further investigation needed.
(No handler defines it)
HA_NO_PREFIX_CHAR_KEYS:
Indexes on prefixes of character fields are not allowed.
(NDB)
HA_CAN_FULLTEXT:
Does the storage engine support fulltext indexes?
The partition handler will start by not supporting fulltext indexes.
(MyISAM)
HA_CAN_SQL_HANDLER:
Can the HANDLER interface in the MySQL API be used with this
storage engine?
(MyISAM, InnoDB)
HA_NO_AUTO_INCREMENT:
Set if the storage engine does not support auto increment fields.
(Currently not set by any handler)
HA_HAS_CHECKSUM:
Special MyISAM feature. Has special SQL support in CREATE TABLE.
No special handling needed by partition handler.
(MyISAM)
HA_FILE_BASED:
Should file names always be in lower case (used by engines
that map table names to file names)?
Since the partition handler has a local file, this flag is set.
(BDB, Federated, MyISAM)
HA_CAN_BIT_FIELD:
Is the storage engine capable of handling bit fields?
(MyISAM, NDB)
HA_NEED_READ_RANGE_BUFFER:
Is Read Multi-Range supported => need multi read range buffer
This parameter specifies whether a buffer for read multi range
is needed by the handler. Whether the handler supports this
feature depends on whether the handler implements the
read_multi_range* calls. The only handler currently
supporting this feature is NDB so the partition handler need
not handle this call. There are methods in handler.cc that will
transfer those calls into index_read and other calls in the
index scan module.
(NDB)
*/
virtual ulong table_flags() const
{ return m_table_flags; }
/*
HA_CAN_PARTITION:
Used by storage engines that can handle partitioning without this
partition handler
(Partition, NDB)
HA_CAN_UPDATE_PARTITION_KEY:
Set if the handler can update fields that are part of the partition
function.
HA_CAN_PARTITION_UNIQUE:
Set if the handler can handle unique indexes where the fields of the
unique key are not part of the fields of the partition function. Thus
a unique key can be set on all fields.
*/
virtual ulong partition_flags() const
{ return HA_CAN_PARTITION; }
/*
This is a bitmap of flags that says how the storage engine
implements indexes. The current index flags are documented in
handler.h. If you do not implement indexes, just return zero
here.
part is the key part to check. The first key part is 0.
If all_parts is set, MySQL wants to know the flags for the combined
index, up to and including 'part'.
HA_READ_NEXT:
Does the index support read next? This is assumed in the server
code and never checked, so all indexes must support it.
Note that the handler can be used even if it doesn't have any index.
(BDB, HEAP, MyISAM, Federated, NDB, InnoDB)
HA_READ_PREV:
Can the index be used to scan backwards.
(BDB, HEAP, MyISAM, NDB, InnoDB)
HA_READ_ORDER:
Can the index deliver its records in index order? Typically true for
all ordered indexes and not true for hash indexes.
As a first step this is not true for the partition handler, until a
merge sort has been implemented in it.
Used to set the key_map part_of_sortkey.
This key_map is only used to find indexes usable for resolving an ORDER BY
in the query. Thus in most cases index_read will work just fine without
ordered result production. When this flag is set it is however safe to
order all output started by index_read, since most engines do this. With
read_multi_range calls there is a specific flag that requests ordered or
unordered output, so in those cases ordering of the index output can be
avoided.
(BDB, InnoDB, HEAP, MyISAM, NDB)
HA_READ_RANGE:
Specify whether index can handle ranges, typically true for all
ordered indexes and not true for hash indexes.
Used by the optimiser to check if ranges (such as key >= 5) can be optimised
by index.
(BDB, InnoDB, NDB, MyISAM, HEAP)
HA_ONLY_WHOLE_INDEX:
Can't use partial-key searches. This is typically true for hash indexes
and typically not true for ordered indexes.
(Federated, NDB, HEAP)
HA_KEYREAD_ONLY:
Does the storage engine support index-only scans on this index.
Enables use of HA_EXTRA_KEYREAD and HA_EXTRA_NO_KEYREAD
Used to set key_map keys_for_keyread and to check in optimiser for
index-only scans. When doing a read under HA_EXTRA_KEYREAD the handler
only has to fill in the columns the key covers. If
HA_PRIMARY_KEY_IN_READ_INDEX is set then also the PRIMARY KEY columns
must be updated in the row.
(BDB, InnoDB, MyISAM)
*/
virtual ulong index_flags(uint inx, uint part, bool all_parts) const
{
return m_file[0]->index_flags(inx, part, all_parts);
}
/*
extensions of table handler files
*/
virtual const char **bas_ext() const;
/*
unireg.cc will call the following to make sure that the storage engine
can handle the data it is about to send.
Each maximum supported value is the minimum over all handlers in the table
*/
uint min_of_the_max_uint(uint (handler::*operator_func)(void) const) const;
virtual uint max_supported_record_length() const;
virtual uint max_supported_keys() const;
virtual uint max_supported_key_parts() const;
virtual uint max_supported_key_length() const;
virtual uint max_supported_key_part_length() const;
/*
All handlers in a partitioned table must have the same low_byte_first
*/
virtual bool low_byte_first() const
{ return m_low_byte_first; }
/*
The extra record buffer length is the maximum needed by all handlers.
The minimum record length is the maximum over all involved handlers.
*/
virtual uint extra_rec_buf_length() const;
virtual uint min_record_length(uint options) const;
/*
Transactions on the table are supported if all handlers below support
transactions.
*/
virtual bool has_transactions()
{ return m_has_transactions; }
/*
"Primary key is clustered" can only be true if all underlying handlers
have this feature.
*/
virtual bool primary_key_is_clustered()
{ return m_pkey_is_clustered; }
/*
-------------------------------------------------------------------------
MODULE compare records
-------------------------------------------------------------------------
cmp_ref checks if two references are the same. For most handlers this is
a simple memcmp of the reference. However some handlers use the primary
key as the reference, and it can be the same even if memcmp says the
references differ. This is due to character sets, end spaces and so forth.
For the partition handler the reference is the first two bytes providing
the partition identity of the referred record, followed by the reference
of the underlying handler. (A self-contained sketch of this layout is
given right after the class declaration below.)
Thus cmp_ref for the partition handler always reports records from
different partitions as unequal, and uses cmp_ref on the underlying
handler to check whether the rest of the reference is also the same.
-------------------------------------------------------------------------
*/
virtual int cmp_ref(const byte * ref1, const byte * ref2);
/*
-------------------------------------------------------------------------
MODULE auto increment
-------------------------------------------------------------------------
This module is used to handle support for auto increments.
The variable auto_increment_column_changed in the handler is part of the
handler interface. It is maintained by the parent handler object and
should not be touched by child handler objects (see handler.cc for its
use).
-------------------------------------------------------------------------
*/
virtual void restore_auto_increment();
virtual ulonglong get_auto_increment();
/*
-------------------------------------------------------------------------
MODULE initialise handler for HANDLER call
-------------------------------------------------------------------------
This method is a special InnoDB method called before a HANDLER query.
-------------------------------------------------------------------------
*/
virtual void init_table_handle_for_HANDLER();
/*
The remainder of this file defines the handler methods not implemented
by the partition handler
*/
/*
-------------------------------------------------------------------------
MODULE foreign key support
-------------------------------------------------------------------------
The following methods are used to implement foreign keys as supported by
InnoDB. Implement this ??
get_foreign_key_create_info is used by SHOW CREATE TABLE to get a textual
description of how the FOREIGN KEY part of CREATE TABLE is defined.
free_foreign_key_create_info is used to free the memory area that provided
this description.
-------------------------------------------------------------------------
virtual char* get_foreign_key_create_info()
virtual void free_foreign_key_create_info(char* str)
virtual int get_foreign_key_list(THD *thd,
List<FOREIGN_KEY_INFO> *f_key_list)
virtual uint referenced_by_foreign_key()
*/
/*
-------------------------------------------------------------------------
MODULE fulltext index
-------------------------------------------------------------------------
Fulltext stuff not yet.
-------------------------------------------------------------------------
virtual int ft_init() { return HA_ERR_WRONG_COMMAND; }
virtual FT_INFO *ft_init_ext(uint flags,uint inx,const byte *key,
uint keylen)
{ return NULL; }
virtual int ft_read(byte *buf) { return HA_ERR_WRONG_COMMAND; }
*/
/*
-------------------------------------------------------------------------
MODULE restart full table scan at position (MyISAM)
-------------------------------------------------------------------------
The following method is only used by MyISAM tables when they serve
as temporary tables in a join.
virtual int restart_rnd_next(byte *buf, byte *pos);
*/
/*
-------------------------------------------------------------------------
MODULE on-line ALTER TABLE
-------------------------------------------------------------------------
These methods are in the handler interface but are never used (yet).
They are to be used by on-line ALTER TABLE ADD/DROP INDEX:
-------------------------------------------------------------------------
virtual ulong index_ddl_flags(KEY *wanted_index) const
virtual int add_index(TABLE *table_arg,KEY *key_info,uint num_of_keys);
virtual int drop_index(TABLE *table_arg,uint *key_num,uint num_of_keys);
*/
/*
-------------------------------------------------------------------------
MODULE tablespace support
-------------------------------------------------------------------------
Admin of table spaces is not applicable to the partition handler (InnoDB).
This means that the following method is not implemented:
-------------------------------------------------------------------------
virtual int discard_or_import_tablespace(my_bool discard)
*/
/*
-------------------------------------------------------------------------
MODULE admin MyISAM
-------------------------------------------------------------------------
Admin commands not supported currently (almost purely MyISAM routines)
This means that the following methods are not implemented:
-------------------------------------------------------------------------
virtual int check(THD* thd, HA_CHECK_OPT *check_opt);
virtual int backup(THD* thd, HA_CHECK_OPT *check_opt);
virtual int restore(THD* thd, HA_CHECK_OPT *check_opt);
virtual int repair(THD* thd, HA_CHECK_OPT *check_opt);
virtual int optimize(THD* thd, HA_CHECK_OPT *check_opt);
virtual int analyze(THD* thd, HA_CHECK_OPT *check_opt);
virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt);
virtual int preload_keys(THD *thd, HA_CHECK_OPT *check_opt);
virtual bool check_and_repair(THD *thd);
virtual int dump(THD* thd, int fd = -1);
virtual int net_read_dump(NET* net);
virtual uint checksum() const;
virtual bool is_crashed() const;
virtual bool auto_repair() const;
-------------------------------------------------------------------------
MODULE enable/disable indexes
-------------------------------------------------------------------------
Enable/Disable Indexes are not supported currently (Heap, MyISAM)
This means that the following methods are not implemented:
-------------------------------------------------------------------------
virtual int disable_indexes(uint mode);
virtual int enable_indexes(uint mode);
virtual int indexes_are_disabled(void);
*/
/*
-------------------------------------------------------------------------
MODULE append_create_info
-------------------------------------------------------------------------
append_create_info is only used by MyISAM MERGE tables, and the partition
handler will not support MERGE as an underlying handler.
Implement this??
-------------------------------------------------------------------------
virtual void append_create_info(String *packet)
*/
};
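The two-byte partition identity mentioned in the compare-records module
above is easy to misread, so here is a minimal self-contained sketch of
the reference layout and of a cmp_ref in that style. This is not the
actual ha_partition code: PART_REF_BYTES stands in for
PARTITION_BYTES_IN_POS, the little-endian decoding is an assumption, and
memcmp stands in for the underlying handler's cmp_ref.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

/* Assumed stand-in for PARTITION_BYTES_IN_POS (two bytes per the text). */
static const size_t PART_REF_BYTES= 2;

/* Decode the partition id stored in the first two bytes of a reference. */
static uint16_t ref_part_id(const unsigned char *ref)
{
  return (uint16_t) (ref[0] | (ref[1] << 8));  /* little-endian assumption */
}

/*
  Sketch of the comparison: records in different partitions are never
  equal; only when the partition ids match is the engine-specific part
  of the reference compared (memcmp here stands in for the underlying
  handler's cmp_ref).
*/
static int partition_cmp_ref_sketch(const unsigned char *ref1,
                                    const unsigned char *ref2,
                                    size_t engine_ref_len)
{
  int diff= (int) ref_part_id(ref1) - (int) ref_part_id(ref2);
  if (diff)
    return diff;
  return memcmp(ref1 + PART_REF_BYTES, ref2 + PART_REF_BYTES, engine_ref_len);
}

int main()
{
  unsigned char a[6]= {1, 0, 'k', 'e', 'y', '1'};  /* partition 1 */
  unsigned char b[6]= {2, 0, 'k', 'e', 'y', '1'};  /* partition 2 */
  printf("%d\n", partition_cmp_ref_sketch(a, b, 4) < 0);  /* prints 1 */
  return 0;
}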

View File

@ -34,6 +34,9 @@
#ifdef HAVE_EXAMPLE_DB
#include "examples/ha_example.h"
#endif
#ifdef HAVE_PARTITION_DB
#include "ha_partition.h"
#endif
#ifdef HAVE_ARCHIVE_DB
#include "examples/ha_archive.h"
#endif
@ -170,7 +173,13 @@ enum db_type ha_checktype(THD *thd, enum db_type database_type,
{
if (ha_storage_engine_is_enabled(database_type))
return database_type;
#ifdef HAVE_PARTITION_DB
/*
The partition handler is not in the list of handlers shown, since it is an internal handler
*/
if (database_type == DB_TYPE_PARTITION_DB)
return database_type;
#endif
if (no_substitute)
{
if (report_error)
@ -236,6 +245,13 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
file= new ha_example(table);
break;
#endif
#ifdef HAVE_PARTITION_DB
case DB_TYPE_PARTITION_DB:
{
file= new ha_partition(table);
break;
}
#endif
#ifdef HAVE_ARCHIVE_DB
case DB_TYPE_ARCHIVE_DB:
file= new ha_archive(table);
@ -290,6 +306,29 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
return file;
}
#ifdef HAVE_PARTITION_DB
handler *get_ha_partition(partition_info *part_info)
{
ha_partition *partition;
DBUG_ENTER("get_ha_partition");
if ((partition= new ha_partition(part_info)))
{
if (partition->ha_initialise())
{
delete partition;
partition= 0;
}
}
else
{
my_error(ER_OUTOFMEMORY, MYF(0), sizeof(ha_partition));
}
DBUG_RETURN(((handler*) partition));
}
#endif
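get_ha_partition above separates allocation from ha_initialise and deletes
the half-built object when initialisation fails. The reason for the
two-phase pattern is that a constructor cannot report failure without
exceptions, which the server does not use. A hedged generic sketch of the
same shape (Widget and init are hypothetical names, not server API):

#include <cstdio>
#include <new>

struct Widget
{
  /* Returns true on failure, mirroring ha_initialise() above. */
  bool init() { return false; }
};

static Widget *get_widget()
{
  Widget *w= new (std::nothrow) Widget();
  if (w && w->init())
  {
    delete w;       /* throw away the half-built object */
    w= NULL;        /* caller sees NULL and reports the error */
  }
  return w;
}

int main()
{
  Widget *w= get_widget();
  printf("%s\n", w ? "initialised" : "failed");
  delete w;
  return 0;
}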
/*
Register handler error messages for use with my_error().
@ -1406,27 +1445,41 @@ int handler::ha_allocate_read_write_set(ulong no_fields)
my_bool r;
#endif
DBUG_ENTER("ha_allocate_read_write_set");
DBUG_PRINT("info", ("no_fields = %d", no_fields));
read_set= (MY_BITMAP*)sql_alloc(sizeof(MY_BITMAP));
write_set= (MY_BITMAP*)sql_alloc(sizeof(MY_BITMAP));
read_buf= (uint32*)sql_alloc(bitmap_size);
write_buf= (uint32*)sql_alloc(bitmap_size);
if (!read_set || !write_set || !read_buf || !write_buf)
DBUG_PRINT("enter", ("no_fields = %d", no_fields));
if (table)
{
ha_deallocate_read_write_set();
DBUG_RETURN(TRUE);
if (table->read_set == NULL)
{
read_set= (MY_BITMAP*)sql_alloc(sizeof(MY_BITMAP));
write_set= (MY_BITMAP*)sql_alloc(sizeof(MY_BITMAP));
read_buf= (uint32*)sql_alloc(bitmap_size);
write_buf= (uint32*)sql_alloc(bitmap_size);
if (!read_set || !write_set || !read_buf || !write_buf)
{
ha_deallocate_read_write_set();
DBUG_RETURN(TRUE);
}
#ifndef DEBUG_OFF
r =
#endif
bitmap_init(read_set, read_buf, no_fields+1, FALSE);
DBUG_ASSERT(!r /*bitmap_init(read_set...)*/);
#ifndef DEBUG_OFF
r =
#endif
bitmap_init(write_set, write_buf, no_fields+1, FALSE);
DBUG_ASSERT(!r /*bitmap_init(write_set...)*/);
table->read_set= read_set;
table->write_set= write_set;
ha_clear_all_set();
}
else
{
read_set= table->read_set;
write_set= table->write_set;
}
}
#ifndef DEBUG_OFF
r =
#endif
bitmap_init(read_set, read_buf, no_fields+1, FALSE);
DBUG_ASSERT(!r /*bitmap_init(read_set...)*/);
#ifndef DEBUG_OFF
r =
#endif
bitmap_init(write_set, write_buf, no_fields+1, FALSE);
DBUG_ASSERT(!r /*bitmap_init(write_set...)*/);
ha_clear_all_set();
DBUG_RETURN(FALSE);
}
@ -1476,6 +1529,8 @@ void handler::ha_set_primary_key_in_read_set()
}
DBUG_VOID_RETURN;
}
/*
Read first row (only) from a table
This is never called for InnoDB or BDB tables, as these table types
@ -1504,7 +1559,7 @@ int handler::read_first_row(byte * buf, uint primary_key)
else
{
/* Find the first row through the primary key */
(void) ha_index_init(primary_key);
(void) ha_index_init(primary_key, 0);
error=index_first(buf);
(void) ha_index_end();
}
@ -1688,7 +1743,7 @@ ulonglong handler::get_auto_increment()
int error;
(void) extra(HA_EXTRA_KEYREAD);
index_init(table->s->next_number_index);
index_init(table->s->next_number_index, 1);
if (!table->s->next_number_key_offset)
{ // Autoincrement at key-start
error=index_last(table->record[1]);
@ -2512,7 +2567,7 @@ int handler::compare_key(key_range *range)
int handler::index_read_idx(byte * buf, uint index, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
int error= ha_index_init(index);
int error= ha_index_init(index, 0);
if (!error)
error= index_read(buf, key, key_len, find_flag);
if (!error)

View File

@ -89,6 +89,11 @@
#define HA_NEED_READ_RANGE_BUFFER (1 << 29) /* for read_multi_range */
#define HA_ANY_INDEX_MAY_BE_UNIQUE (1 << 30)
/* Flags for partition handlers */
#define HA_CAN_PARTITION (1 << 0) /* Partition support */
#define HA_CAN_UPDATE_PARTITION_KEY (1 << 1)
#define HA_CAN_PARTITION_UNIQUE (1 << 2)
/* bits in index_flags(index_number) for what you can do with index */
#define HA_READ_NEXT 1 /* TODO really use this flag */
@ -172,6 +177,7 @@ enum db_type
DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB,
DB_TYPE_FEDERATED_DB,
DB_TYPE_BLACKHOLE_DB,
DB_TYPE_PARTITION_DB,
DB_TYPE_DEFAULT // Must be last
};
@ -364,6 +370,208 @@ typedef struct st_thd_trans
enum enum_tx_isolation { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED,
ISO_REPEATABLE_READ, ISO_SERIALIZABLE};
typedef struct {
uint32 start_part;
uint32 end_part;
bool use_bit_array;
} part_id_range;
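part_id_range is the contract for partition pruning: get_partition_set
(declared later in this header) fills in which partitions can contain
matching rows, and the handler then visits only that slice. A minimal
sketch of how such a range would drive a scan loop, assuming end_part is
inclusive; scan_one_partition is a hypothetical stand-in for the
per-partition handler calls:

#include <cstdint>
#include <cstdio>

struct part_id_range_sketch   /* mirrors part_id_range above */
{
  uint32_t start_part;
  uint32_t end_part;
  bool use_bit_array;
};

/* Hypothetical per-partition scan, for illustration only. */
static void scan_one_partition(uint32_t part_id)
{
  printf("scanning partition %u\n", (unsigned) part_id);
}

int main()
{
  /* As if pruning had reduced an 8-partition table to partitions 2..4. */
  part_id_range_sketch part_spec= {2, 4, false};
  for (uint32_t i= part_spec.start_part; i <= part_spec.end_part; i++)
    scan_one_partition(i);
  return 0;
}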
/**
* An enum and a struct to handle partitioning and subpartitioning.
*/
enum partition_type {
NOT_A_PARTITION= 0,
RANGE_PARTITION,
HASH_PARTITION,
LIST_PARTITION
};
#define UNDEF_NODEGROUP 65535
class Item;
class partition_element :public Sql_alloc {
public:
List<partition_element> subpartitions;
List<Item> list_expr_list;
ulonglong part_max_rows;
ulonglong part_min_rows;
char *partition_name;
char *tablespace_name;
Item* range_expr;
char* part_comment;
char* data_file_name;
char* index_file_name;
enum db_type engine_type;
uint16 nodegroup_id;
partition_element()
: part_max_rows(0), part_min_rows(0), partition_name(NULL),
tablespace_name(NULL), range_expr(NULL), part_comment(NULL),
data_file_name(NULL), index_file_name(NULL),
engine_type(DB_TYPE_UNKNOWN), nodegroup_id(UNDEF_NODEGROUP)
{
subpartitions.empty();
list_expr_list.empty();
}
~partition_element() {}
};
typedef struct {
longlong list_value;
uint partition_id;
} LIST_PART_ENTRY;
enum Item_result;
class partition_info;
typedef bool (*get_part_id_func)(partition_info *part_info,
uint32 *part_id);
typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
class partition_info :public Sql_alloc {
public:
/*
* Here comes a set of definitions needed for partitioned table handlers.
*/
List<partition_element> partitions;
List<char> part_field_list;
List<char> subpart_field_list;
get_part_id_func get_partition_id;
get_part_id_func get_part_partition_id;
get_subpart_id_func get_subpartition_id;
Field **part_field_array;
Field **subpart_field_array;
Field **full_part_field_array;
Item *part_expr;
Item *subpart_expr;
Item *item_free_list;
union {
longlong *range_int_array;
LIST_PART_ENTRY *list_array;
};
char* part_info_string;
char *part_func_string;
char *subpart_func_string;
partition_element *curr_part_elem;
partition_element *current_partition;
/*
These key_map's are used for Partitioning to enable quick decisions
on whether we can derive more information about which partition to
scan just by looking at what index is used.
*/
key_map all_fields_in_PF, all_fields_in_PPF, all_fields_in_SPF;
key_map some_fields_in_PF;
enum db_type default_engine_type;
Item_result part_result_type;
partition_type part_type;
partition_type subpart_type;
uint part_info_len;
uint part_func_len;
uint subpart_func_len;
uint no_full_parts;
uint no_parts;
uint no_subparts;
uint count_curr_parts;
uint count_curr_subparts;
uint part_error_code;
uint no_list_values;
uint no_part_fields;
uint no_subpart_fields;
uint no_full_part_fields;
uint16 linear_hash_mask;
bool use_default_partitions;
bool use_default_subpartitions;
bool defined_max_value;
bool list_of_part_fields;
bool list_of_subpart_fields;
bool linear_hash_ind;
partition_info()
: get_partition_id(NULL), get_part_partition_id(NULL),
get_subpartition_id(NULL),
part_field_array(NULL), subpart_field_array(NULL),
full_part_field_array(NULL),
part_expr(NULL), subpart_expr(NULL), item_free_list(NULL),
list_array(NULL),
part_info_string(NULL),
part_func_string(NULL), subpart_func_string(NULL),
curr_part_elem(NULL), current_partition(NULL),
default_engine_type(DB_TYPE_UNKNOWN),
part_result_type(INT_RESULT),
part_type(NOT_A_PARTITION), subpart_type(NOT_A_PARTITION),
part_info_len(0), part_func_len(0), subpart_func_len(0),
no_full_parts(0), no_parts(0), no_subparts(0),
count_curr_parts(0), count_curr_subparts(0), part_error_code(0),
no_list_values(0), no_part_fields(0), no_subpart_fields(0),
no_full_part_fields(0), linear_hash_mask(0),
use_default_partitions(TRUE),
use_default_subpartitions(TRUE), defined_max_value(FALSE),
list_of_part_fields(FALSE), list_of_subpart_fields(FALSE),
linear_hash_ind(FALSE)
{
all_fields_in_PF.clear_all();
all_fields_in_PPF.clear_all();
all_fields_in_SPF.clear_all();
some_fields_in_PF.clear_all();
partitions.empty();
part_field_list.empty();
subpart_field_list.empty();
}
~partition_info() {}
};
#ifdef HAVE_PARTITION_DB
/*
Answers the question whether subpartitioning is used for a certain table
SYNOPSIS
is_sub_partitioned()
part_info A reference to the partition_info struct
RETURN VALUE
Returns true if subpartitioning is used and false otherwise
DESCRIPTION
A routine to check for subpartitioning for improved readability of code
*/
inline
bool is_sub_partitioned(partition_info *part_info)
{ return (part_info->subpart_type == NOT_A_PARTITION ? FALSE : TRUE); }
/*
Returns the total number of partitions on the leaf level.
SYNOPSIS
get_tot_partitions()
part_info A reference to the partition_info struct
RETURN VALUE
Returns the number of partitions
DESCRIPTION
A routine to return the total number of partitions, for improved
readability of code
*/
inline
uint get_tot_partitions(partition_info *part_info)
{
return part_info->no_parts *
(is_sub_partitioned(part_info) ? part_info->no_subparts : 1);
}
#endif
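The arithmetic in get_tot_partitions is easiest to check with numbers: a
RANGE table with no_parts = 4 whose partitions are each HASH-subpartitioned
with no_subparts = 2 has 4 * 2 = 8 leaf partitions, while the same table
without subpartitioning has 4. A self-contained restatement of the two
inline helpers above (the _sketch names are illustrative only):

#include <cstdio>

struct part_info_sketch
{
  unsigned no_parts;
  unsigned no_subparts;
  bool sub_partitioned;   /* stands in for subpart_type != NOT_A_PARTITION */
};

static unsigned tot_partitions_sketch(const part_info_sketch *p)
{
  return p->no_parts * (p->sub_partitioned ? p->no_subparts : 1);
}

int main()
{
  part_info_sketch range_by_hash= {4, 2, true};
  part_info_sketch plain_range= {4, 0, false};
  printf("%u %u\n", tot_partitions_sketch(&range_by_hash),
         tot_partitions_sketch(&plain_range));   /* prints: 8 4 */
  return 0;
}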
typedef struct st_ha_create_information
{
CHARSET_INFO *table_charset, *default_table_charset;
@ -412,6 +620,31 @@ typedef struct st_ha_check_opt
} HA_CHECK_OPT;
#ifdef HAVE_PARTITION_DB
handler *get_ha_partition(partition_info *part_info);
int get_parts_for_update(const byte *old_data, byte *new_data,
const byte *rec0, partition_info *part_info,
uint32 *old_part_id, uint32 *new_part_id);
int get_part_for_delete(const byte *buf, const byte *rec0,
partition_info *part_info, uint32 *part_id);
bool check_partition_info(partition_info *part_info,enum db_type eng_type,
handler *file, ulonglong max_rows);
bool fix_partition_func(THD *thd, const char *name, TABLE *table);
char *generate_partition_syntax(partition_info *part_info,
uint *buf_length, bool use_sql_alloc);
bool partition_key_modified(TABLE *table, List<Item> &fields);
void get_partition_set(const TABLE *table, byte *buf, const uint index,
const key_range *key_spec,
part_id_range *part_spec);
void get_full_part_id_from_key(const TABLE *table, byte *buf,
KEY *key_info,
const key_range *key_spec,
part_id_range *part_spec);
bool mysql_unpack_partition(File file, THD *thd, uint part_info_len,
TABLE *table);
#endif
/*
This is a buffer area that the handler can use to store rows.
'end_of_used_area' should be kept updated after calls to
@ -429,10 +662,13 @@ typedef struct st_handler_buffer
class handler :public Sql_alloc
{
#ifdef HAVE_PARTITION_DB
friend class ha_partition;
#endif
protected:
struct st_table *table; /* The table definition */
virtual int index_init(uint idx) { active_index=idx; return 0; }
virtual int index_init(uint idx, bool sorted) { active_index=idx; return 0; }
virtual int index_end() { active_index=MAX_KEY; return 0; }
/*
rnd_init() can be called two times without rnd_end() in between
@ -518,7 +754,7 @@ public:
{ return rows2double(ranges+rows); }
virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; }
virtual bool has_transactions(){ return 0;}
virtual uint extra_rec_buf_length() { return 0; }
virtual uint extra_rec_buf_length() const { return 0; }
/*
Return upper bound of current number of records in the table
@ -537,12 +773,12 @@ public:
virtual const char *index_type(uint key_number) { DBUG_ASSERT(0); return "";}
int ha_index_init(uint idx)
int ha_index_init(uint idx, bool sorted)
{
DBUG_ENTER("ha_index_init");
DBUG_ASSERT(inited==NONE);
inited=INDEX;
DBUG_RETURN(index_init(idx));
DBUG_RETURN(index_init(idx, sorted));
}
int ha_index_end()
{
@ -902,6 +1138,10 @@ public:
virtual const char *table_type() const =0;
virtual const char **bas_ext() const =0;
virtual ulong table_flags(void) const =0;
#ifdef HAVE_PARTITION_DB
virtual ulong partition_flags(void) const { return 0;}
virtual int get_default_no_partitions(ulonglong max_rows) { return 1;}
#endif
virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
virtual ulong index_ddl_flags(KEY *wanted_index) const
{ return (HA_DDL_SUPPORT); }
@ -941,6 +1181,7 @@ public:
virtual int delete_table(const char *name);
virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
virtual int create_handler_files(const char *name) { return FALSE;}
/* lock_count() can be more than one if the table is a MERGE */
virtual uint lock_count(void) const { return 1; }

View File

@ -1492,7 +1492,7 @@ int subselect_uniquesubquery_engine::exec()
}
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
table->file->ha_index_init(tab->ref.key, 0);
error= table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);
@ -1545,7 +1545,7 @@ int subselect_indexsubquery_engine::exec()
}
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
table->file->ha_index_init(tab->ref.key, 1);
error= table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);

View File

@ -429,3 +429,86 @@ int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length)
}
return 0; // Keys are equal
}
/*
Compare two records in index order
SYNOPSIS
key_rec_cmp()
key Index information
first_rec Pointer to the record to compare
second_rec Pointer to the record to compare against first_rec
DESCRIPTION
This method is set up so that it can be called directly from the
priority queue, and an attempt has been made to optimise it as much as
possible, since it will be called O(N * log N) times while performing
a merge sort in various places in the code.
We retrieve the pointer to table->record[0] using the fact that key_parts
have an offset making it possible to calculate the start of the record.
We need to get the diff to the compared record since none of the records
being compared are stored in table->record[0].
We first check for NULL values; if there are no NULL values we use
a compare method that takes two field pointers and a max length,
and return the result of the comparison.
*/
int key_rec_cmp(void *key, byte *first_rec, byte *second_rec)
{
KEY *key_info= (KEY*)key;
uint key_parts= key_info->key_parts, i= 0;
KEY_PART_INFO *key_part= key_info->key_part;
char *rec0= key_part->field->ptr - key_part->offset;
my_ptrdiff_t first_diff= first_rec - rec0, sec_diff= second_rec - rec0;
int result= 0;
DBUG_ENTER("key_rec_cmp");
do
{
Field *field= key_part->field;
uint length;
if (key_part->null_bit)
{
/* The key_part can contain NULL values */
bool first_is_null= field->is_null(first_diff);
bool sec_is_null= field->is_null(sec_diff);
/*
NULL is smaller than everything, so if the first is NULL and the other
is not, we know that we should return -1, and for the opposite case
we should return +1. If both are NULL we call it equality, although
it is a strange form of equality; we have equally little information
about the real value.
*/
if (!first_is_null)
{
if (!sec_is_null)
; /* Fall through, no NULL fields */
else
{
DBUG_RETURN(+1);
}
}
else if (!sec_is_null)
{
DBUG_RETURN(-1);
}
else
goto next_loop; /* Both were NULL */
}
/*
No null values in the fields
We use the virtual method cmp_max with a max length parameter.
For most field types this translates into a cmp without
max length. The exceptions are the BLOB and VARCHAR field types
that take the max length into account.
*/
result= field->cmp_max(field->ptr+first_diff, field->ptr+sec_diff,
key_part->length);
next_loop:
key_part++;
} while (!result && ++i < key_parts);
DBUG_RETURN(result);
}
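key_rec_cmp is written so that it can serve as the comparison function of
a priority queue while merging per-partition index streams. The following
self-contained analogue shows that usage with std::priority_queue over
plain int keys; the real code compares whole records field by field as
above, and the stream/heap_cmp names here are illustrative only:

#include <cstdio>
#include <queue>
#include <vector>

/* One sorted stream per partition; here just arrays of int keys. */
struct stream
{
  const int *pos;
  const int *end;
};

/* Plays the role key_rec_cmp plays for the real priority queue. */
struct heap_cmp
{
  bool operator()(const stream &a, const stream &b) const
  {
    return *a.pos > *b.pos;    /* min-heap: smallest current key on top */
  }
};

int main()
{
  const int p0[]= {1, 4, 9};
  const int p1[]= {2, 3, 8};
  const int p2[]= {5, 6, 7};
  std::priority_queue<stream, std::vector<stream>, heap_cmp> q;
  q.push(stream{p0, p0 + 3});
  q.push(stream{p1, p1 + 3});
  q.push(stream{p2, p2 + 3});
  while (!q.empty())           /* emits 1 2 3 4 5 6 7 8 9 in index order */
  {
    stream s= q.top();
    q.pop();
    printf("%d ", *s.pos);
    if (++s.pos != s.end)
      q.push(s);               /* re-insert the stream with its next key */
  }
  printf("\n");
  return 0;
}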

View File

@ -274,11 +274,14 @@ static SYMBOL symbols[] = {
{ "LEAVE", SYM(LEAVE_SYM)},
{ "LEAVES", SYM(LEAVES)},
{ "LEFT", SYM(LEFT)},
{ "LESS", SYM(LESS_SYM)},
{ "LEVEL", SYM(LEVEL_SYM)},
{ "LIKE", SYM(LIKE)},
{ "LIMIT", SYM(LIMIT)},
{ "LINEAR", SYM(LINEAR_SYM)},
{ "LINES", SYM(LINES)},
{ "LINESTRING", SYM(LINESTRING)},
{ "LIST", SYM(LIST_SYM)},
{ "LOAD", SYM(LOAD)},
{ "LOCAL", SYM(LOCAL_SYM)},
{ "LOCALTIME", SYM(NOW_SYM)},
@ -312,6 +315,7 @@ static SYMBOL symbols[] = {
{ "MAX_ROWS", SYM(MAX_ROWS)},
{ "MAX_UPDATES_PER_HOUR", SYM(MAX_UPDATES_PER_HOUR)},
{ "MAX_USER_CONNECTIONS", SYM(MAX_USER_CONNECTIONS_SYM)},
{ "MAXVALUE", SYM(MAX_VALUE_SYM)},
{ "MEDIUM", SYM(MEDIUM_SYM)},
{ "MEDIUMBLOB", SYM(MEDIUMBLOB)},
{ "MEDIUMINT", SYM(MEDIUMINT)},
@ -343,6 +347,7 @@ static SYMBOL symbols[] = {
{ "NEW", SYM(NEW_SYM)},
{ "NEXT", SYM(NEXT_SYM)},
{ "NO", SYM(NO_SYM)},
{ "NODEGROUP", SYM(NODEGROUP_SYM)},
{ "NONE", SYM(NONE_SYM)},
{ "NOT", SYM(NOT_SYM)},
{ "NO_WRITE_TO_BINLOG", SYM(NO_WRITE_TO_BINLOG)},
@ -365,6 +370,10 @@ static SYMBOL symbols[] = {
{ "OUTFILE", SYM(OUTFILE)},
{ "PACK_KEYS", SYM(PACK_KEYS_SYM)},
{ "PARTIAL", SYM(PARTIAL)},
#ifdef HAVE_PARTITION_DB
{ "PARTITION", SYM(PARTITION_SYM)},
#endif
{ "PARTITIONS", SYM(PARTITIONS_SYM)},
{ "PASSWORD", SYM(PASSWORD)},
{ "PHASE", SYM(PHASE_SYM)},
{ "POINT", SYM(POINT_SYM)},
@ -385,6 +394,7 @@ static SYMBOL symbols[] = {
{ "RAID_CHUNKS", SYM(RAID_CHUNKS)},
{ "RAID_CHUNKSIZE", SYM(RAID_CHUNKSIZE)},
{ "RAID_TYPE", SYM(RAID_TYPE)},
{ "RANGE", SYM(RANGE_SYM)},
{ "READ", SYM(READ_SYM)},
{ "READS", SYM(READS_SYM)},
{ "REAL", SYM(REAL)},
@ -476,6 +486,8 @@ static SYMBOL symbols[] = {
{ "STRING", SYM(STRING_SYM)},
{ "STRIPED", SYM(RAID_STRIPED_SYM)},
{ "SUBJECT", SYM(SUBJECT_SYM)},
{ "SUBPARTITION", SYM(SUBPARTITION_SYM)},
{ "SUBPARTITIONS", SYM(SUBPARTITIONS_SYM)},
{ "SUPER", SYM(SUPER_SYM)},
{ "SUSPEND", SYM(SUSPEND_SYM)},
{ "TABLE", SYM(TABLE_SYM)},
@ -485,6 +497,7 @@ static SYMBOL symbols[] = {
{ "TEMPTABLE", SYM(TEMPTABLE_SYM)},
{ "TERMINATED", SYM(TERMINATED)},
{ "TEXT", SYM(TEXT_SYM)},
{ "THAN", SYM(THAN_SYM)},
{ "THEN", SYM(THEN_SYM)},
{ "TIME", SYM(TIME_SYM)},
{ "TIMESTAMP", SYM(TIMESTAMP)},

View File

@ -614,6 +614,18 @@ bool check_table_access(THD *thd, ulong want_access, TABLE_LIST *tables,
bool no_errors);
bool check_global_access(THD *thd, ulong want_access);
/*
General routine to change field->ptr of a NULL-terminated array of Field
objects. Useful when one needs to call val_int, val_str or similar and the
field data is not in table->record[0] but in some other structure.
set_key_field_ptr changes all fields of an index using a key_info object.
All methods presume that there is at least one field to change.
*/
void set_field_ptr(Field **ptr, const byte *new_buf, const byte *old_buf);
void set_key_field_ptr(KEY *key_info, const byte *new_buf,
const byte *old_buf);
bool mysql_backup_table(THD* thd, TABLE_LIST* table_list);
bool mysql_restore_table(THD* thd, TABLE_LIST* table_list);
@ -772,6 +784,9 @@ Field *
find_field_in_real_table(THD *thd, TABLE *table, const char *name,
uint length, bool check_grants, bool allow_rowid,
uint *cached_field_index_ptr);
Field *
find_field_in_table_sef(TABLE *table, const char *name);
#ifdef HAVE_OPENSSL
#include <openssl/des.h>
struct st_des_keyblock
@ -1020,6 +1035,7 @@ bool key_cmp_if_same(TABLE *form,const byte *key,uint index,uint key_length);
void key_unpack(String *to,TABLE *form,uint index);
bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields);
int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length);
int key_rec_cmp(void *key_info, byte *a, byte *b);
bool init_errmessage(void);
void sql_perror(const char *message);
@ -1188,6 +1204,7 @@ extern SHOW_COMP_OPTION have_query_cache;
extern SHOW_COMP_OPTION have_geometry, have_rtree_keys;
extern SHOW_COMP_OPTION have_crypt;
extern SHOW_COMP_OPTION have_compress;
extern SHOW_COMP_OPTION have_partition_db;
#ifndef __WIN__
extern pthread_t signal_thread;
@ -1238,7 +1255,7 @@ bool mysql_create_frm(THD *thd, my_string file_name,
uint key_count,KEY *key_info,handler *db_type);
int rea_create_table(THD *thd, my_string file_name,HA_CREATE_INFO *create_info,
List<create_field> &create_field,
uint key_count,KEY *key_info);
uint key_count,KEY *key_info, handler *file);
int format_number(uint inputflag,uint max_length,my_string pos,uint length,
my_string *errpos);
int openfrm(THD *thd, const char *name,const char *alias,uint filestat,

View File

@ -325,6 +325,7 @@ my_bool opt_ndb_shm, opt_ndb_optimized_node_selection;
ulong opt_ndb_cache_check_time;
const char *opt_ndb_mgmd;
ulong opt_ndb_nodeid;
bool opt_ndb_linear_hash;
#endif
my_bool opt_readonly, use_temp_pool, relay_log_purge;
my_bool opt_sync_frm, opt_allow_suspicious_udfs;
@ -430,6 +431,7 @@ CHARSET_INFO *national_charset_info, *table_alias_charset;
SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_ndbcluster,
have_example_db, have_archive_db, have_csv_db;
SHOW_COMP_OPTION have_federated_db;
SHOW_COMP_OPTION have_partition_db;
SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache;
SHOW_COMP_OPTION have_geometry, have_rtree_keys;
SHOW_COMP_OPTION have_crypt, have_compress;
@ -4235,6 +4237,7 @@ enum options_mysqld
OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ,
OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME,
OPT_NDB_MGMD, OPT_NDB_NODEID,
OPT_NDB_LINEAR_HASH,
OPT_SKIP_SAFEMALLOC,
OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE,
OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
@ -4800,6 +4803,16 @@ Disable with --skip-ndbcluster (will save memory).",
(gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz,
(gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz,
0, GET_ULONG, REQUIRED_ARG, 32, 1, 256, 0, 0, 0},
{"ndb-use-linear-hash", OPT_NDB_LINEAR_HASH,
"Flag to indicate whether to use linear hash for default in new tables",
(gptr*) &opt_ndb_linear_hash,
(gptr*) &opt_ndb_linear_hash,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
{"ndb_use_linear_hash", OPT_NDB_LINEAR_HASH,
"Flag to indicate whether to use linear hash for default in new tables",
(gptr*) &opt_ndb_linear_hash,
(gptr*) &opt_ndb_linear_hash,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
{"ndb-force-send", OPT_NDB_FORCE_SEND,
"Force send of buffers to ndb immediately without waiting for "
"other threads.",
@ -6055,6 +6068,11 @@ static void mysql_init_variables(void)
#else
have_example_db= SHOW_OPTION_NO;
#endif
#ifdef HAVE_PARTITION_DB
have_partition_db= SHOW_OPTION_YES;
#else
have_partition_db= SHOW_OPTION_NO;
#endif
#ifdef HAVE_ARCHIVE_DB
have_archive_db= SHOW_OPTION_YES;
#else

View File

@ -751,7 +751,7 @@ int QUICK_RANGE_SELECT::init()
DBUG_ENTER("QUICK_RANGE_SELECT::init");
if (file->inited == handler::NONE)
DBUG_RETURN(error= file->ha_index_init(index));
DBUG_RETURN(error= file->ha_index_init(index, 1));
error= 0;
DBUG_RETURN(0);
}
@ -6049,7 +6049,7 @@ int QUICK_RANGE_SELECT::reset()
range= NULL;
cur_range= (QUICK_RANGE**) ranges.buffer;
if (file->inited == handler::NONE && (error= file->ha_index_init(index)))
if (file->inited == handler::NONE && (error= file->ha_index_init(index,1)))
DBUG_RETURN(error);
/* Do not allocate the buffers twice. */
@ -6308,7 +6308,7 @@ int QUICK_RANGE_SELECT_GEOM::get_next()
(byte*) range->min_key,
range->min_length,
(ha_rkey_function)(range->flag ^ GEOM_FLAG));
if (result != HA_ERR_KEY_NOT_FOUND)
if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
range=0; // Not found, to next range
}
@ -6451,7 +6451,7 @@ int QUICK_SELECT_DESC::get_next()
}
if (result)
{
if (result != HA_ERR_KEY_NOT_FOUND)
if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
range=0; // Not found, to next range
continue;
@ -8083,7 +8083,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::reset(void)
DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::reset");
file->extra(HA_EXTRA_KEYREAD); /* We need only the key attributes */
result= file->ha_index_init(index);
result= file->ha_index_init(index, 1);
result= file->index_last(record);
if (result == HA_ERR_END_OF_FILE)
DBUG_RETURN(0);
@ -8159,7 +8159,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
DBUG_ASSERT(is_last_prefix <= 0);
if (result == HA_ERR_KEY_NOT_FOUND)
continue;
else if (result)
if (result)
break;
if (have_min)
@ -8189,10 +8189,11 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
HA_READ_KEY_EXACT);
result= have_min ? min_res : have_max ? max_res : result;
}
while (result == HA_ERR_KEY_NOT_FOUND && is_last_prefix != 0);
} while ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
is_last_prefix != 0);
if (result == 0)
{
/*
Partially mimic the behavior of end_select_send. Copy the
field data from Item_field::field into Item_field::result_field
@ -8200,6 +8201,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
other fields in non-ANSI SQL mode).
*/
copy_fields(&join->tmp_table_param);
}
else if (result == HA_ERR_KEY_NOT_FOUND)
result= HA_ERR_END_OF_FILE;
@ -8226,6 +8228,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
RETURN
0 on success
HA_ERR_KEY_NOT_FOUND if no MIN key was found that fulfills all conditions.
HA_ERR_END_OF_FILE - "" -
other if some error occurred
*/
@ -8279,7 +8282,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
key_restore(record, tmp_record, index_info, 0);
}
else if (result == HA_ERR_KEY_NOT_FOUND)
else if (result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE)
result= 0; /* There is a result in any case. */
}
}
@ -8304,6 +8307,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
RETURN
0 on success
HA_ERR_KEY_NOT_FOUND if no MAX key was found that fulfills all conditions.
HA_ERR_END_OF_FILE - "" -
other if some error occurred
*/
@ -8404,6 +8408,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix()
0 on success
HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of
the ranges
HA_ERR_END_OF_FILE - "" -
other if some error
*/
@ -8448,11 +8453,12 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
result= file->index_read(record, group_prefix, search_prefix_len,
find_flag);
if ((result == HA_ERR_KEY_NOT_FOUND) &&
(cur_range->flag & (EQ_RANGE | NULL_RANGE)))
continue; /* Check the next range. */
else if (result)
if (result)
{
if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
(cur_range->flag & (EQ_RANGE | NULL_RANGE)))
continue; /* Check the next range. */
/*
In all other cases (HA_ERR_*, HA_READ_KEY_EXACT with NO_MIN_RANGE,
HA_READ_AFTER_KEY, HA_READ_KEY_OR_NEXT) if the lookup failed for this
@ -8479,7 +8485,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
/* Check if record belongs to the current group. */
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
{
result = HA_ERR_KEY_NOT_FOUND;
result= HA_ERR_KEY_NOT_FOUND;
continue;
}
@ -8497,7 +8503,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
if (!((cur_range->flag & NEAR_MAX) && (cmp_res == -1) ||
(cmp_res <= 0)))
{
result = HA_ERR_KEY_NOT_FOUND;
result= HA_ERR_KEY_NOT_FOUND;
continue;
}
}
@ -8536,6 +8542,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
0 on success
HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of
the ranges
HA_ERR_END_OF_FILE - "" -
other if some error
*/
@ -8581,10 +8588,12 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range()
result= file->index_read(record, group_prefix, search_prefix_len,
find_flag);
if ((result == HA_ERR_KEY_NOT_FOUND) && (cur_range->flag & EQ_RANGE))
continue; /* Check the next range. */
if (result)
{
if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
(cur_range->flag & EQ_RANGE))
continue; /* Check the next range. */
/*
If no key was found with this upper bound, there certainly are no keys
in the ranges to the left.

View File

@ -181,7 +181,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
}
error= table->file->ha_index_init((uint) ref.key);
error= table->file->ha_index_init((uint) ref.key, 1);
if (!ref.key_length)
error= table->file->index_first(table->record[0]);
@ -253,7 +253,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
}
error= table->file->ha_index_init((uint) ref.key);
error= table->file->ha_index_init((uint) ref.key, 1);
if (!ref.key_length)
error= table->file->index_last(table->record[0]);

View File

@ -31,6 +31,74 @@ static int rr_cmp(uchar *a,uchar *b);
/* init struct for read with info->read_record */
/*
init_read_record is used to scan by using a number of different methods.
Which method to use is set up in this call, so that later calls to
info->read_record invoke the appropriate method through a function
pointer.
There are five methods that relate completely to the sort function
filesort. The result of a filesort is retrieved using read_record
calls. The other two methods are used for normal table access.
The filesort will produce references to the sorted records; these
references can be stored in memory or in a temporary file.
The temporary file is normally used when the references don't fit into
a properly sized memory buffer. For most small queries the references
are stored in the memory buffer.
The temporary file is also used when performing an update where a key is
modified.
Methods used when ref's are in memory (using rr_from_pointers):
rr_unpack_from_buffer:
----------------------
This method is used when table->sort.addon_field is allocated.
This is allocated for most SELECT queries not involving any BLOB's.
In this case the records are fetched from a memory buffer.
rr_from_pointers:
-----------------
Used when the above is not true, i.e. for UPDATE, DELETE and so forth,
and for SELECTs involving BLOBs. It is also used when the addon_field
buffer is not allocated because its size would exceed the session
variable max_length_for_sort_data.
In this case the record data is fetched from the handler using the
saved reference using the rnd_pos handler call.
Methods used when ref's are in a temporary file (using rr_from_tempfile)
rr_unpack_from_tempfile:
------------------------
Same as rr_unpack_from_buffer except that references are fetched from
a temporary file. Should obviously not really happen except in
strange configurations.
rr_from_tempfile:
-----------------
Same as rr_from_pointers except that references are fetched from a
temporary file instead of from a memory buffer.
rr_from_cache:
--------------
This is a special variant of rr_from_tempfile that can be used for
handlers that do not use the HA_FAST_KEY_READ table flag. Instead
of reading the references one by one from the temporary file it reads
a set of them, sorts them and reads all of them into a buffer which
is then used for a number of subsequent calls to rr_from_cache.
It is only used for SELECT queries, and only under a number of
additional conditions on table size.
All other accesses use either index access methods (rr_quick) or a full
table scan (rr_sequential).
rr_quick:
---------
rr_quick uses one of the QUICK_SELECT classes in opt_range.cc to
perform an index scan. There is a lot of functionality hidden in
these quick classes; they handle all index scans of various kinds.
rr_sequential:
--------------
This is the most basic access method of a table using rnd_init,
rnd_next and rnd_end. No indexes are used.
*/
void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
SQL_SELECT *select,
int use_record_cache, bool print_error)
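The comment above describes init_read_record as choosing one of seven
access methods once and storing it as a function pointer that every
subsequent read goes through. A simplified sketch of that dispatch shape;
the selection condition and names here are assumptions, not the real
logic:

#include <cstdio>

struct READ_RECORD_SKETCH;
typedef int (*read_fn)(READ_RECORD_SKETCH *info);

struct READ_RECORD_SKETCH
{
  read_fn read_record;   /* chosen once in init, called for every row */
};

static int rr_sequential_sketch(READ_RECORD_SKETCH *)
{
  puts("full table scan step");
  return 0;
}

static int rr_quick_sketch(READ_RECORD_SKETCH *)
{
  puts("quick/index scan step");
  return 0;
}

/* Simplified stand-in for the real selection logic described above. */
static void init_read_record_sketch(READ_RECORD_SKETCH *info, bool have_quick)
{
  info->read_record= have_quick ? rr_quick_sketch : rr_sequential_sketch;
}

int main()
{
  READ_RECORD_SKETCH info;
  init_read_record_sketch(&info, true);
  info.read_record(&info);   /* the caller never knows which method runs */
  return 0;
}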

View File

@ -786,6 +786,7 @@ struct show_var_st init_vars[]= {
{"have_isam", (char*) &have_isam, SHOW_HAVE},
{"have_ndbcluster", (char*) &have_ndbcluster, SHOW_HAVE},
{"have_openssl", (char*) &have_openssl, SHOW_HAVE},
{"have_partition_engine", (char*) &have_partition_db, SHOW_HAVE},
{"have_query_cache", (char*) &have_query_cache, SHOW_HAVE},
{"have_raid", (char*) &have_raid, SHOW_HAVE},
{"have_rtree_keys", (char*) &have_rtree_keys, SHOW_HAVE},

View File

@ -5370,3 +5370,81 @@ ER_SCALE_BIGGER_THAN_PRECISION 42000 S1009
eng "Scale may not be larger than the precision (column '%-.64s')."
ER_WRONG_LOCK_OF_SYSTEM_TABLE
eng "You can't combine write-locking of system '%-.64s.%-.64s' table with other tables"
ER_PARTITION_REQUIRES_VALUES_ERROR
eng "%s PARTITIONING requires definition of VALUES %s for each partition"
swe "%s PARTITIONering kräver definition av VALUES %s för varje partition"
ER_PARTITION_WRONG_VALUES_ERROR
eng "Only %s PARTITIONING can use VALUES %s in partition definition"
swe "Endast %s partitionering kan använda VALUES %s i definition av partitionen"
ER_PARTITION_MAXVALUE_ERROR
eng "MAXVALUE can only be used in last partition definition"
swe "MAXVALUE kan bara användas i definitionen av den sista partitionen"
ER_PARTITION_SUBPARTITION_ERROR
eng "Subpartitions can only be hash partitions and by key"
swe "Subpartitioner kan bara vara hash och key partitioner"
ER_PARTITION_WRONG_NO_PART_ERROR
eng "Wrong number of partitions defined, mismatch with previous setting"
swe "Antal partitioner definierade och antal partitioner är inte lika"
ER_PARTITION_WRONG_NO_SUBPART_ERROR
eng "Wrong number of subpartitions defined, mismatch with previous setting"
swe "Antal subpartitioner definierade och antal subpartitioner är inte lika"
ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR
eng "Constant/Random expression in (sub)partitioning function is not allowed"
swe "Konstanta uttryck eller slumpmässiga uttryck är inte tillåtna (sub)partitioneringsfunktioner"
ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR
eng "Expression in RANGE/LIST VALUES must be constant"
swe "Uttryck i RANGE/LIST VALUES måste vara ett konstant uttryck"
ER_FIELD_NOT_FOUND_PART_ERROR
eng "Field in list of fields for partition function not found in table"
swe "Fält i listan av fält för partitionering med key inte funnen i tabellen"
ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR
eng "List of fields is only allowed in KEY partitions"
swe "En lista av fält är endast tillåtet för KEY partitioner"
ER_INCONSISTENT_PARTITION_INFO_ERROR
eng "The partition info in the frm file is not consistent with what can be written into the frm file"
swe "Partitioneringsinformationen i frm-filen är inte konsistent med vad som kan skrivas i frm-filen"
ER_PARTITION_FUNC_NOT_ALLOWED_ERROR
eng "The %s function returns the wrong type"
swe "%s-funktionen returnerar felaktig typ"
ER_PARTITIONS_MUST_BE_DEFINED_ERROR
eng "For %s partitions each partition must be defined"
swe "För %s partitionering så måste varje partition definieras"
ER_RANGE_NOT_INCREASING_ERROR
eng "VALUES LESS THAN value must be strictly increasing for each partition"
swe "Värden i VALUES LESS THAN måste vara strikt växande för varje partition"
ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR
eng "VALUES %s value must be of same type as partition function"
swe "Värden i VALUES %s måste vara av samma typ som partitioneringsfunktionen"
ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR
eng "Multiple definition of same constant in list partitioning"
swe "Multipel definition av samma konstant i list partitionering"
ER_PARTITION_ENTRY_ERROR
eng "Partitioning can not be used stand-alone in query"
swe "Partitioneringssyntax kan inte användas på egen hand i en SQL-fråga"
ER_MIX_HANDLER_ERROR
eng "The mix of handlers in the partitions is not allowed in this version in MySQL"
swe "Denna mix av lagringsmotorer är inte tillåten i denna version av MySQL"
ER_PARTITION_NOT_DEFINED_ERROR
eng "For the partitioned engine it is necessary to define all %s"
swe "För partitioneringsmotorn så är det nödvändigt att definiera alla %s"
ER_TOO_MANY_PARTITIONS_ERROR
eng "Too many partitions were defined"
swe "För många partitioner definierades"
ER_SUBPARTITION_ERROR
eng "It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning"
swe "Det är endast möjligt att blanda RANGE/LIST partitionering med HASH/KEY partitionering för subpartitionering"
ER_CANT_CREATE_HANDLER_FILE
eng "Failed to create specific handler file"
swe "Misslyckades med att skapa specifik fil i lagringsmotor"
ER_BLOB_FIELD_IN_PART_FUNC_ERROR
eng "A BLOB field is not allowed in partition function"
swe "Ett BLOB-fält är inte tillåtet i partitioneringsfunktioner"
ER_CHAR_SET_IN_PART_FIELD_ERROR
eng "VARCHAR only allowed if binary collation for partition functions"
swe "VARCHAR endast tillåten med binär collation för partitioneringsfunktion"
ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF
eng "A %s need to include all fields in the partition function"
swe "En %s behöver inkludera alla fält i partitioneringsfunktionen för denna lagringsmotor"
ER_NO_PARTS_ERROR
eng "Number of %s = 0 is not an allowed value"
swe "Antal %s = 0 är inte ett tillåten värde"

View File

@ -799,7 +799,7 @@ db_show_routine_status(THD *thd, int type, const char *wild)
}
}
table->file->ha_index_init(0);
table->file->ha_index_init(0, 1);
if ((res= table->file->index_first(table->record[0])))
{
res= (res == HA_ERR_END_OF_FILE) ? 0 : SP_INTERNAL_ERROR;
@ -849,7 +849,7 @@ sp_drop_db_routines(THD *thd, char *db)
goto err;
ret= SP_OK;
table->file->ha_index_init(0);
table->file->ha_index_init(0, 1);
if (! table->file->index_read(table->record[0],
key, keylen, HA_READ_KEY_EXACT))
{

View File

@ -2048,7 +2048,7 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs)
key_copy(key, col_privs->record[0], col_privs->key_info, key_prefix_len);
col_privs->field[4]->store("",0, &my_charset_latin1);
col_privs->file->ha_index_init(0);
col_privs->file->ha_index_init(0, 1);
if (col_privs->file->index_read(col_privs->record[0],
(byte*) key,
key_prefix_len, HA_READ_KEY_EXACT))
@ -2193,7 +2193,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
List_iterator <LEX_COLUMN> iter(columns);
class LEX_COLUMN *column;
table->file->ha_index_init(0);
table->file->ha_index_init(0, 1);
while ((column= iter++))
{
ulong privileges= column->rights;
@ -3168,8 +3168,8 @@ my_bool grant_init(THD *org_thd)
t_table = tables[0].table; c_table = tables[1].table;
p_table= tables[2].table;
t_table->file->ha_index_init(0);
p_table->file->ha_index_init(0);
t_table->file->ha_index_init(0, 1);
p_table->file->ha_index_init(0, 1);
if (!t_table->file->index_first(t_table->record[0]))
{
/* Will be restored by org_thd->store_globals() */
@ -4473,7 +4473,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
user_key, key_prefix_length,
HA_READ_KEY_EXACT)))
{
if (error != HA_ERR_KEY_NOT_FOUND)
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
{
table->file->print_error(error, MYF(0));
result= -1;

View File

@ -2560,6 +2560,42 @@ find_field_in_table(THD *thd, TABLE_LIST *table_list,
}
/*
Find a field in a table; no side effects, the only purpose is to check
for the field in the table object and get a reference to it if found.
SYNOPSIS
find_field_in_table_sef()
table table where to find
name Name of field searched for
RETURN
0 field is not found
# pointer to field
*/
Field *find_field_in_table_sef(TABLE *table, const char *name)
{
Field **field_ptr;
if (table->s->name_hash.records)
field_ptr= (Field**)hash_search(&table->s->name_hash,(byte*) name,
strlen(name));
else
{
if (!(field_ptr= table->field))
return (Field *)0;
for (; *field_ptr; ++field_ptr)
if (!my_strcasecmp(system_charset_info, (*field_ptr)->field_name, name))
break;
}
if (field_ptr)
return *field_ptr;
else
return (Field *)0;
}
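/*
  Usage sketch (illustrative only; the table pointer and column name are
  invented, this call is not part of the surrounding hunk):

    Field *part_field= find_field_in_table_sef(table, "part_col");
    if (!part_field)
      ;                        // "part_col" does not exist in this table
*/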
/*
Find field in table
@ -2623,13 +2659,16 @@ Field *find_field_in_real_table(THD *thd, TABLE *table,
(bool)(thd->set_query_id-1));
if (field->query_id != thd->query_id)
{
if (table->get_fields_in_item_tree)
field->flags|= GET_FIXED_FIELDS_FLAG;
field->query_id=thd->query_id;
table->used_fields++;
table->used_keys.intersect(field->part_of_key);
}
else
thd->dupp_field=field;
}
} else if (table->get_fields_in_item_tree)
field->flags|= GET_FIXED_FIELDS_FLAG;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (check_grants && check_grant_column(thd, &table->grant,
table->s->db,

View File

@ -461,7 +461,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
if (keyname)
{
table->file->ha_index_or_rnd_end();
table->file->ha_index_init(keyno);
table->file->ha_index_init(keyno, 1);
error= table->file->index_first(table->record[0]);
}
else
@ -483,7 +483,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
case RLAST:
DBUG_ASSERT(keyname != 0);
table->file->ha_index_or_rnd_end();
table->file->ha_index_init(keyno);
table->file->ha_index_init(keyno, 1);
error= table->file->index_last(table->record[0]);
mode=RPREV;
break;
@ -522,7 +522,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len))))
goto err;
table->file->ha_index_or_rnd_end();
table->file->ha_index_init(keyno);
table->file->ha_index_init(keyno, 1);
key_copy(key, table->record[0], table->key_info + keyno, key_len);
error= table->file->index_read(table->record[0],
key,key_len,ha_rkey_mode);

View File

@ -286,8 +286,8 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
rtopic_id= find_fields[help_relation_help_topic_id].field;
rkey_id= find_fields[help_relation_help_keyword_id].field;
topics->file->ha_index_init(iindex_topic);
relations->file->ha_index_init(iindex_relations);
topics->file->ha_index_init(iindex_topic,1);
relations->file->ha_index_init(iindex_relations,1);
rkey_id->store((longlong) key_id);
rkey_id->get_key_image(buff, rkey_id->pack_length(), Field::itRAW);

View File

@ -155,6 +155,7 @@ void lex_start(THD *thd, uchar *buf,uint length)
lex->yylineno = 1;
lex->in_comment=0;
lex->length=0;
lex->part_info= 0;
lex->select_lex.in_sum_expr=0;
lex->select_lex.expr_list.empty();
lex->select_lex.ftfunc_list_alloc.empty();

View File

@ -25,6 +25,7 @@ class sp_head;
class sp_name;
class sp_instr;
class sp_pcontext;
class partition_info;
/*
The following hack is needed because mysql_yacc.cc does not define
@ -721,6 +722,8 @@ typedef struct st_lex
TABLE_LIST **query_tables_last;
/* store original leaf_tables for INSERT SELECT and PS/SP */
TABLE_LIST *leaf_tables_insert;
/* Partition info structure filled in by PARTITION BY parse part */
partition_info *part_info;
List<key_part_spec> col_list;
List<key_part_spec> ref_list;

sql/sql_partition.cc (new file, 3117 lines)

File diff suppressed because it is too large

View File

@ -1277,6 +1277,9 @@ JOIN::exec()
/* Copy data to the temporary table */
thd->proc_info= "Copying to tmp table";
DBUG_PRINT("info", ("%s", thd->proc_info));
if (!curr_join->sort_and_group &&
curr_join->const_tables != curr_join->tables)
curr_join->join_tab[curr_join->const_tables].sorted= 0;
if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0)))
{
error= tmp_error;
@ -1423,6 +1426,9 @@ JOIN::exec()
1, TRUE))
DBUG_VOID_RETURN;
curr_join->group_list= 0;
if (!curr_join->sort_and_group &&
curr_join->const_tables != curr_join->tables)
curr_join->join_tab[curr_join->const_tables].sorted= 0;
if (setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) ||
(tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table,
0)))
@ -1608,6 +1614,16 @@ JOIN::exec()
(select_options & OPTION_FOUND_ROWS ?
HA_POS_ERROR : unit->select_limit_cnt)))
DBUG_VOID_RETURN;
if (curr_join->const_tables != curr_join->tables &&
!curr_join->join_tab[curr_join->const_tables].table->sort.io_cache)
{
/*
If no IO cache exists for the first table then we are using an
INDEX SCAN and no filesort. Thus we should not remove the sorted
attribute on the INDEX SCAN.
*/
skip_sort_order= 1;
}
}
}
/* XXX: When can we have here thd->net.report_error not zero? */
@ -5659,6 +5675,7 @@ make_join_readinfo(JOIN *join, uint options)
uint i;
bool statistics= test(!(join->select_options & SELECT_DESCRIBE));
bool sorted= 1;
DBUG_ENTER("make_join_readinfo");
for (i=join->const_tables ; i < join->tables ; i++)
@ -5668,6 +5685,8 @@ make_join_readinfo(JOIN *join, uint options)
tab->read_record.table= table;
tab->read_record.file=table->file;
tab->next_select=sub_select; /* normal select */
tab->sorted= sorted;
sorted= 0; // only first must be sorted
switch (tab->type) {
case JT_SYSTEM: // Only happens with left join
table->status=STATUS_NO_RECORD;
@ -8915,7 +8934,12 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
new_table.file->extra(HA_EXTRA_WRITE_CACHE);
#endif
/* copy all old rows */
/*
copy all old rows from the heap table to the MyISAM table.
This is the only code that uses record[1] to read/write, but this
is safe as it is a temporary MyISAM table without timestamp/autoincrement
or partitioning.
*/
while (!table->file->rnd_next(new_table.record[1]))
{
if ((write_err=new_table.file->write_row(new_table.record[1])))
@ -9046,7 +9070,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
empty_record(table);
if (table->group && join->tmp_table_param.sum_func_count &&
table->s->keys && !table->file->inited)
table->file->ha_index_init(0);
table->file->ha_index_init(0, 0);
}
/* Set up select_end */
join->join_tab[join->tables-1].next_select= setup_end_select_func(join);
@ -9660,7 +9684,13 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
table->file->extra(HA_EXTRA_KEYREAD);
tab->index= tab->ref.key;
}
if ((error=join_read_const(tab)))
error=join_read_const(tab);
if (table->key_read)
{
table->key_read=0;
table->file->extra(HA_EXTRA_NO_KEYREAD);
}
if (error)
{
tab->info="unique row not found";
/* Mark for EXPLAIN that the row was not found */
@ -9668,11 +9698,6 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
if (!table->maybe_null || error > 0)
DBUG_RETURN(error);
}
if (table->key_read)
{
table->key_read=0;
table->file->extra(HA_EXTRA_NO_KEYREAD);
}
}
if (*tab->on_expr_ref && !table->null_row)
{
@ -9744,7 +9769,7 @@ join_read_const(JOIN_TAB *tab)
table->status= STATUS_NOT_FOUND;
mark_as_null_row(tab->table);
empty_record(table);
if (error != HA_ERR_KEY_NOT_FOUND)
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1;
}
@ -9767,7 +9792,9 @@ join_read_key(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
{
table->file->ha_index_init(tab->ref.key, tab->sorted);
}
if (cmp_buffer_with_ref(tab) ||
(table->status & (STATUS_GARBAGE | STATUS_NO_PARENT | STATUS_NULL_ROW)))
{
@ -9779,7 +9806,7 @@ join_read_key(JOIN_TAB *tab)
error=table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);
if (error && error != HA_ERR_KEY_NOT_FOUND)
if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
}
table->null_row=0;
@ -9794,14 +9821,16 @@ join_read_always_key(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
{
table->file->ha_index_init(tab->ref.key, tab->sorted);
}
if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
return -1;
if ((error=table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT)))
{
if (error != HA_ERR_KEY_NOT_FOUND)
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1; /* purecov: inspected */
}
@ -9821,14 +9850,14 @@ join_read_last_key(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
table->file->ha_index_init(tab->ref.key, tab->sorted);
if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
return -1;
if ((error=table->file->index_read_last(table->record[0],
tab->ref.key_buff,
tab->ref.key_length)))
{
if (error != HA_ERR_KEY_NOT_FOUND)
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1; /* purecov: inspected */
}
@ -9931,7 +9960,7 @@ join_read_first(JOIN_TAB *tab)
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
if (!table->file->inited)
table->file->ha_index_init(tab->index);
table->file->ha_index_init(tab->index, tab->sorted);
if ((error=tab->table->file->index_first(tab->table->record[0])))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
@ -9970,7 +9999,7 @@ join_read_last(JOIN_TAB *tab)
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
if (!table->file->inited)
table->file->ha_index_init(tab->index);
table->file->ha_index_init(tab->index, 1);
if ((error= tab->table->file->index_last(tab->table->record[0])))
return report_error(table, error);
return 0;
@ -9994,7 +10023,7 @@ join_ft_read_first(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
table->file->ha_index_init(tab->ref.key, 1);
#if NOT_USED_YET
if (cp_buffer_from_ref(tab->join->thd, &tab->ref)) // as ft-key doesn't use store_key's
return -1; // see also FT_SELECT::init()
@ -10380,7 +10409,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
error, 0))
DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error
/* Change method to update rows */
table->file->ha_index_init(0);
table->file->ha_index_init(0, 0);
join->join_tab[join->tables-1].next_select=end_unique_update;
}
join->send_records++;

View File

@ -133,6 +133,7 @@ typedef struct st_join_table {
uint used_fields,used_fieldlength,used_blobs;
enum join_type type;
bool cached_eq_ref_table,eq_ref_table,not_used_in_distinct;
bool sorted;
TABLE_REF ref;
JOIN_CACHE cache;
JOIN *join;

View File

@ -963,11 +963,16 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
packet->append("\n)", 2);
if (!(thd->variables.sql_mode & MODE_NO_TABLE_OPTIONS) && !foreign_db_mode)
{
if (thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))
packet->append(" TYPE=", 6);
else
packet->append(" ENGINE=", 8);
packet->append(file->table_type());
#ifdef HAVE_PARTITION_DB
if (!table->s->part_info)
#endif
{
if (thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))
packet->append(" TYPE=", 6);
else
packet->append(" ENGINE=", 8);
packet->append(file->table_type());
}
if (share->table_charset &&
!(thd->variables.sql_mode & MODE_MYSQL323) &&
@ -1034,6 +1039,23 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
append_directory(thd, packet, "DATA", create_info.data_file_name);
append_directory(thd, packet, "INDEX", create_info.index_file_name);
}
#ifdef HAVE_PARTITION_DB
{
/*
The partition syntax for CREATE TABLE comes at the end of the statement.
*/
uint part_syntax_len;
char *part_syntax;
if (table->s->part_info &&
((part_syntax= generate_partition_syntax(table->s->part_info,
&part_syntax_len,
FALSE))))
{
packet->append(part_syntax, part_syntax_len);
my_free(part_syntax, MYF(0));
}
}
#endif
DBUG_RETURN(0);
}
@ -2728,7 +2750,7 @@ int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond)
{
DBUG_RETURN(1);
}
proc_table->file->ha_index_init(0);
proc_table->file->ha_index_init(0, 1);
if ((res= proc_table->file->index_first(proc_table->record[0])))
{
res= (res == HA_ERR_END_OF_FILE) ? 0 : 1;

View File

@ -28,6 +28,7 @@
#include <io.h>
#endif
const char *primary_key_name="PRIMARY";
static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end);
@ -1513,7 +1514,66 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
if (create_info->row_type == ROW_TYPE_DYNAMIC)
db_options|=HA_OPTION_PACK_RECORD;
alias= table_case_name(create_info, table_name);
file=get_new_handler((TABLE*) 0, create_info->db_type);
if (!(file=get_new_handler((TABLE*) 0, create_info->db_type)))
{
my_error(ER_OUTOFMEMORY, MYF(0), 128);//128 bytes invented
DBUG_RETURN(TRUE);
}
#ifdef HAVE_PARTITION_DB
partition_info *part_info= thd->lex->part_info;
if (part_info)
{
/*
The table has been specified as a partitioned table.
If this is part of an ALTER TABLE the handler will be the partition
handler but we need to specify the default handler to use for
partitions also in the call to check_partition_info. We transport
this information in the default_db_type variable; it is either
DB_TYPE_DEFAULT or the engine set in the ALTER TABLE command.
*/
enum db_type part_engine_type= create_info->db_type;
char *part_syntax_buf;
uint syntax_len;
if (part_engine_type == DB_TYPE_PARTITION_DB)
{
/*
This only happens at ALTER TABLE.
default_engine_type was assigned from the engine set in the ALTER
TABLE command.
*/
part_engine_type= ha_checktype(thd,
part_info->default_engine_type, 0, 0);
}
if (check_partition_info(part_info, part_engine_type,
file, create_info->max_rows))
DBUG_RETURN(TRUE);
/*
We reverse the partitioning parser and generate a standard format
for syntax stored in frm file.
*/
if (!(part_syntax_buf= generate_partition_syntax(part_info,
&syntax_len,
TRUE)))
DBUG_RETURN(TRUE);
part_info->part_info_string= part_syntax_buf;
part_info->part_info_len= syntax_len;
if ((!(file->partition_flags() & HA_CAN_PARTITION)) ||
create_info->db_type == DB_TYPE_PARTITION_DB)
{
/*
The handler assigned to the table cannot handle partitioning.
Assign the partition handler as the handler of the table.
*/
DBUG_PRINT("info", ("db_type= %d, part_flag= %d", create_info->db_type,file->partition_flags()));
delete file;
create_info->db_type= DB_TYPE_PARTITION_DB;
if (!(file= get_ha_partition(part_info)))
{
DBUG_RETURN(TRUE);
}
}
}
#endif
#ifdef NOT_USED
/*
@ -1527,7 +1587,7 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
(file->table_flags() & HA_NO_TEMP_TABLES))
{
my_error(ER_ILLEGAL_HA, MYF(0), table_name);
DBUG_RETURN(TRUE);
goto err;
}
#endif
@ -1550,7 +1610,7 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
&keys, internal_tmp_table, &db_options, file,
&key_info_buffer, &key_count,
select_field_count))
DBUG_RETURN(TRUE);
goto err;
/* Check if table exists */
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
@ -1572,13 +1632,13 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
{
create_info->table_existed= 1; // Mark that table existed
DBUG_RETURN(FALSE);
goto no_err;
}
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias);
DBUG_RETURN(TRUE);
goto err;
}
if (wait_if_global_read_lock(thd, 0, 1))
DBUG_RETURN(error);
goto err;
VOID(pthread_mutex_lock(&LOCK_open));
if (!internal_tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE))
{
@ -1631,7 +1691,7 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
create_info->table_options=db_options;
if (rea_create_table(thd, path, create_info, fields, key_count,
key_info_buffer))
key_info_buffer, file))
{
/* my_error(ER_CANT_CREATE_TABLE,MYF(0),table_name,my_errno); */
goto end;
@ -1660,6 +1720,13 @@ end:
delete file;
thd->proc_info="After create";
DBUG_RETURN(error);
err:
delete file;
DBUG_RETURN(TRUE);
no_err:
delete file;
DBUG_RETURN(FALSE);
}
/*
@ -3138,6 +3205,59 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
old_db_type= table->s->db_type;
if (create_info->db_type == DB_TYPE_DEFAULT)
create_info->db_type= old_db_type;
#ifdef HAVE_PARTITION_DB
/*
When thd->lex->part_info has a reference to a partition_info, the
ALTER TABLE contained a partitioning definition.
Case I:
There was a partitioning before and a new one is defined.
We use the new partitioning. The new partitioning is already
defined in the correct variable so no work is needed to
accomplish this.
Case IIa:
There was a partitioning before and there is no new one defined.
Also the user has not specified an explicit engine to use.
We use the old partitioning also for the new table. We do this
by assigning the partition_info from the table loaded in
open_ltable to the partition_info struct used by mysql_create_table
later in this method.
Case IIb:
There was a partitioning before and there is no new one defined.
The user has specified an explicit engine to use.
Since an explicit engine was specified, we override the old
partitioning info and create a new table using the specified
engine. This is the reason for the extra check whether the old
and new engines are equal.
Case III:
There was no partitioning before altering the table, but there is
partitioning defined in the altered table. Use the new partitioning.
No work needed since the partitioning info is already in the
correct variable.
Case IV:
There was no partitioning before and no partitioning defined. Obviously
no work needed.
*/
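/*
  Illustrative ALTER TABLE statements for the cases above (invented table
  and column names, assumed syntax):
  Case I:   ALTER TABLE t1 PARTITION BY HASH (a) PARTITIONS 4;
  Case IIa: ALTER TABLE t1 ADD COLUMN b INT;     -- keeps old partitioning
  Case IIb: ALTER TABLE t1 ENGINE = MYISAM;      -- explicit engine given,
                                                 -- old partitioning dropped
  Case III: ALTER TABLE t1 PARTITION BY KEY (a); -- t1 was unpartitioned
*/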
if (table->s->part_info)
if (!thd->lex->part_info &&
create_info->db_type == old_db_type)
thd->lex->part_info= table->s->part_info;
if (thd->lex->part_info)
{
/*
Need to cater for engine types that can handle partition without
using the partition handler.
*/
thd->lex->part_info->default_engine_type= create_info->db_type;
create_info->db_type= DB_TYPE_PARTITION_DB;
}
#endif
if (check_engine(thd, new_name, &create_info->db_type))
DBUG_RETURN(TRUE);
new_db_type= create_info->db_type;

View File

@ -148,7 +148,7 @@ int mysql_update(THD *thd,
/* pass counter value */
thd->lex->table_count= table_count;
/* convert to multiupdate */
return 2;
DBUG_RETURN(2);
}
if (lock_tables(thd, table_list, table_count) ||
@ -265,7 +265,12 @@ int mysql_update(THD *thd,
else
used_key_is_modified=0;
#ifdef HAVE_PARTITION_DB
if (used_key_is_modified || order ||
partition_key_modified(table, fields))
#else
if (used_key_is_modified || order)
#endif
{
/*
We can't update table directly; We must first search after all
@ -452,8 +457,8 @@ int mysql_update(THD *thd,
call then it should be included in the count of dup_key_found
and error should be set to 0 (only if these errors are ignored).
*/
error= table->file->bulk_update_row(table->record[0],
table->record[1],
error= table->file->bulk_update_row(table->record[1],
table->record[0],
&dup_key_found);
limit+= dup_key_found;
updated-= dup_key_found;

View File

@ -356,13 +356,16 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token LEAVES
%token LEAVE_SYM
%token LEFT
%token LESS_SYM
%token LEVEL_SYM
%token LEX_HOSTNAME
%token LIKE
%token LIMIT
%token LINEAR_SYM
%token LINEFROMTEXT
%token LINES
%token LINESTRING
%token LIST_SYM
%token LOAD
%token LOCAL_SYM
%token LOCATE
@ -402,6 +405,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token MAX_SYM
%token MAX_UPDATES_PER_HOUR
%token MAX_USER_CONNECTIONS_SYM
%token MAX_VALUE_SYM
%token MEDIUMBLOB
%token MEDIUMINT
%token MEDIUMTEXT
@ -436,6 +440,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token NE
%token NEW_SYM
%token NEXT_SYM
%token NODEGROUP_SYM
%token NONE_SYM
%token NOT2_SYM
%token NOT_SYM
@ -464,6 +469,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token OUT_SYM
%token PACK_KEYS_SYM
%token PARTIAL
%token PARTITION_SYM
%token PARTITIONS_SYM
%token PASSWORD
%token PARAM_MARKER
%token PHASE_SYM
@ -490,6 +497,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token RAID_STRIPED_SYM
%token RAID_TYPE
%token RAND
%token RANGE_SYM
%token READS_SYM
%token READ_SYM
%token REAL
@ -575,6 +583,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token STRING_SYM
%token SUBDATE_SYM
%token SUBJECT_SYM
%token SUBPARTITION_SYM
%token SUBPARTITIONS_SYM
%token SUBSTRING
%token SUBSTRING_INDEX
%token SUM_SYM
@ -595,6 +605,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token TINYBLOB
%token TINYINT
%token TINYTEXT
%token THAN_SYM
%token TO_SYM
%token TRAILING
%token TRANSACTION_SYM
@ -618,11 +629,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token UNIX_TIMESTAMP
%token UNKNOWN_SYM
%token UNLOCK_SYM
%token UNLOCK_SYM
%token UNSIGNED
%token UNTIL_SYM
%token UNTIL_SYM
%token UPDATE_SYM
%token UPDATE_SYM
%token USAGE
%token USER
@ -723,6 +731,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
sp_opt_default
simple_ident_nospvar simple_ident_q
field_or_var limit_option
part_bit_expr part_func_expr
%type <item_num>
NUM_literal
@ -821,6 +830,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
statement sp_suid opt_view_list view_list or_replace algorithm
sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa
load_data opt_field_or_var_spec fields_or_vars opt_load_data_set_spec
partition_entry
END_OF_INPUT
%type <NONE> call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt
@ -886,6 +896,7 @@ statement:
| lock
| optimize
| keycache
| partition_entry
| preload
| prepare
| purge
@ -2538,7 +2549,9 @@ trg_event:
create2:
'(' create2a {}
| opt_create_table_options create3 {}
| opt_create_table_options
opt_partitioning {}
create3 {}
| LIKE table_ident
{
LEX *lex=Lex;
@ -2554,8 +2567,12 @@ create2:
;
create2a:
field_list ')' opt_create_table_options create3 {}
| create_select ')' { Select->set_braces(1);} union_opt {}
field_list ')' opt_create_table_options
opt_partitioning {}
create3 {}
| opt_partitioning {}
create_select ')'
{ Select->set_braces(1);} union_opt {}
;
create3:
@ -2566,6 +2583,411 @@ create3:
{ Select->set_braces(1);} union_opt {}
;
/*
This part of the parser handles the partition information.
Its first version was written by Mikael Ronström, with many answers to
questions provided by Antony Curtis.
The partition grammar can be called from three places.
1) CREATE TABLE ... PARTITION ..
2) ALTER TABLE table_name PARTITION ...
3) PARTITION ...
The first place is called when a new table is created from a MySQL client.
The second place is called when a table is altered with the ALTER TABLE
command from a MySQL client.
The third place is called when opening an .frm file and finding partition
info in it. PARTITION must not become an entry point for SQL client
queries; this is arranged by setting some state variables before arriving
here.
To be able to handle errors we only set the error code in this code
and handle the error condition in the function calling the parser. This
is necessary to ensure that errors can also be handled when the parser is
called from the openfrm function.
*/
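/*
  Example of a statement this grammar is intended to accept (illustrative
  only; names are invented):

    CREATE TABLE t1 (a INT, b DATE)
    PARTITION BY RANGE (YEAR(b))
    SUBPARTITION BY HASH (a) SUBPARTITIONS 2
    (PARTITION p0 VALUES LESS THAN (1995),
     PARTITION p1 VALUES LESS THAN MAXVALUE);
*/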
opt_partitioning:
/* empty */ {}
| partitioning
;
partitioning:
PARTITION_SYM
{ Lex->part_info= new partition_info(); }
partition
;
partition_entry:
PARTITION_SYM
{
LEX *lex= Lex;
if (lex->part_info)
{
/*
We enter here when opening the frm file to translate
partition info string into part_info data structure.
*/
lex->part_info= new partition_info();
}
else
{
yyerror(ER(ER_PARTITION_ENTRY_ERROR));
YYABORT;
}
}
partition {};
partition:
BY part_type_def opt_no_parts {} opt_sub_part {} part_defs;
part_type_def:
opt_linear KEY_SYM '(' part_field_list ')'
{
LEX *lex= Lex;
lex->part_info->list_of_part_fields= TRUE;
lex->part_info->part_type= HASH_PARTITION;
}
| opt_linear HASH_SYM
{ Lex->part_info->part_type= HASH_PARTITION; }
part_func {}
| RANGE_SYM
{ Lex->part_info->part_type= RANGE_PARTITION; }
part_func {}
| LIST_SYM
{ Lex->part_info->part_type= LIST_PARTITION; }
part_func {};
opt_linear:
/* empty */ {}
| LINEAR_SYM
{ Lex->part_info->linear_hash_ind= TRUE;};
part_field_list:
part_field_item {}
| part_field_list ',' part_field_item {};
part_field_item:
ident
{
Lex->part_info->part_field_list.push_back($1.str);
};
part_func:
'(' remember_name part_func_expr remember_end ')'
{
LEX *lex= Lex;
uint expr_len= (uint)($4 - $2) - 1;
lex->part_info->list_of_part_fields= FALSE;
lex->part_info->part_expr= $3;
lex->part_info->part_func_string= $2+1;
lex->part_info->part_func_len= expr_len;
};
sub_part_func:
'(' remember_name part_func_expr remember_end ')'
{
LEX *lex= Lex;
uint expr_len= (uint)($4 - $2) - 1;
lex->part_info->list_of_subpart_fields= FALSE;
lex->part_info->subpart_expr= $3;
lex->part_info->subpart_func_string= $2+1;
lex->part_info->subpart_func_len= expr_len;
};
opt_no_parts:
/* empty */ {}
| PARTITIONS_SYM ulong_num
{
uint no_parts= $2;
if (no_parts == 0)
{
my_error(ER_NO_PARTS_ERROR, MYF(0), "partitions");
YYABORT;
}
Lex->part_info->no_parts= no_parts;
};
opt_sub_part:
/* empty */ {}
| SUBPARTITION_SYM BY opt_linear HASH_SYM sub_part_func
{ Lex->part_info->subpart_type= HASH_PARTITION; }
opt_no_subparts {}
| SUBPARTITION_SYM BY opt_linear KEY_SYM
'(' sub_part_field_list ')'
{
LEX *lex= Lex;
lex->part_info->subpart_type= HASH_PARTITION;
lex->part_info->list_of_subpart_fields= TRUE;
}
opt_no_subparts {};
sub_part_field_list:
sub_part_field_item {}
| sub_part_field_list ',' sub_part_field_item {};
sub_part_field_item:
ident
{ Lex->part_info->subpart_field_list.push_back($1.str); };
part_func_expr:
bit_expr
{
LEX *lex= Lex;
bool not_corr_func;
not_corr_func= !lex->safe_to_cache_query;
lex->safe_to_cache_query= 1;
if (not_corr_func)
{
yyerror(ER(ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR));
YYABORT;
}
$$=$1;
};
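/*
  Illustrative effect of the safe_to_cache_query test above: the partition
  function must be deterministic, so PARTITION BY HASH (a + 1) is accepted
  while PARTITION BY HASH (RAND()) is rejected (example is an assumption
  based on the check, not a test case from this patch).
*/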
opt_no_subparts:
/* empty */ {}
| SUBPARTITIONS_SYM ulong_num
{
uint no_parts= $2;
if (no_parts == 0)
{
my_error(ER_NO_PARTS_ERROR, MYF(0), "subpartitions");
YYABORT;
}
Lex->part_info->no_subparts= no_parts;
};
part_defs:
/* empty */
{}
| '(' part_def_list ')'
{
LEX *lex= Lex;
partition_info *part_info= lex->part_info;
if (part_info->no_parts != 0)
{
if (part_info->no_parts !=
part_info->count_curr_parts)
{
yyerror(ER(ER_PARTITION_WRONG_NO_PART_ERROR));
YYABORT;
}
}
else if (part_info->count_curr_parts > 0)
{
part_info->no_parts= part_info->count_curr_parts;
}
part_info->count_curr_subparts= 0;
part_info->count_curr_parts= 0;
};
part_def_list:
part_definition {}
| part_def_list ',' part_definition {};
part_definition:
PARTITION_SYM
{
LEX *lex= Lex;
partition_info *part_info= lex->part_info;
partition_element *p_elem= new partition_element();
if (!p_elem)
{
my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
YYABORT;
}
part_info->curr_part_elem= p_elem;
part_info->current_partition= p_elem;
part_info->use_default_partitions= FALSE;
part_info->partitions.push_back(p_elem);
p_elem->engine_type= DB_TYPE_UNKNOWN;
part_info->count_curr_parts++;
}
part_name {}
opt_part_values {}
opt_part_options {}
opt_sub_partition {};
part_name:
ident_or_text
{ Lex->part_info->curr_part_elem->partition_name= $1.str; };
opt_part_values:
/* empty */
{
LEX *lex= Lex;
if (lex->part_info->part_type == RANGE_PARTITION)
{
my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
"RANGE", "LESS THAN");
YYABORT;
}
if (lex->part_info->part_type == LIST_PARTITION)
{
my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
"LIST", "IN");
YYABORT;
}
}
| VALUES LESS_SYM THAN_SYM part_func_max
{
if (Lex->part_info->part_type != RANGE_PARTITION)
{
my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
"RANGE", "LESS THAN");
YYABORT;
}
}
| VALUES IN_SYM '(' part_list_func ')'
{
if (Lex->part_info->part_type != LIST_PARTITION)
{
my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
"LIST", "IN");
YYABORT;
}
};
part_func_max:
MAX_VALUE_SYM
{
LEX *lex= Lex;
if (lex->part_info->defined_max_value)
{
yyerror(ER(ER_PARTITION_MAXVALUE_ERROR));
YYABORT;
}
lex->part_info->defined_max_value= TRUE;
}
| part_range_func
{
if (Lex->part_info->defined_max_value)
{
yyerror(ER(ER_PARTITION_MAXVALUE_ERROR));
YYABORT;
}
};
part_range_func:
'(' part_bit_expr ')'
{
Lex->part_info->curr_part_elem->range_expr= $2;
};
part_list_func:
part_list_item {}
| part_list_func ',' part_list_item {};
part_list_item:
part_bit_expr
{
Lex->part_info->curr_part_elem->list_expr_list.push_back($1);
};
part_bit_expr:
bit_expr
{
Item *part_expr= $1;
bool not_corr_func;
LEX *lex= Lex;
Name_resolution_context *context= &lex->current_select->context;
TABLE_LIST *save_list= context->table_list;
context->table_list= 0;
part_expr->fix_fields(YYTHD, (Item**)0);
context->table_list= save_list;
not_corr_func= !part_expr->const_item() ||
!lex->safe_to_cache_query;
lex->safe_to_cache_query= 1;
if (not_corr_func)
{
yyerror(ER(ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR));
YYABORT;
}
$$= part_expr;
};
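/*
  Illustrative effect of the const_item() test above: values in
  VALUES LESS THAN / VALUES IN must be parse-time constants, so
  VALUES LESS THAN (1995) is accepted while VALUES LESS THAN (a + 1),
  which references a column, is rejected (assumption based on the check).
*/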
opt_sub_partition:
/* empty */ {}
| '(' sub_part_list ')'
{
LEX *lex= Lex;
partition_info *part_info= lex->part_info;
if (part_info->no_subparts != 0)
{
if (part_info->no_subparts !=
part_info->count_curr_subparts)
{
yyerror(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR));
YYABORT;
}
}
else if (part_info->count_curr_subparts > 0)
{
part_info->no_subparts= part_info->count_curr_subparts;
}
part_info->count_curr_subparts= 0;
};
sub_part_list:
sub_part_definition {}
| sub_part_list ',' sub_part_definition {};
sub_part_definition:
SUBPARTITION_SYM
{
LEX *lex= Lex;
partition_info *part_info= lex->part_info;
partition_element *p_elem= new partition_element();
if (!p_elem)
{
my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
YYABORT;
}
part_info->curr_part_elem= p_elem;
part_info->current_partition->subpartitions.push_back(p_elem);
part_info->use_default_subpartitions= FALSE;
part_info->count_curr_subparts++;
p_elem->engine_type= DB_TYPE_UNKNOWN;
}
sub_name opt_part_options {};
sub_name:
ident_or_text
{ Lex->part_info->curr_part_elem->partition_name= $1.str; };
opt_part_options:
/* empty */ {}
| opt_part_option_list {};
opt_part_option_list:
opt_part_option_list opt_part_option {}
| opt_part_option {};
opt_part_option:
TABLESPACE opt_equal ident_or_text
{ Lex->part_info->curr_part_elem->tablespace_name= $3.str; }
| opt_storage ENGINE_SYM opt_equal storage_engines
{ Lex->part_info->curr_part_elem->engine_type= $4; }
| NODEGROUP_SYM opt_equal ulong_num
{ Lex->part_info->curr_part_elem->nodegroup_id= $3; }
| MAX_ROWS opt_equal ulonglong_num
{ Lex->part_info->curr_part_elem->part_max_rows= $3; }
| MIN_ROWS opt_equal ulonglong_num
{ Lex->part_info->curr_part_elem->part_min_rows= $3; }
| DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
{ Lex->part_info->curr_part_elem->data_file_name= $4.str; }
| INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
{ Lex->part_info->curr_part_elem->index_file_name= $4.str; }
| COMMENT_SYM opt_equal TEXT_STRING_sys
{ Lex->part_info->curr_part_elem->part_comment= $3.str; };
/*
End of partition parser part
*/
create_select:
SELECT_SYM
{
@ -3338,7 +3760,7 @@ alter:
lex->alter_info.reset();
lex->alter_info.flags= 0;
}
alter_list
alter_commands
{}
| ALTER DATABASE ident_or_empty
{
@ -3404,11 +3826,18 @@ ident_or_empty:
/* empty */ { $$= 0; }
| ident { $$= $1.str; };
alter_list:
alter_commands:
| DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; }
| IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; }
| alter_list_item
| alter_list ',' alter_list_item;
| alter_list
opt_partitioning
| partitioning
;
alter_list:
alter_list_item
| alter_list ',' alter_list_item
;
add_column:
ADD opt_column
@ -7363,6 +7792,7 @@ keyword:
| LANGUAGE_SYM {}
| NO_SYM {}
| OPEN_SYM {}
| PARTITION_SYM {}
| PREPARE_SYM {}
| REPAIR {}
| RESET_SYM {}
@ -7463,8 +7893,10 @@ keyword_sp:
| RELAY_THREAD {}
| LAST_SYM {}
| LEAVES {}
| LESS_SYM {}
| LEVEL_SYM {}
| LINESTRING {}
| LIST_SYM {}
| LOCAL_SYM {}
| LOCKS_SYM {}
| LOGS_SYM {}
@ -7488,6 +7920,7 @@ keyword_sp:
| MAX_QUERIES_PER_HOUR {}
| MAX_UPDATES_PER_HOUR {}
| MAX_USER_CONNECTIONS_SYM {}
| MAX_VALUE_SYM {}
| MEDIUM_SYM {}
| MERGE_SYM {}
| MICROSECOND_SYM {}
@ -7508,6 +7941,7 @@ keyword_sp:
| NDBCLUSTER_SYM {}
| NEXT_SYM {}
| NEW_SYM {}
| NODEGROUP_SYM {}
| NONE_SYM {}
| NVARCHAR_SYM {}
| OFFSET_SYM {}
@ -7516,6 +7950,7 @@ keyword_sp:
| ONE_SYM {}
| PACK_KEYS_SYM {}
| PARTIAL {}
| PARTITIONS_SYM {}
| PASSWORD {}
| PHASE_SYM {}
| POINT_SYM {}
@ -7566,6 +8001,8 @@ keyword_sp:
| STRING_SYM {}
| SUBDATE_SYM {}
| SUBJECT_SYM {}
| SUBPARTITION_SYM {}
| SUBPARTITIONS_SYM {}
| SUPER_SYM {}
| SUSPEND_SYM {}
| TABLES {}
@ -7573,6 +8010,7 @@ keyword_sp:
| TEMPORARY {}
| TEMPTABLE_SYM {}
| TEXT_SYM {}
| THAN_SYM {}
| TRANSACTION_SYM {}
| TIMESTAMP {}
| TIMESTAMP_ADD {}

View File

@ -70,7 +70,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
int j,error, errarg= 0;
uint rec_buff_length,n_length,int_length,records,key_parts,keys,
interval_count,interval_parts,read_length,db_create_options;
uint key_info_length, com_length;
uint key_info_length, com_length, part_info_len, extra_rec_buf_length;
ulong pos;
char index_file[FN_REFLEN], *names, *keynames, *comment_pos;
uchar head[288],*disk_buff,new_field_pack_flag;
@ -153,6 +153,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
goto err; /* purecov: inspected */
*fn_ext(index_file)='\0'; // Remove .frm extension
part_info_len= uint4korr(head+55);
share->frm_version= head[2];
/*
Check if .frm file created by MySQL 5.0. In this case we want to
@ -300,10 +301,6 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
}
#endif
/* Allocate handler */
if (!(outparam->file= get_new_handler(outparam, share->db_type)))
goto err;
error=4;
outparam->reginfo.lock_type= TL_UNLOCK;
outparam->current_lock=F_UNLCK;
@ -314,8 +311,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (prgflag & (READ_ALL+EXTRA_RECORD))
records++;
/* QQ: TODO, remove the +1 from below */
extra_rec_buf_length= uint2korr(head+59);
rec_buff_length= ALIGN_SIZE(share->reclength + 1 +
outparam->file->extra_rec_buf_length());
extra_rec_buf_length);
share->rec_buff_length= rec_buff_length;
if (!(record= (char *) alloc_root(&outparam->mem_root,
rec_buff_length * records)))
@ -435,9 +433,22 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (keynames)
fix_type_pointers(&int_array, &share->keynames, 1, &keynames);
if (part_info_len > 0)
{
#ifdef HAVE_PARTITION_DB
if (mysql_unpack_partition(file, thd, part_info_len, outparam))
goto err;
#else
goto err;
#endif
}
VOID(my_close(file,MYF(MY_WME)));
file= -1;
/* Allocate handler */
if (!(outparam->file= get_new_handler(outparam, share->db_type)))
goto err;
record= (char*) outparam->record[0]-1; /* Fieldstart = 1 */
if (null_field_first)
{
@ -859,6 +870,13 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (outparam->file->ha_allocate_read_write_set(share->fields))
goto err;
/* Fix the partition functions and ensure they are not constant functions */
if (part_info_len > 0)
#ifdef HAVE_PARTITION_DB
if (fix_partition_func(thd,name,outparam))
#endif
goto err;
/* The table struct is now initialized; Open the table */
error=2;
if (db_stat)
@ -916,6 +934,13 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (! error_reported)
frm_error(error,outparam,name,ME_ERROR+ME_WAITTANG, errarg);
delete outparam->file;
#ifdef HAVE_PARTITION_DB
if (outparam->s->part_info)
{
free_items(outparam->s->part_info->item_free_list);
outparam->s->part_info->item_free_list= 0;
}
#endif
outparam->file=0; // For easier errorchecking
outparam->db_stat=0;
hash_free(&share->name_hash);
@ -942,6 +967,13 @@ int closefrm(register TABLE *table)
table->field= 0;
}
delete table->file;
#ifdef HAVE_PARTITION_DB
if (table->s->part_info)
{
free_items(table->s->part_info->item_free_list);
table->s->part_info->item_free_list= 0;
}
#endif
table->file= 0; /* For easier errorchecking */
hash_free(&table->s->name_hash);
free_root(&table->mem_root, MYF(0));

View File

@ -21,6 +21,7 @@ class Item; /* Needed by ORDER */
class GRANT_TABLE;
class st_select_lex_unit;
class st_select_lex;
class partition_info;
class COND_EQUAL;
/* Order clause list element */
@ -96,6 +97,9 @@ class Table_triggers_list;
typedef struct st_table_share
{
#ifdef HAVE_PARTITION_DB
partition_info *part_info; /* Partition related information */
#endif
/* hash of field names (contains pointers to elements of field array) */
HASH name_hash; /* hash of field names */
MEM_ROOT mem_root;
@ -203,6 +207,8 @@ struct st_table {
ORDER *group;
const char *alias; /* alias or table name */
uchar *null_flags;
MY_BITMAP *read_set;
MY_BITMAP *write_set;
query_id_t query_id;
ha_rows quick_rows[MAX_KEY];
@ -256,6 +262,7 @@ struct st_table {
my_bool auto_increment_field_not_null;
my_bool insert_or_update; /* Can be used by the handler */
my_bool alias_name_used; /* true if table_name is alias */
my_bool get_fields_in_item_tree; /* Signal to fix_field */
REGINFO reginfo; /* field connections */
MEM_ROOT mem_root;

View File

@ -1623,7 +1623,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
mysql.time_zone* tables are MyISAM and these operations always succeed
for MyISAM.
*/
(void)table->file->ha_index_init(0);
(void)table->file->ha_index_init(0, 1);
tz_leapcnt= 0;
res= table->file->index_first(table->record[0]);
@ -1800,7 +1800,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
mysql.time_zone* tables are MyISAM and these operations always succeed
for MyISAM.
*/
(void)table->file->ha_index_init(0);
(void)table->file->ha_index_init(0, 1);
if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
0, HA_READ_KEY_EXACT))
@ -1827,7 +1827,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
table= tz_tables->table;
tz_tables= tz_tables->next_local;
table->field[0]->store((longlong)tzid);
(void)table->file->ha_index_init(0);
(void)table->file->ha_index_init(0, 1);
if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
0, HA_READ_KEY_EXACT))
@ -1854,7 +1854,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
table= tz_tables->table;
tz_tables= tz_tables->next_local;
table->field[0]->store((longlong)tzid);
(void)table->file->ha_index_init(0);
(void)table->file->ha_index_init(0, 1);
// FIXME Is there any better approach than explicitly specifying 4 ???
res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
@ -1926,7 +1926,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
*/
table= tz_tables->table;
table->field[0]->store((longlong)tzid);
(void)table->file->ha_index_init(0);
(void)table->file->ha_index_init(0, 1);
// FIXME Is there any better approach than explicitly specifying 4 ???
res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,

View File

@ -46,7 +46,8 @@ static bool pack_fields(File file, List<create_field> &create_fields,
static bool make_empty_rec(THD *thd, int file, enum db_type table_type,
uint table_options,
List<create_field> &create_fields,
uint reclength, ulong data_offset);
uint reclength, ulong data_offset,
handler *handler);
/*
Create a frm (table definition) file
@ -79,13 +80,18 @@ bool mysql_create_frm(THD *thd, my_string file_name,
uchar fileinfo[64],forminfo[288],*keybuff;
TYPELIB formnames;
uchar *screen_buff;
#ifdef HAVE_PARTITION_DB
partition_info *part_info= thd->lex->part_info;
#endif
DBUG_ENTER("mysql_create_frm");
#ifdef HAVE_PARTITION_DB
thd->lex->part_info= NULL;
#endif
formnames.type_names=0;
if (!(screen_buff=pack_screens(create_fields,&info_length,&screens,0)))
DBUG_RETURN(1);
if (db_file == NULL)
db_file= get_new_handler((TABLE*) 0, create_info->db_type);
DBUG_ASSERT(db_file != NULL);
/* If fixed row records, we need one bit to check for deleted rows */
if (!(create_info->table_options & HA_OPTION_PACK_RECORD))
@ -136,6 +142,13 @@ bool mysql_create_frm(THD *thd, my_string file_name,
60);
forminfo[46]=(uchar) strlen((char*)forminfo+47); // Length of comment
#ifdef HAVE_PARTITION_DB
if (part_info)
{
int4store(fileinfo+55,part_info->part_info_len);
}
#endif
int2store(fileinfo+59,db_file->extra_rec_buf_length());
if (my_pwrite(file,(byte*) fileinfo,64,0L,MYF_RW) ||
my_pwrite(file,(byte*) keybuff,key_info_length,
(ulong) uint2korr(fileinfo+6),MYF_RW))
@ -144,7 +157,7 @@ bool mysql_create_frm(THD *thd, my_string file_name,
(ulong) uint2korr(fileinfo+6)+ (ulong) key_buff_length,
MY_SEEK_SET,MYF(0)));
if (make_empty_rec(thd,file,create_info->db_type,create_info->table_options,
create_fields,reclength, data_offset))
create_fields,reclength, data_offset, db_file))
goto err;
VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
@ -153,6 +166,14 @@ bool mysql_create_frm(THD *thd, my_string file_name,
pack_fields(file, create_fields, data_offset))
goto err;
#ifdef HAVE_PARTITION_DB
if (part_info)
{
if (my_write(file, (byte*) part_info->part_info_string,
part_info->part_info_len, MYF_RW))
goto err;
}
#endif
#ifdef HAVE_CRYPTED_FRM
if (create_info->password)
{
@ -211,15 +232,14 @@ err3:
Create a frm (table definition) file and the tables
SYNOPSIS
mysql_create_frm()
rea_create_table()
thd Thread handler
file_name Name of file (including database and .frm)
create_info create info parameters
create_fields Fields to create
keys number of keys to create
key_info Keys to create
db_file Handler to use. May be zero, in which case we use
create_info->db_type
file Handler to use.
RETURN
0 ok
1 error
@ -228,19 +248,21 @@ err3:
int rea_create_table(THD *thd, my_string file_name,
HA_CREATE_INFO *create_info,
List<create_field> &create_fields,
uint keys, KEY *key_info)
uint keys, KEY *key_info, handler *file)
{
DBUG_ENTER("rea_create_table");
if (mysql_create_frm(thd, file_name, create_info,
create_fields, keys, key_info, NULL))
create_fields, keys, key_info, file))
DBUG_RETURN(1);
if (file->create_handler_files(file_name))
goto err_handler;
if (!create_info->frm_only && ha_create_table(file_name,create_info,0))
{
my_delete(file_name,MYF(0));
DBUG_RETURN(1);
}
goto err_handler;
DBUG_RETURN(0);
err_handler:
my_delete(file_name, MYF(0));
DBUG_RETURN(1);
} /* rea_create_table */
@ -664,7 +686,8 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
uint table_options,
List<create_field> &create_fields,
uint reclength,
ulong data_offset)
ulong data_offset,
handler *handler)
{
int error;
Field::utype type;
@ -672,19 +695,15 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
uchar *buff,*null_pos;
TABLE table;
create_field *field;
handler *handler;
enum_check_fields old_count_cuted_fields= thd->count_cuted_fields;
DBUG_ENTER("make_empty_rec");
/* We need a table to generate columns for default values */
bzero((char*) &table,sizeof(table));
table.s= &table.share_not_to_be_used;
handler= get_new_handler((TABLE*) 0, table_type);
if (!handler ||
!(buff=(uchar*) my_malloc((uint) reclength,MYF(MY_WME | MY_ZEROFILL))))
if (!(buff=(uchar*) my_malloc((uint) reclength,MYF(MY_WME | MY_ZEROFILL))))
{
delete handler;
DBUG_RETURN(1);
}
@ -771,7 +790,6 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
err:
my_free((gptr) buff,MYF(MY_FAE));
delete handler;
thd->count_cuted_fields= old_count_cuted_fields;
DBUG_RETURN(error);
} /* make_empty_rec */

View File

@ -80,6 +80,7 @@
#define PSEUDO_TABLE_BITS (PARAM_TABLE_BIT | OUTER_REF_TABLE_BIT | \
RAND_TABLE_BIT)
#define MAX_FIELDS 4096 /* Limit in the .frm file */
#define MAX_PARTITIONS 1024
#define MAX_SORT_MEMORY (2048*1024-MALLOC_OVERHEAD)
#define MIN_SORT_MEMORY (32*1024-MALLOC_OVERHEAD)

View File

@ -30,9 +30,9 @@ class AttributeHeader {
public:
/**
* Psuedo columns
* Pseudo columns
*/
STATIC_CONST( PSUEDO = 0x8000 );
STATIC_CONST( PSEUDO = 0x8000 );
STATIC_CONST( FRAGMENT = 0xFFFE ); // Read fragment no
STATIC_CONST( ROW_COUNT = 0xFFFD ); // Read row count (committed)
STATIC_CONST( COMMIT_COUNT = 0xFFFC ); // Read commit count

View File

@ -944,6 +944,6 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_TUX_BOUND_INFO 710
#define GSN_ACC_LOCKREQ 711
#define GSN_READ_PSUEDO_REQ 712
#define GSN_READ_PSEUDO_REQ 712
#endif

View File

@ -63,6 +63,7 @@
#define MAX_FRM_DATA_SIZE 6000
#define MAX_NULL_BITS 4096
#define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES))
#define MAX_NDB_PARTITIONS 1024
#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1)
/*

View File

@ -33,14 +33,13 @@ class CreateFragmentationReq {
friend bool printCREATE_FRAGMENTATION_REQ(FILE *,
const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( SignalLength = 6 );
STATIC_CONST( SignalLength = 5 );
private:
Uint32 senderRef;
Uint32 senderData;
Uint32 fragmentationType;
Uint32 noOfFragments;
Uint32 fragmentNode;
Uint32 primaryTableId; // use same fragmentation as this table if not RNIL
};
@ -62,10 +61,11 @@ public:
enum ErrorCode {
OK = 0
,InvalidFragmentationType = 1
,InvalidNodeId = 2
,InvalidNodeType = 3
,InvalidPrimaryTable = 4
,InvalidFragmentationType = 1301
,InvalidNodeId = 1302
,InvalidNodeType = 1303
,InvalidPrimaryTable = 1304
,InvalidNodeGroup = 1305
};
private:

View File

@ -151,7 +151,12 @@ public:
AllNodesSmallTable = 0,
AllNodesMediumTable = 1,
AllNodesLargeTable = 2,
SingleFragment = 3
SingleFragment = 3,
DistrKeyHash = 4,
DistrKeyLin = 5,
UserDefined = 6,
DistrKeyUniqueHashIndex = 7,
DistrKeyOrderedIndex = 8
};
// TableType constants + objects

View File

@ -52,9 +52,9 @@ class FireTrigOrd {
friend bool printFIRE_TRIG_ORD(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo);
public:
STATIC_CONST( SignalLength = 7 );
STATIC_CONST( SignalWithGCILength = 8 );
STATIC_CONST( SignalWithHashValueLength = 9 );
STATIC_CONST( SignalLength = 8 );
STATIC_CONST( SignalWithGCILength = 9 );
STATIC_CONST( SignalWithHashValueLength = 10 );
private:
Uint32 m_connectionPtr;
@ -64,6 +64,7 @@ private:
Uint32 m_noPrimKeyWords;
Uint32 m_noBeforeValueWords;
Uint32 m_noAfterValueWords;
Uint32 fragId;
Uint32 m_gci;
Uint32 m_hashValue;
// Public methods

View File

@ -145,7 +145,10 @@ public:
FragSingle = 1, ///< Only one fragment
FragAllSmall = 2, ///< One fragment per node, default
FragAllMedium = 3, ///< two fragments per node
FragAllLarge = 4 ///< Four fragments per node.
FragAllLarge = 4, ///< Four fragments per node.
DistrKeyHash = 5,
DistrKeyLin = 6,
UserDefined = 7
};
};
@ -614,6 +617,12 @@ public:
const void* getFrmData() const;
Uint32 getFrmLength() const;
/**
* Get Node Group and Tablespace id's for fragments in table
*/
const void *getNodeGroupIds() const;
Uint32 getNodeGroupIdsLength() const;
/** @} *******************************************************************/
/**
@ -712,6 +721,11 @@ public:
*/
void setFrm(const void* data, Uint32 len);
/**
* Set node group for fragments
*/
void setNodeGroupIds(const void *data, Uint32 len);
/**
* Set table object type
*/

View File

@ -24,7 +24,6 @@ printCREATE_FRAGMENTATION_REQ(FILE * output, const Uint32 * theData,
fprintf(output, " senderData: %x\n", sig->senderData);
fprintf(output, " fragmentationType: %x\n", sig->fragmentationType);
fprintf(output, " noOfFragments: %x\n", sig->noOfFragments);
fprintf(output, " fragmentNode: %x\n", sig->fragmentNode);
if (sig->primaryTableId == RNIL)
fprintf(output, " primaryTableId: none\n");
else

View File

@ -889,7 +889,7 @@ private:
void execACC_OVER_REC(Signal* signal);
void execACC_SAVE_PAGES(Signal* signal);
void execNEXTOPERATION(Signal* signal);
void execREAD_PSUEDO_REQ(Signal* signal);
void execREAD_PSEUDO_REQ(Signal* signal);
// Received signals
void execSTTOR(Signal* signal);

View File

@ -165,7 +165,7 @@ Dbacc::Dbacc(const class Configuration & conf):
addRecSignal(GSN_ACC_OVER_REC, &Dbacc::execACC_OVER_REC);
addRecSignal(GSN_ACC_SAVE_PAGES, &Dbacc::execACC_SAVE_PAGES);
addRecSignal(GSN_NEXTOPERATION, &Dbacc::execNEXTOPERATION);
addRecSignal(GSN_READ_PSUEDO_REQ, &Dbacc::execREAD_PSUEDO_REQ);
addRecSignal(GSN_READ_PSEUDO_REQ, &Dbacc::execREAD_PSEUDO_REQ);
// Received signals
addRecSignal(GSN_STTOR, &Dbacc::execSTTOR);

View File

@ -11788,7 +11788,7 @@ void Dbacc::execSET_VAR_REQ(Signal* signal)
}//execSET_VAR_REQ()
void
Dbacc::execREAD_PSUEDO_REQ(Signal* signal){
Dbacc::execREAD_PSEUDO_REQ(Signal* signal){
jamEntry();
fragrecptr.i = signal->theData[0];
Uint32 attrId = signal->theData[1];

View File

@ -271,7 +271,6 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
req->senderData = RNIL;
req->fragmentationType = tablePtr.p->fragmentType;
req->noOfFragments = 0;
req->fragmentNode = 0;
req->primaryTableId = tablePtr.i;
EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal,
CreateFragmentationReq::SignalLength);
@ -1492,8 +1491,11 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
tablePtr.p->gciTableCreated = 0;
tablePtr.p->noOfAttributes = ZNIL;
tablePtr.p->noOfNullAttr = 0;
tablePtr.p->ngLen = 0;
memset(tablePtr.p->ngData, 0, sizeof(tablePtr.p->ngData));
tablePtr.p->frmLen = 0;
memset(tablePtr.p->frmData, 0, sizeof(tablePtr.p->frmData));
tablePtr.p->fragmentCount = 0;
/*
tablePtr.p->lh3PageIndexBits = 0;
tablePtr.p->lh3DistrBits = 0;
@ -2919,25 +2921,52 @@ Dbdict::execCREATE_TABLE_REQ(Signal* signal){
createTabPtr.p->m_fragmentsPtrI = RNIL;
createTabPtr.p->m_dihAddFragPtr = RNIL;
Uint32 * theData = signal->getDataPtrSend();
Uint32 *theData = signal->getDataPtrSend(), i;
Uint16 *node_group= (Uint16*)&signal->theData[25];
CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
req->senderRef = reference();
req->senderData = createTabPtr.p->key;
req->primaryTableId = parseRecord.tablePtr.p->primaryTableId;
req->noOfFragments = parseRecord.tablePtr.p->ngLen >> 1;
req->fragmentationType = parseRecord.tablePtr.p->fragmentType;
req->noOfFragments = 0;
req->fragmentNode = 0;
req->primaryTableId = RNIL;
for (i = 0; i < req->noOfFragments; i++)
node_group[i] = parseRecord.tablePtr.p->ngData[i];
if (parseRecord.tablePtr.p->isOrderedIndex()) {
jam();
// ordered index has same fragmentation as the table
const Uint32 primaryTableId = parseRecord.tablePtr.p->primaryTableId;
TableRecordPtr primaryTablePtr;
c_tableRecordPool.getPtr(primaryTablePtr, primaryTableId);
// fragmentationType must be consistent
req->fragmentationType = primaryTablePtr.p->fragmentType;
req->primaryTableId = primaryTableId;
req->primaryTableId = parseRecord.tablePtr.p->primaryTableId;
req->fragmentationType = DictTabInfo::DistrKeyOrderedIndex;
}
else if (parseRecord.tablePtr.p->isHashIndex())
{
jam();
/*
Unique hash indexes have the same number of fragments as the primary
table and are distributed in the same manner, but always use normal hash
fragmentation.
*/
req->primaryTableId = parseRecord.tablePtr.p->primaryTableId;
req->fragmentationType = DictTabInfo::DistrKeyUniqueHashIndex;
}
else
{
jam();
/*
Blob tables come here with primaryTableId != RNIL, but we only need
it for creating the fragments, so we set it to RNIL now that we have
what we wanted from it, to avoid other side effects.
*/
parseRecord.tablePtr.p->primaryTableId = RNIL;
}
EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal,
CreateFragmentationReq::SignalLength);
jamEntry();
if (signal->theData[0] != 0)
{
jam();
parseRecord.errorCode= signal->theData[0];
break;
}
sendSignal(DBDIH_REF, GSN_CREATE_FRAGMENTATION_REQ, signal,
CreateFragmentationReq::SignalLength, JBB);
c_blockState = BS_CREATE_TAB;
return;
@ -4884,6 +4913,10 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
tablePtr.p->frmLen = tableDesc.FrmLen;
memcpy(tablePtr.p->frmData, tableDesc.FrmData, tableDesc.FrmLen);
tablePtr.p->ngLen = tableDesc.FragmentDataLen;
memcpy(tablePtr.p->ngData, tableDesc.FragmentData,
tableDesc.FragmentDataLen);
if(tableDesc.PrimaryTableId != RNIL) {
tablePtr.p->primaryTableId = tableDesc.PrimaryTableId;
@ -6510,7 +6543,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
initialiseTableRecord(indexPtr);
if (req->getIndexType() == DictTabInfo::UniqueHashIndex) {
indexPtr.p->storedTable = opPtr.p->m_storedIndex;
indexPtr.p->fragmentType = tablePtr.p->fragmentType;
indexPtr.p->fragmentType = DictTabInfo::DistrKeyUniqueHashIndex;
} else if (req->getIndexType() == DictTabInfo::OrderedIndex) {
// first version will not supported logging
if (opPtr.p->m_storedIndex) {
@ -6520,8 +6553,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
return;
}
indexPtr.p->storedTable = false;
// follows table fragmentation
indexPtr.p->fragmentType = tablePtr.p->fragmentType;
indexPtr.p->fragmentType = DictTabInfo::DistrKeyOrderedIndex;
} else {
jam();
opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType;
@ -6645,7 +6677,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
w.add(DictTabInfo::AttributeKeyFlag, (Uint32)false);
w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
w.add(DictTabInfo::AttributeExtType, (Uint32)DictTabInfo::ExtUnsigned);
w.add(DictTabInfo::AttributeExtLength, tablePtr.p->tupKeyLength);
w.add(DictTabInfo::AttributeExtLength, tablePtr.p->tupKeyLength+1);
w.add(DictTabInfo::AttributeEnd, (Uint32)true);
}
if (indexPtr.p->isOrderedIndex()) {
@ -11834,11 +11866,19 @@ Dbdict::alterTrigger_sendReply(Signal* signal, OpAlterTriggerPtr opPtr,
* MODULE: Support routines for index and trigger.
*/
/*
This routine is used to set up the primary key attributes of the unique
hash index. Since we store the fragment id as part of the primary key,
we insert the pseudo column for getting the fragment id first in the array.
This routine is used as part of building the index.
*/
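/*
  Illustrative result (assumption based on the loop below): for a table
  with primary key (a, b) the produced list is
  { AttributeHeader::FRAGMENT, id(a), id(b) }, so the fragment id pseudo
  column always comes first.
*/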
void
Dbdict::getTableKeyList(TableRecordPtr tablePtr, AttributeList& list)
{
jam();
list.sz = 0;
list.id[list.sz++] = AttributeHeader::FRAGMENT;
for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
if (aRec->tupleKey)

View File

@ -232,6 +232,10 @@ public:
/** TODO Could preferably be made dynamic size */
Uint32 frmLen;
char frmData[MAX_FRM_DATA_SIZE];
/** Node Group and Tablespace id for this table */
/** TODO Could preferably be made dynamic size */
Uint32 ngLen;
Uint16 ngData[MAX_NDB_PARTITIONS];
Uint32 fragmentCount;
};

View File

@ -61,6 +61,7 @@
// ------------------------------------------
// Error Codes for Transactions (None so far)
// ------------------------------------------
#define ZUNDEFINED_FRAGMENT_ERROR 311
// --------------------------------------
// Error Codes for Add Table
@ -469,8 +470,10 @@ public:
TS_DROPPING = 3
};
enum Method {
HASH = 0,
NOTDEFINED = 1
LINEAR_HASH = 0,
NOTDEFINED = 1,
NORMAL_HASH = 2,
USER_DEFINED = 3
};
CopyStatus tabCopyStatus;
UpdateState tabUpdateState;

View File

@ -5491,11 +5491,9 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
#endif
}
bool ok = false;
MasterLCPConf::State lcpState;
switch (c_lcpState.lcpStatus) {
case LCP_STATUS_IDLE:
ok = true;
jam();
/*------------------------------------------------*/
/* LOCAL CHECKPOINT IS CURRENTLY NOT ACTIVE */
@ -5506,7 +5504,6 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
lcpState = MasterLCPConf::LCP_STATUS_IDLE;
break;
case LCP_STATUS_ACTIVE:
ok = true;
jam();
/*--------------------------------------------------*/
/* COPY OF RESTART INFORMATION HAS BEEN */
@ -5515,7 +5512,6 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
lcpState = MasterLCPConf::LCP_STATUS_ACTIVE;
break;
case LCP_TAB_COMPLETED:
ok = true;
jam();
/*--------------------------------------------------------*/
/* ALL LCP_REPORT'S HAVE BEEN COMPLETED FOR */
@ -5525,7 +5521,6 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
lcpState = MasterLCPConf::LCP_TAB_COMPLETED;
break;
case LCP_TAB_SAVED:
ok = true;
jam();
/*--------------------------------------------------------*/
/* ALL LCP_REPORT'S HAVE BEEN COMPLETED FOR */
@ -5549,15 +5544,15 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
break;
case LCP_COPY_GCI:
case LCP_INIT_TABLES:
ok = true;
/**
* These two states are handled by if statements above
*/
ndbrequire(false);
lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning
break;
default:
ndbrequire(false);
}//switch
ndbrequire(ok);
Uint32 failedNodeId = c_lcpState.m_MASTER_LCPREQ_FailedNodeId;
MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
@ -6158,96 +6153,136 @@ void Dbdih::execDIRELEASEREQ(Signal* signal)
3.7.1 A D D T A B L E M A I N L Y
***************************************
*/
void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
#define UNDEF_NODEGROUP 65535
static inline void inc_node_or_group(Uint32 &node, Uint32 max_node)
{
Uint32 next = node + 1;
node = (next == max_node ? 0 : next);
}
/*
Spread fragments in backwards-compatible mode
*/
static void set_default_node_groups(Signal *signal, Uint32 noFrags)
{
Uint16 *node_group_array = (Uint16*)&signal->theData[25];
Uint32 i;
node_group_array[0] = 0;
for (i = 1; i < noFrags; i++)
node_group_array[i] = UNDEF_NODEGROUP;
}
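/*
  Worked example (illustrative): for noFrags = 4 the array becomes
  [0, UNDEF_NODEGROUP, UNDEF_NODEGROUP, UNDEF_NODEGROUP]; the loop in
  execCREATE_FRAGMENTATION_REQ below then maps each UNDEF_NODEGROUP to a
  node group chosen round-robin starting from c_nextNodeGroup.
*/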
void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal)
{
Uint16 node_group_id[MAX_NDB_PARTITIONS];
jamEntry();
CreateFragmentationReq * const req =
(CreateFragmentationReq*)signal->getDataPtr();
const Uint32 senderRef = req->senderRef;
const Uint32 senderData = req->senderData;
const Uint32 fragmentNode = req->fragmentNode;
const Uint32 fragmentType = req->fragmentationType;
//const Uint32 fragmentCount = req->noOfFragments;
Uint32 noOfFragments = req->noOfFragments;
const Uint32 fragType = req->fragmentationType;
const Uint32 primaryTableId = req->primaryTableId;
Uint32 err = 0;
do {
Uint32 noOfFragments = 0;
Uint32 noOfReplicas = cnoReplicas;
switch(fragmentType){
case DictTabInfo::AllNodesSmallTable:
jam();
noOfFragments = csystemnodes;
break;
case DictTabInfo::AllNodesMediumTable:
jam();
noOfFragments = 2 * csystemnodes;
break;
case DictTabInfo::AllNodesLargeTable:
jam();
noOfFragments = 4 * csystemnodes;
break;
case DictTabInfo::SingleFragment:
jam();
noOfFragments = 1;
break;
#if 0
case DictTabInfo::SpecifiedFragmentCount:
noOfFragments = (fragmentCount == 0 ? 1 : (fragmentCount + 1)/ 2);
break;
#endif
default:
jam();
err = CreateFragmentationRef::InvalidFragmentationType;
break;
}
if(err)
break;
NodeGroupRecordPtr NGPtr;
TabRecordPtr primTabPtr;
Uint32 count = 2;
Uint16 noOfReplicas = cnoReplicas;
Uint16 *fragments = (Uint16*)(signal->theData+25);
if (primaryTableId == RNIL) {
if(fragmentNode == 0){
jam();
NGPtr.i = 0;
if(noOfFragments < csystemnodes)
{
NGPtr.i = c_nextNodeGroup;
c_nextNodeGroup = (NGPtr.i + 1 == cnoOfNodeGroups ? 0 : NGPtr.i + 1);
}
} else if(! (fragmentNode < MAX_NDB_NODES)) {
jam();
err = CreateFragmentationRef::InvalidNodeId;
} else {
jam();
const Uint32 stat = Sysfile::getNodeStatus(fragmentNode,
SYSFILE->nodeStatus);
switch (stat) {
case Sysfile::NS_Active:
case Sysfile::NS_ActiveMissed_1:
case Sysfile::NS_ActiveMissed_2:
case Sysfile::NS_TakeOver:
jam();
switch ((DictTabInfo::FragmentType)fragType)
{
/*
Backward compatibility, and for all places in the code not yet changed.
*/
case DictTabInfo::AllNodesSmallTable:
jam();
noOfFragments = csystemnodes;
set_default_node_groups(signal, noOfFragments);
break;
case Sysfile::NS_NotActive_NotTakenOver:
case DictTabInfo::AllNodesMediumTable:
jam();
noOfFragments = 2 * csystemnodes;
set_default_node_groups(signal, noOfFragments);
break;
case Sysfile::NS_HotSpare:
case DictTabInfo::AllNodesLargeTable:
jam();
case Sysfile::NS_NotDefined:
noOfFragments = 4 * csystemnodes;
set_default_node_groups(signal, noOfFragments);
break;
case DictTabInfo::SingleFragment:
jam();
noOfFragments = 1;
set_default_node_groups(signal, noOfFragments);
break;
default:
jam();
err = CreateFragmentationRef::InvalidNodeType;
if (noOfFragments == 0)
{
jam();
err = CreateFragmentationRef::InvalidFragmentationType;
}
break;
}
if (err)
break;
/*
When we come here the exact partitioning is specified
and an array of node groups is sent along as well.
*/
memcpy(&node_group_id[0], &signal->theData[25], 2 * noOfFragments);
Uint16 next_replica_node[MAX_NDB_NODES];
memset(next_replica_node,0,sizeof(next_replica_node));
Uint32 default_node_group= c_nextNodeGroup;
for(Uint32 fragNo = 0; fragNo < noOfFragments; fragNo++)
{
jam();
NGPtr.i = node_group_id[fragNo];
if (NGPtr.i == UNDEF_NODEGROUP)
{
jam();
NGPtr.i = default_node_group;
}
if (NGPtr.i > cnoOfNodeGroups)
{
jam();
err = CreateFragmentationRef::InvalidNodeGroup;
break;
}
if(err)
break;
NGPtr.i = Sysfile::getNodeGroup(fragmentNode,
SYSFILE->nodeGroups);
ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
const Uint32 max = NGPtr.p->nodeCount;
Uint32 tmp= next_replica_node[NGPtr.i];
for(Uint32 replicaNo = 0; replicaNo < noOfReplicas; replicaNo++)
{
jam();
const Uint16 nodeId = NGPtr.p->nodesInGroup[tmp];
fragments[count++]= nodeId;
inc_node_or_group(tmp, max);
}
inc_node_or_group(tmp, max);
next_replica_node[NGPtr.i]= tmp;
/**
* Next node group for next fragment
*/
inc_node_or_group(default_node_group, cnoOfNodeGroups);
}
if (err)
{
jam();
break;
}
else
{
jam();
c_nextNodeGroup = default_node_group;
}
} else {
if (primaryTableId >= ctabFileSize) {
jam();
@ -6261,49 +6296,14 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
err = CreateFragmentationRef::InvalidPrimaryTable;
break;
}
if (noOfFragments != primTabPtr.p->totalfragments) {
jam();
err = CreateFragmentationRef::InvalidFragmentationType;
break;
}
}
Uint32 count = 2;
Uint16 *fragments = (Uint16*)(signal->theData+25);
if (primaryTableId == RNIL) {
jam();
Uint8 next_replica_node[MAX_NDB_NODES];
memset(next_replica_node,0,sizeof(next_replica_node));
for(Uint32 fragNo = 0; fragNo<noOfFragments; fragNo++){
jam();
ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
const Uint32 max = NGPtr.p->nodeCount;
Uint32 tmp= next_replica_node[NGPtr.i];
for(Uint32 replicaNo = 0; replicaNo<noOfReplicas; replicaNo++)
{
jam();
const Uint32 nodeId = NGPtr.p->nodesInGroup[tmp++];
fragments[count++] = nodeId;
tmp = (tmp >= max ? 0 : tmp);
}
tmp++;
next_replica_node[NGPtr.i]= (tmp >= max ? 0 : tmp);
/**
* Next node group for next fragment
*/
NGPtr.i++;
NGPtr.i = (NGPtr.i == cnoOfNodeGroups ? 0 : NGPtr.i);
}
} else {
noOfFragments= primTabPtr.p->totalfragments;
for (Uint32 fragNo = 0;
fragNo < primTabPtr.p->totalfragments; fragNo++) {
fragNo < noOfFragments; fragNo++) {
jam();
FragmentstorePtr fragPtr;
ReplicaRecordPtr replicaPtr;
getFragstore(primTabPtr.p, fragNo, fragPtr);
fragments[count++] = fragPtr.p->preferredPrimary;
fragments[count++]= fragPtr.p->preferredPrimary;
for (replicaPtr.i = fragPtr.p->storedReplicas;
replicaPtr.i != RNIL;
replicaPtr.i = replicaPtr.p->nextReplica) {
@ -6311,9 +6311,9 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
if (replicaPtr.p->procNode != fragPtr.p->preferredPrimary) {
jam();
fragments[count++] = replicaPtr.p->procNode;
}//if
}//for
fragments[count++]= replicaPtr.p->procNode;
}
}
for (replicaPtr.i = fragPtr.p->oldStoredReplicas;
replicaPtr.i != RNIL;
replicaPtr.i = replicaPtr.p->nextReplica) {
@ -6321,25 +6321,26 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
if (replicaPtr.p->procNode != fragPtr.p->preferredPrimary) {
jam();
fragments[count++] = replicaPtr.p->procNode;
}//if
}//for
fragments[count++]= replicaPtr.p->procNode;
}
}
}
}
ndbrequire(count == (2 + noOfReplicas * noOfFragments));
ndbrequire(count == (2U + noOfReplicas * noOfFragments));
CreateFragmentationConf * const conf =
(CreateFragmentationConf*)signal->getDataPtrSend();
conf->senderRef = reference();
conf->senderData = senderData;
conf->noOfReplicas = noOfReplicas;
conf->noOfFragments = noOfFragments;
conf->noOfReplicas = (Uint32)noOfReplicas;
conf->noOfFragments = (Uint32)noOfFragments;
fragments[0] = noOfReplicas;
fragments[1] = noOfFragments;
fragments[0]= noOfReplicas;
fragments[1]= noOfFragments;
if(senderRef != 0)
{
jam();
LinearSectionPtr ptr[3];
ptr[0].p = (Uint32*)&fragments[0];
ptr[0].sz = (count + 1) / 2;
@ -6351,33 +6352,17 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
ptr,
1);
}
else
{
// Execute direct
signal->theData[0] = 0;
}
// Always ACK/NACK (here ACK)
signal->theData[0] = 0;
return;
} while(false);
if(senderRef != 0)
{
CreateFragmentationRef * const ref =
(CreateFragmentationRef*)signal->getDataPtrSend();
ref->senderRef = reference();
ref->senderData = senderData;
ref->errorCode = err;
sendSignal(senderRef, GSN_CREATE_FRAGMENTATION_REF, signal,
CreateFragmentationRef::SignalLength, JBB);
}
else
{
// Execute direct
signal->theData[0] = err;
}
// Always ACK/NACK (here NACK)
signal->theData[0] = err;
}
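
The placement loop above walks the nodes inside a node group for the replicas of one fragment, rotates the preferred primary, and then steps to the next node group for the next fragment. A compact standalone model of that behaviour, with an illustrative two-group, two-replica cluster and inc_node_or_group re-stated from the patch:

#include <cstdio>
#include <cstdint>

static inline void inc_node_or_group(uint32_t &node, uint32_t max_node)
{
  uint32_t next = node + 1;
  node = (next == max_node ? 0 : next);
}

int main()
{
  // Illustrative cluster: 2 node groups of 2 nodes each, 2 replicas.
  const uint32_t nodesInGroup[2][2] = { {1, 2}, {3, 4} };
  const uint32_t noOfReplicas = 2, cnoOfNodeGroups = 2, noFrags = 4;
  uint32_t next_replica_node[2] = { 0, 0 };
  uint32_t ng = 0;                          // default_node_group

  for (uint32_t frag = 0; frag < noFrags; frag++) {
    uint32_t tmp = next_replica_node[ng];
    printf("frag %u:", frag);
    for (uint32_t r = 0; r < noOfReplicas; r++) {
      printf(" node %u", nodesInGroup[ng][tmp]);
      inc_node_or_group(tmp, 2);            // nodeCount == 2
    }
    inc_node_or_group(tmp, 2);              // rotate primary next round
    next_replica_node[ng] = tmp;
    inc_node_or_group(ng, cnoOfNodeGroups); // next group for next frag
    printf("\n");
  }
  return 0;                                 // primaries: 1, 3, 2, 4
}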
void Dbdih::execDIADDTABREQ(Signal* signal)
{
Uint32 fragType;
jamEntry();
DiAddTabReq * const req = (DiAddTabReq*)signal->getDataPtr();
@ -6402,6 +6387,7 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
tabPtr.p->connectrec = connectPtr.i;
tabPtr.p->tableType = req->tableType;
fragType= req->fragType;
tabPtr.p->schemaVersion = req->schemaVersion;
tabPtr.p->primaryTableId = req->primaryTableId;
@ -6438,9 +6424,33 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
tabPtr.p->tabStatus = TabRecord::TS_CREATING;
tabPtr.p->storedTable = req->storedTable;
tabPtr.p->method = TabRecord::HASH;
tabPtr.p->kvalue = req->kValue;
switch ((DictTabInfo::FragmentType)fragType)
{
case DictTabInfo::AllNodesSmallTable:
case DictTabInfo::AllNodesMediumTable:
case DictTabInfo::AllNodesLargeTable:
case DictTabInfo::SingleFragment:
jam();
case DictTabInfo::DistrKeyLin:
jam();
tabPtr.p->method= TabRecord::LINEAR_HASH;
break;
case DictTabInfo::DistrKeyHash:
case DictTabInfo::DistrKeyUniqueHashIndex:
case DictTabInfo::DistrKeyOrderedIndex:
jam();
tabPtr.p->method= TabRecord::NORMAL_HASH;
break;
case DictTabInfo::UserDefined:
jam();
tabPtr.p->method= TabRecord::USER_DEFINED;
break;
default:
ndbrequire(false);
}
union {
Uint16 fragments[2 + MAX_FRAG_PER_NODE*MAX_REPLICAS*MAX_NDB_NODES];
Uint32 align;
@ -6875,17 +6885,40 @@ void Dbdih::execDIGETNODESREQ(Signal* signal)
tabPtr.i = req->tableId;
Uint32 hashValue = req->hashValue;
Uint32 ttabFileSize = ctabFileSize;
Uint32 fragId;
DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0];
TabRecord* regTabDesc = tabRecord;
jamEntry();
ptrCheckGuard(tabPtr, ttabFileSize, regTabDesc);
Uint32 fragId = hashValue & tabPtr.p->mask;
ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
if (fragId < tabPtr.p->hashpointer) {
if (tabPtr.p->method == TabRecord::LINEAR_HASH)
{
jam();
fragId = hashValue & ((tabPtr.p->mask << 1) + 1);
}//if
fragId = hashValue & tabPtr.p->mask;
ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
if (fragId < tabPtr.p->hashpointer) {
jam();
fragId = hashValue & ((tabPtr.p->mask << 1) + 1);
}//if
}
else if (tabPtr.p->method == TabRecord::NORMAL_HASH)
{
jam();
fragId= hashValue % tabPtr.p->totalfragments;
}
else
{
jam();
ndbassert(tabPtr.p->method == TabRecord::USER_DEFINED);
fragId= hashValue;
if (fragId >= tabPtr.p->totalfragments)
{
jam();
conf->zero= 1; //Indicate error;
signal->theData[1]= ZUNDEFINED_FRAGMENT_ERROR;
return;
}
}
getFragstore(tabPtr.p, fragId, fragPtr);
DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0];
Uint32 nodeCount = extractNodeInfo(fragPtr.p, conf->nodes);
Uint32 sig2 = (nodeCount - 1) +
(fragPtr.p->distributionKey << 16);
@ -8410,8 +8443,7 @@ void Dbdih::readPagesIntoTableLab(Signal* signal, Uint32 tableId)
rf.rwfTabPtr.p->hashpointer = readPageWord(&rf);
rf.rwfTabPtr.p->kvalue = readPageWord(&rf);
rf.rwfTabPtr.p->mask = readPageWord(&rf);
ndbrequire(readPageWord(&rf) == TabRecord::HASH);
rf.rwfTabPtr.p->method = TabRecord::HASH;
rf.rwfTabPtr.p->method = (TabRecord::Method)readPageWord(&rf);
/* ---------------------------------- */
/* Type of table, 2 = temporary table */
/* ---------------------------------- */
@ -8505,7 +8537,7 @@ void Dbdih::packTableIntoPagesLab(Signal* signal, Uint32 tableId)
writePageWord(&wf, tabPtr.p->hashpointer);
writePageWord(&wf, tabPtr.p->kvalue);
writePageWord(&wf, tabPtr.p->mask);
writePageWord(&wf, TabRecord::HASH);
writePageWord(&wf, tabPtr.p->method);
writePageWord(&wf, tabPtr.p->storedTable);
signal->theData[0] = DihContinueB::ZPACK_FRAG_INTO_PAGES;
@ -10947,6 +10979,7 @@ void Dbdih::initCommonData()
cnoHotSpare = 0;
cnoOfActiveTables = 0;
cnoOfNodeGroups = 0;
c_nextNodeGroup = 0;
cnoReplicas = 0;
coldgcp = 0;
coldGcpId = 0;
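
Taken together, the execDIGETNODESREQ hunk above gives three ways to map a hash value onto a fragment, one per TabRecord method. A hedged summary sketch that folds in the linear case shown earlier, with the TabRecord fields reduced to plain parameters and ZUNDEFINED_FRAGMENT_ERROR modelled as a negative return:

#include <cstdint>

enum Method { LINEAR_HASH, NORMAL_HASH, USER_DEFINED };

// Returns the fragment id, or -1 to model ZUNDEFINED_FRAGMENT_ERROR
// for an out-of-range user-defined partition id.
int32_t get_frag_id(Method m, uint32_t h, uint32_t mask,
                    uint32_t hashpointer, uint32_t totalfragments)
{
  switch (m) {
  case LINEAR_HASH: {
    uint32_t fragId = h & mask;
    if (fragId < hashpointer)                 // bucket already split
      fragId = h & ((mask << 1) + 1);
    return (int32_t)fragId;
  }
  case NORMAL_HASH:
    return (int32_t)(h % totalfragments);
  case USER_DEFINED:
    // MySQL computed the partition id itself and sent it as the hash.
    return h < totalfragments ? (int32_t)h : -1;
  }
  return -1;
}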

View File

@ -2096,7 +2096,7 @@ private:
void execSTART_EXEC_SR(Signal* signal);
void execEXEC_SRREQ(Signal* signal);
void execEXEC_SRCONF(Signal* signal);
void execREAD_PSUEDO_REQ(Signal* signal);
void execREAD_PSEUDO_REQ(Signal* signal);
void execDUMP_STATE_ORD(Signal* signal);
void execACC_COM_BLOCK(Signal* signal);

View File

@ -337,7 +337,7 @@ Dblqh::Dblqh(const class Configuration & conf):
addRecSignal(GSN_TUX_ADD_ATTRCONF, &Dblqh::execTUX_ADD_ATTRCONF);
addRecSignal(GSN_TUX_ADD_ATTRREF, &Dblqh::execTUX_ADD_ATTRREF);
addRecSignal(GSN_READ_PSUEDO_REQ, &Dblqh::execREAD_PSUEDO_REQ);
addRecSignal(GSN_READ_PSEUDO_REQ, &Dblqh::execREAD_PSEUDO_REQ);
initData();

View File

@ -2613,7 +2613,7 @@ Dblqh::updatePackedList(Signal* signal, HostRecord * ahostptr, Uint16 hostId)
}//Dblqh::updatePackedList()
void
Dblqh::execREAD_PSUEDO_REQ(Signal* signal){
Dblqh::execREAD_PSEUDO_REQ(Signal* signal){
jamEntry();
TcConnectionrecPtr regTcPtr;
regTcPtr.i = signal->theData[0];
@ -2627,7 +2627,7 @@ Dblqh::execREAD_PSUEDO_REQ(Signal* signal){
ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
signal->theData[0] = regFragptr.p->accFragptr[regTcPtr.p->localFragptr];
EXECUTE_DIRECT(DBACC, GSN_READ_PSUEDO_REQ, signal, 2);
EXECUTE_DIRECT(DBACC, GSN_READ_PSEUDO_REQ, signal, 2);
}
else
{

View File

@ -394,6 +394,13 @@ public:
*/
Uint32 fireingOperation;
/**
* The fragment id of the firing operation. This will be appended
* to the Primary Key such that the record can be found even in the
* case of user-defined partitioning.
*/
Uint32 fragId;
/**
* Used for scrapping in case of node failure
*/
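
Because a user-defined partition cannot be derived from the key, the fragId member above lets the unique-index entry carry the base fragment explicitly. A hypothetical builder for such a tuple reference (names are illustrative, not the patch's API):

#include <cstdint>
#include <vector>

// Hypothetical: build the reference stored in the index table --
// fragment id first, then the primary key words -- mirroring how the
// trigger code later inserts fragId before the key.
std::vector<uint32_t> make_tuple_ref(uint32_t fragId,
                                     const std::vector<uint32_t> &pk)
{
  std::vector<uint32_t> ref;
  ref.reserve(1 + pk.size());
  ref.push_back(fragId);
  ref.insert(ref.end(), pk.begin(), pk.end());
  return ref;
}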

View File

@ -11225,6 +11225,7 @@ void Dbtc::execFIRE_TRIG_ORD(Signal* signal)
c_firedTriggerHash.remove(trigPtr);
trigPtr.p->fragId= fireOrd->fragId;
bool ok = trigPtr.p->keyValues.getSize() == fireOrd->m_noPrimKeyWords;
ok &= trigPtr.p->afterValues.getSize() == fireOrd->m_noAfterValueWords;
ok &= trigPtr.p->beforeValues.getSize() == fireOrd->m_noBeforeValueWords;
@ -12122,7 +12123,11 @@ void Dbtc::executeIndexOperation(Signal* signal,
Uint32 dataPos = 0;
TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq;
TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
Uint32 * dataPtr = &tcKeyReq->scanInfo;
/*
dataPtr points to distrGroupHashValue since scanInfo is used to send
the fragment id of the receiving fragment
*/
Uint32 * dataPtr = &tcKeyReq->distrGroupHashValue;
Uint32 tcKeyLength = TcKeyReq::StaticLength;
Uint32 tcKeyRequestInfo = tcIndxReq->requestInfo;
TcIndexData* indexData;
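
On the TCKEYREQ side the stored reference is consumed in the opposite order: the leading word is peeled off and sent as the distribution key, and only the remainder is treated as the primary key. A matching hypothetical reader for the layout sketched earlier:

#include <cstdint>
#include <vector>
#include <utility>

// Hypothetical inverse of make_tuple_ref: first word is the fragment
// id (shipped as distribution key), the rest is the key proper.
std::pair<uint32_t, std::vector<uint32_t> >
split_tuple_ref(const std::vector<uint32_t> &ref)
{
  uint32_t fragId = ref.at(0);
  std::vector<uint32_t> pk(ref.begin() + 1, ref.end());
  return std::make_pair(fragId, pk);
}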
@ -12161,11 +12166,16 @@ void Dbtc::executeIndexOperation(Signal* signal,
regApiPtr->executingIndexOp = indexOp->indexOpId;;
regApiPtr->noIndexOp++; // Increase count
// Filter out AttributeHeader:s since this should not be in key
/*
Filter out AttributeHeader:s since these should not be in the key.
Also filter out the fragment id from the primary key and handle it
separately by setting it as the Distribution Key and setting the
indicator flag.
*/
AttributeHeader* attrHeader = (AttributeHeader *) aiIter.data;
Uint32 headerSize = attrHeader->getHeaderSize();
Uint32 keySize = attrHeader->getDataSize();
Uint32 keySize = attrHeader->getDataSize() - 1;
TcKeyReq::setKeyLength(tcKeyRequestInfo, keySize);
// Skip header
if (headerSize == 1) {
@ -12175,6 +12185,9 @@ void Dbtc::executeIndexOperation(Signal* signal,
jam();
moreKeyData = indexOp->transIdAI.next(aiIter, headerSize - 1);
}//if
tcKeyReq->scanInfo = *aiIter.data; //Fragment Id
moreKeyData = indexOp->transIdAI.next(aiIter);
TcKeyReq::setDistributionKeyFlag(tcKeyRequestInfo, 1U);
while(// If we have not read complete key
(keySize != 0) &&
(dataPos < keyBufSize)) {
@ -12584,10 +12597,11 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
moreAttrData = keyValues.next(iter, hops);
}
AttributeHeader pkAttrHeader(attrId, totalPrimaryKeyLength);
Uint32 attributesLength = afterValues.getSize() +
pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize() + 1;
TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength);
tcKeyReq->attrLen = afterValues.getSize() +
pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize();
tcKeyReq->attrLen = attributesLength;
tcKeyReq->tableId = indexData->indexId;
TcKeyReq::setOperationType(tcKeyRequestInfo, ZINSERT);
TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, true);
@ -12637,8 +12651,11 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
}
tcKeyLength += dataPos;
Uint32 attributesLength = afterValues.getSize() +
pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize();
/*
The attrinfo size covers the unique index attributes one by one, with a
header for each of them (all contained in the afterValues data
structure), plus one header, the compacted primary key, and the
fragment id that precedes the primary key.
*/
if (attributesLength <= attrBufSize) {
jam();
// ATTRINFO fits in TCKEYREQ
@ -12655,6 +12672,10 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
// as one attribute
pkAttrHeader.insertHeader(dataPtr);
dataPtr += pkAttrHeader.getHeaderSize();
/*
Insert the fragment id before the primary key as part of the tuple reference
*/
*dataPtr++ = firedTriggerData->fragId;
moreAttrData = keyValues.first(iter);
while(moreAttrData) {
jam();
@ -12819,6 +12840,29 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
pkAttrHeader.insertHeader(dataPtr);
dataPtr += pkAttrHeader.getHeaderSize();
attrInfoPos += pkAttrHeader.getHeaderSize();
/*
Add the fragment id before the primary key.
TODO: This code really needs to be converted to a long signal
to remove this messy flushing code.
*/
if (attrInfoPos == AttrInfo::DataLength)
{
jam();
// Flush ATTRINFO
#if INTERNAL_TRIGGER_TCKEYREQ_JBA
sendSignal(reference(), GSN_ATTRINFO, signal,
AttrInfo::HeaderLength + AttrInfo::DataLength, JBA);
#else
EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
AttrInfo::HeaderLength + AttrInfo::DataLength);
jamEntry();
#endif
dataPtr = (Uint32 *) &attrInfo->attrData;
attrInfoPos = 0;
}
attrInfoPos++;
*dataPtr++ = firedTriggerData->fragId;
moreAttrData = keyValues.first(iter);
while(moreAttrData) {
jam();
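
The block above has to flush a full fixed-size ATTRINFO signal before it can append the fragment id. The generic flush-when-full shape of that code, as a small sketch (the buffer size stands in for AttrInfo::DataLength and the flush callback is a placeholder):

#include <cstdint>
#include <cstddef>

const size_t DATA_LENGTH = 22;   // placeholder for AttrInfo::DataLength

// Append one word, flushing the full buffer first -- the same shape
// as the ATTRINFO handling in insertIntoIndexTable.
template <typename FlushFn>
void append_word(uint32_t *buf, size_t &pos, uint32_t w, FlushFn flush)
{
  if (pos == DATA_LENGTH) {      // buffer full: send it on its way
    flush(buf, DATA_LENGTH);
    pos = 0;
  }
  buf[pos++] = w;
}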

View File

@ -1694,7 +1694,7 @@ private:
//------------------------------------------------------------------
//------------------------------------------------------------------
bool nullFlagCheck(Uint32 attrDes2);
Uint32 read_psuedo(Uint32 attrId, Uint32* outBuffer);
Uint32 read_pseudo(Uint32 attrId, Uint32* outBuffer);
//------------------------------------------------------------------
//------------------------------------------------------------------

View File

@ -210,8 +210,8 @@ int Dbtup::readAttributes(Page* const pagePtr,
} else {
return -1;
}//if
} else if(attributeId & AttributeHeader::PSUEDO){
Uint32 sz = read_psuedo(attributeId,
} else if(attributeId & AttributeHeader::PSEUDO){
Uint32 sz = read_pseudo(attributeId,
outBuffer+tmpAttrBufIndex+1);
AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, sz);
tOutBufIndex = tmpAttrBufIndex + 1 + sz;
@ -995,7 +995,7 @@ Dbtup::updateDynSmallVarSize(Uint32* inBuffer,
}//Dbtup::updateDynSmallVarSize()
Uint32
Dbtup::read_psuedo(Uint32 attrId, Uint32* outBuffer){
Dbtup::read_pseudo(Uint32 attrId, Uint32* outBuffer){
Uint32 tmp[sizeof(SignalHeader)+25];
Signal * signal = (Signal*)&tmp;
switch(attrId){
@ -1017,7 +1017,7 @@ Dbtup::read_psuedo(Uint32 attrId, Uint32* outBuffer){
signal->theData[0] = operPtr.p->userpointer;
signal->theData[1] = attrId;
EXECUTE_DIRECT(DBLQH, GSN_READ_PSUEDO_REQ, signal, 2);
EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2);
outBuffer[0] = signal->theData[0];
outBuffer[1] = signal->theData[1];
return 2;
@ -1025,7 +1025,7 @@ Dbtup::read_psuedo(Uint32 attrId, Uint32* outBuffer){
signal->theData[0] = operPtr.p->userpointer;
signal->theData[1] = attrId;
EXECUTE_DIRECT(DBLQH, GSN_READ_PSUEDO_REQ, signal, 2);
EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2);
outBuffer[0] = signal->theData[0];
return 1;
default:

View File

@ -887,6 +887,7 @@ void Dbtup::sendFireTrigOrd(Signal* signal,
fireTrigOrd->setConnectionPtr(regOperPtr->tcOpIndex);
fireTrigOrd->setTriggerId(trigPtr->triggerId);
fireTrigOrd->fragId= regOperPtr->fragId >> 1; //Handle two local frags
switch(regOperPtr->optype) {
case(ZINSERT):

View File

@ -67,11 +67,40 @@ NdbBlob::getBlobTableName(char* btname, const NdbTableImpl* t, const NdbColumnIm
void
NdbBlob::getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnImpl* c)
{
DBUG_ENTER("NdbBlob::getBlobTable");
char btname[NdbBlobImpl::BlobTableNameSize];
getBlobTableName(btname, t, c);
bt.setName(btname);
bt.setLogging(t->getLogging());
bt.setFragmentType(t->getFragmentType());
/*
BLOB tables use the same fragmentation as the original table,
but the fragment type is changed if it is UserDefined, since BLOB
tables must be hash based so that the kernel can handle them on its own.
*/
bt.m_primaryTableId = t->m_tableId;
bt.m_ng.clear();
switch (t->getFragmentType())
{
case NdbDictionary::Object::FragAllSmall:
case NdbDictionary::Object::FragAllMedium:
case NdbDictionary::Object::FragAllLarge:
case NdbDictionary::Object::FragSingle:
bt.setFragmentType(t->getFragmentType());
break;
case NdbDictionary::Object::DistrKeyLin:
case NdbDictionary::Object::DistrKeyHash:
bt.setFragmentType(t->getFragmentType());
break;
case NdbDictionary::Object::UserDefined:
bt.setFragmentType(NdbDictionary::Object::DistrKeyHash);
break;
default:
DBUG_ASSERT(0);
break;
}
DBUG_PRINT("info",
("Create BLOB table with primary table = %u and Fragment Type = %u",
bt.m_primaryTableId, (uint)bt.getFragmentType()));
{ NdbDictionary::Column bc("PK");
bc.setType(NdbDictionary::Column::Unsigned);
assert(t->m_keyLenInWords != 0);
@ -107,6 +136,7 @@ NdbBlob::getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnIm
bc.setLength(c->getPartSize());
bt.addColumn(bc);
}
DBUG_VOID_RETURN;
}
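
The rule the switch above implements can be stated in one line: inherit the main table's fragment type unless it is UserDefined, which the kernel cannot evaluate for blob parts, so fall back to DistrKeyHash. As a sketch with a stand-in enum:

enum FragType { FragAllSmall, FragAllMedium, FragAllLarge, FragSingle,
                DistrKeyLin, DistrKeyHash, UserDefined };

// Blob part tables must be hash partitioned so the kernel can route
// parts by itself; every other type is inherited unchanged.
FragType blob_frag_type(FragType main_type)
{
  return main_type == UserDefined ? DistrKeyHash : main_type;
}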
// initialization
@ -371,8 +401,8 @@ NdbBlob::setPartKeyValue(NdbOperation* anOp, Uint32 part)
DBUG_ENTER("NdbBlob::setPartKeyValue");
DBUG_PRINT("info", ("dist=%u part=%u key=", getDistKey(part), part));
DBUG_DUMP("info", theKeyBuf.data, 4 * theTable->m_keyLenInWords);
Uint32* data = (Uint32*)theKeyBuf.data;
unsigned size = theTable->m_keyLenInWords;
//Uint32* data = (Uint32*)theKeyBuf.data;
//unsigned size = theTable->m_keyLenInWords;
// TODO use attr ids after compatibility with 4.1.7 not needed
if (anOp->equal("PK", theKeyBuf.data) == -1 ||
anOp->equal("DIST", getDistKey(part)) == -1 ||

View File

@ -412,6 +412,22 @@ NdbDictionary::Table::setFrm(const void* data, Uint32 len){
m_impl.m_frm.assign(data, len);
}
const void*
NdbDictionary::Table::getNodeGroupIds() const {
return m_impl.m_ng.get_data();
}
Uint32
NdbDictionary::Table::getNodeGroupIdsLength() const {
return m_impl.m_ng.length();
}
void
NdbDictionary::Table::setNodeGroupIds(const void* data, Uint32 noWords)
{
m_impl.m_ng.assign(data, 2*noWords);
}
NdbDictionary::Object::Status
NdbDictionary::Table::getObjectStatus() const {
return m_impl.m_status;
@ -732,8 +748,10 @@ NdbDictionary::Dictionary::~Dictionary(){
}
int
NdbDictionary::Dictionary::createTable(const Table & t){
return m_impl.createTable(NdbTableImpl::getImpl(t));
NdbDictionary::Dictionary::createTable(const Table & t)
{
DBUG_ENTER("NdbDictionary::Dictionary::createTable");
DBUG_RETURN(m_impl.createTable(NdbTableImpl::getImpl(t)));
}
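
A hedged usage sketch of the new node-group accessors, assuming the NDB API headers and a table object already set up elsewhere; note that noWords counts 16-bit entries (the implementation stores 2*noWords bytes):

#include <NdbApi.hpp>

// Hypothetical setup: 4 partitions pinned to two node groups.
void set_groups_example(NdbDictionary::Table &tab)
{
  Uint16 ng[4] = { 0, 1, 0, 1 };
  tab.setFragmentType(NdbDictionary::Object::UserDefined);
  tab.setNodeGroupIds(ng, 4);   // 4 Uint16 words -> 8 bytes stored
}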
int

View File

@ -233,7 +233,7 @@ NdbColumnImpl::equal(const NdbColumnImpl& col) const
}
NdbDictionary::Column *
NdbColumnImpl::create_psuedo(const char * name){
NdbColumnImpl::create_pseudo(const char * name){
NdbDictionary::Column * col = new NdbDictionary::Column();
col->setName(name);
if(!strcmp(name, "NDB$FRAGMENT")){
@ -302,8 +302,9 @@ void
NdbTableImpl::init(){
m_changeMask= 0;
m_tableId= RNIL;
m_primaryTableId= RNIL;
m_frm.clear();
m_fragmentType= NdbDictionary::Object::FragAllSmall;
m_fragmentType= NdbDictionary::Object::DistrKeyHash;
m_hashValueMask= 0;
m_hashpointerValue= 0;
m_logging= true;
@ -390,6 +391,7 @@ NdbTableImpl::assign(const NdbTableImpl& org)
m_externalName.assign(org.m_externalName);
m_newExternalName.assign(org.m_newExternalName);
m_frm.assign(org.m_frm.get_data(), org.m_frm.length());
m_ng.assign(org.m_ng.get_data(), org.m_ng.length());
m_fragmentType = org.m_fragmentType;
m_fragmentCount = org.m_fragmentCount;
@ -788,17 +790,17 @@ NdbDictionaryImpl::setTransporter(class Ndb* ndb,
m_globalHash->lock();
if(f_dictionary_count++ == 0){
NdbDictionary::Column::FRAGMENT=
NdbColumnImpl::create_psuedo("NDB$FRAGMENT");
NdbColumnImpl::create_pseudo("NDB$FRAGMENT");
NdbDictionary::Column::FRAGMENT_MEMORY=
NdbColumnImpl::create_psuedo("NDB$FRAGMENT_MEMORY");
NdbColumnImpl::create_pseudo("NDB$FRAGMENT_MEMORY");
NdbDictionary::Column::ROW_COUNT=
NdbColumnImpl::create_psuedo("NDB$ROW_COUNT");
NdbColumnImpl::create_pseudo("NDB$ROW_COUNT");
NdbDictionary::Column::COMMIT_COUNT=
NdbColumnImpl::create_psuedo("NDB$COMMIT_COUNT");
NdbColumnImpl::create_pseudo("NDB$COMMIT_COUNT");
NdbDictionary::Column::ROW_SIZE=
NdbColumnImpl::create_psuedo("NDB$ROW_SIZE");
NdbColumnImpl::create_pseudo("NDB$ROW_SIZE");
NdbDictionary::Column::RANGE_NO=
NdbColumnImpl::create_psuedo("NDB$RANGE_NO");
NdbColumnImpl::create_pseudo("NDB$RANGE_NO");
}
m_globalHash->unlock();
return true;
@ -1220,6 +1222,9 @@ fragmentTypeMapping[] = {
{ DictTabInfo::AllNodesMediumTable, NdbDictionary::Object::FragAllMedium },
{ DictTabInfo::AllNodesLargeTable, NdbDictionary::Object::FragAllLarge },
{ DictTabInfo::SingleFragment, NdbDictionary::Object::FragSingle },
{ DictTabInfo::DistrKeyHash, NdbDictionary::Object::DistrKeyHash },
{ DictTabInfo::DistrKeyLin, NdbDictionary::Object::DistrKeyLin },
{ DictTabInfo::UserDefined, NdbDictionary::Object::UserDefined },
{ -1, -1 }
};
@ -1293,6 +1298,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
impl->m_externalName.assign(externalName);
impl->m_frm.assign(tableDesc.FrmData, tableDesc.FrmLen);
impl->m_ng.assign(tableDesc.FragmentData, tableDesc.FragmentDataLen);
impl->m_fragmentType = (NdbDictionary::Object::FragmentType)
getApiConstant(tableDesc.FragmentType,
@ -1406,12 +1412,12 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
if(tableDesc.FragmentDataLen > 0)
{
Uint32 replicaCount = tableDesc.FragmentData[0];
Uint32 fragCount = tableDesc.FragmentData[1];
Uint16 replicaCount = tableDesc.FragmentData[0];
Uint16 fragCount = tableDesc.FragmentData[1];
impl->m_replicaCount = replicaCount;
impl->m_fragmentCount = fragCount;
DBUG_PRINT("info", ("replicaCount=%x , fragCount=%x",replicaCount,fragCount));
for(i = 0; i<(fragCount*replicaCount); i++)
{
impl->m_fragments.push_back(tableDesc.FragmentData[i+2]);
@ -1452,29 +1458,35 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
int
NdbDictionaryImpl::createTable(NdbTableImpl &t)
{
DBUG_ENTER("NdbDictionaryImpl::createTable");
if (m_receiver.createTable(m_ndb, t) != 0)
return -1;
{
DBUG_RETURN(-1);
}
if (t.m_noOfBlobs == 0)
return 0;
{
DBUG_RETURN(0);
}
// update table def from DICT
Ndb_local_table_info *info=
get_local_table_info(t.m_internalName,false);
if (info == NULL) {
m_error.code= 709;
return -1;
DBUG_RETURN(-1);
}
if (createBlobTables(*(info->m_table_impl)) != 0) {
int save_code = m_error.code;
(void)dropTable(t);
m_error.code= save_code;
return -1;
DBUG_RETURN(-1);
}
return 0;
DBUG_RETURN(0);
}
int
NdbDictionaryImpl::createBlobTables(NdbTableImpl &t)
{
DBUG_ENTER("NdbDictionaryImpl::createBlobTables");
for (unsigned i = 0; i < t.m_columns.size(); i++) {
NdbColumnImpl & c = *t.m_columns[i];
if (! c.getBlobType() || c.getPartSize() == 0)
@ -1482,23 +1494,26 @@ NdbDictionaryImpl::createBlobTables(NdbTableImpl &t)
NdbTableImpl bt;
NdbBlob::getBlobTable(bt, &t, &c);
if (createTable(bt) != 0)
return -1;
{
DBUG_RETURN(-1);
}
// Save BLOB table handle
Ndb_local_table_info *info=
get_local_table_info(bt.m_internalName, false);
if (info == 0) {
return -1;
if (info == 0)
{
DBUG_RETURN(-1);
}
c.m_blobTable = info->m_table_impl;
}
return 0;
DBUG_RETURN(0);
}
int
NdbDictionaryImpl::addBlobTables(NdbTableImpl &t)
{
unsigned n= t.m_noOfBlobs;
DBUG_ENTER("NdbDictioanryImpl::addBlobTables");
// optimized for blob column being the last one
// and not looking for more than one if not necessary
for (unsigned i = t.m_columns.size(); i > 0 && n > 0;) {
@ -1512,19 +1527,19 @@ NdbDictionaryImpl::addBlobTables(NdbTableImpl &t)
// Save BLOB table handle
NdbTableImpl * cachedBlobTable = getTable(btname);
if (cachedBlobTable == 0) {
return -1;
DBUG_RETURN(-1);
}
c.m_blobTable = cachedBlobTable;
}
return 0;
DBUG_RETURN(0);
}
int
NdbDictInterface::createTable(Ndb & ndb,
NdbTableImpl & impl)
{
return createOrAlterTable(ndb, impl, false);
DBUG_ENTER("NdbDictInterface::createTable");
DBUG_RETURN(createOrAlterTable(ndb, impl, false));
}
int NdbDictionaryImpl::alterTable(NdbTableImpl &impl)
@ -1560,7 +1575,8 @@ int
NdbDictInterface::alterTable(Ndb & ndb,
NdbTableImpl & impl)
{
return createOrAlterTable(ndb, impl, true);
DBUG_ENTER("NdbDictInterface::alterTable");
DBUG_RETURN(createOrAlterTable(ndb, impl, true));
}
int
@ -1592,7 +1608,8 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
ndb.internalize_table_name(impl.m_externalName.c_str()));
impl.m_internalName.assign(internalName);
UtilBufferWriter w(m_buffer);
DictTabInfo::Table tmpTab; tmpTab.init();
DictTabInfo::Table tmpTab;
tmpTab.init();
BaseString::snprintf(tmpTab.TableName,
sizeof(tmpTab.TableName),
internalName.c_str());
@ -1615,6 +1632,10 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
if (col->m_distributionKey)
distKeys++;
}
if (distKeys == impl.m_noOfKeys)
distKeys= 0;
impl.m_noOfDistributionKeys= distKeys;
// Check max length of frm data
if (impl.m_frm.length() > MAX_FRM_DATA_SIZE){
@ -1623,12 +1644,15 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
}
tmpTab.FrmLen = impl.m_frm.length();
memcpy(tmpTab.FrmData, impl.m_frm.get_data(), impl.m_frm.length());
tmpTab.FragmentDataLen = impl.m_ng.length();
memcpy(tmpTab.FragmentData, impl.m_ng.get_data(), impl.m_ng.length());
tmpTab.TableLoggedFlag = impl.m_logging;
tmpTab.TableKValue = impl.m_kvalue;
tmpTab.MinLoadFactor = impl.m_minLoadFactor;
tmpTab.MaxLoadFactor = impl.m_maxLoadFactor;
tmpTab.TableType = DictTabInfo::UserTable;
tmpTab.PrimaryTableId = impl.m_primaryTableId;
tmpTab.NoOfAttributes = sz;
tmpTab.FragmentType = getKernelConstant(impl.m_fragmentType,
@ -1646,6 +1670,8 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
abort();
}
DBUG_PRINT("info",("impl.m_noOfDistributionKeys: %d impl.m_noOfKeys: %d distKeys: %d",
impl.m_noOfDistributionKeys, impl.m_noOfKeys, distKeys));
if (distKeys == impl.m_noOfKeys)
distKeys= 0;
impl.m_noOfDistributionKeys= distKeys;
@ -1655,6 +1681,8 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
if(col == 0)
continue;
DBUG_PRINT("info",("column: %s(%d) col->m_distributionKey: %d",
col->m_name.c_str(), i, col->m_distributionKey));
DictTabInfo::Attribute tmpAttr; tmpAttr.init();
BaseString::snprintf(tmpAttr.AttributeName, sizeof(tmpAttr.AttributeName),
col->m_name.c_str());
@ -1685,8 +1713,14 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
}
// distribution key not supported for Char attribute
if (distKeys && col->m_distributionKey && col->m_cs != NULL) {
m_error.code= 745;
DBUG_RETURN(-1);
// we can allow this for non-var char where strxfrm does nothing
if (col->m_type == NdbDictionary::Column::Char &&
(col->m_cs->state & MY_CS_BINSORT))
;
else {
m_error.code= 745;
DBUG_RETURN(-1);
}
}
// charset in upper half of precision
if (col->getCharType()) {

View File

@ -90,7 +90,7 @@ public:
static const NdbColumnImpl & getImpl(const NdbDictionary::Column & t);
NdbDictionary::Column * m_facade;
static NdbDictionary::Column * create_psuedo(const char *);
static NdbDictionary::Column * create_pseudo(const char *);
};
class NdbTableImpl : public NdbDictionary::Table, public NdbDictObjectImpl {
@ -105,10 +105,12 @@ public:
Uint32 m_changeMask;
Uint32 m_tableId;
Uint32 m_primaryTableId;
BaseString m_internalName;
BaseString m_externalName;
BaseString m_newExternalName; // Used for alter table
UtilBuffer m_frm;
UtilBuffer m_ng;
NdbDictionary::Object::FragmentType m_fragmentType;
/**

View File

@ -9,7 +9,7 @@ atrt supports fully distributed test and utilizes ndb_cpcd.
atrt has the following main loop:
/**
* Psuedo code for atrt
* Pseudo code for atrt
*/
read config file (default d.txt)
contact each ndb_cpcd
@ -36,7 +36,7 @@ atrt has the following main loop:
done
/**
* End of psuedo code
* End of pseudo code
*/
=================================

View File

@ -226,7 +226,8 @@ RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len)
return false;
debug << "parseTableInfo " << tableImpl->getName() << " done" << endl;
tableImpl->m_ng.clear();
tableImpl->m_fragmentType = NdbDictionary::Object::FragAllSmall;
TableS * table = new TableS(m_fileHeader.NdbVersion, tableImpl);
if(table == NULL) {
return false;