WL #2604: Partition Management

Optimised version of ADD/DROP/REORGANIZE partitions for
non-NDB storage engines.
New syntax to handle REBUILD/OPTIMIZE/ANALYZE/CHECK/REPAIR partitions
Quite a few bug fixes
mikron@c-1e0be253.1238-1-64736c10.cust.bredbandsbolaget.se 2006-01-17 08:40:00 +01:00
parent f569266bfa
commit e802a94284
71 changed files with 11855 additions and 2085 deletions
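
The new management syntax takes the ALTER TABLE ... PARTITION form. A sketch
of the statement family added here (table and partition names are
illustrative):

ALTER TABLE t1 REBUILD PARTITION p0, p1;
ALTER TABLE t1 OPTIMIZE PARTITION p0;
ALTER TABLE t1 ANALYZE PARTITION p1;
ALTER TABLE t1 CHECK PARTITION p0;
ALTER TABLE t1 REPAIR PARTITION p0, p1;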

View File

@@ -143,10 +143,12 @@ void thr_unlock(THR_LOCK_DATA *data);
enum enum_thr_lock_result thr_multi_lock(THR_LOCK_DATA **data,
                                         uint count, THR_LOCK_OWNER *owner);
void thr_multi_unlock(THR_LOCK_DATA **data,uint count);
-void thr_abort_locks(THR_LOCK *lock);
+void thr_abort_locks(THR_LOCK *lock, bool upgrade_lock);
my_bool thr_abort_locks_for_thread(THR_LOCK *lock, pthread_t thread);
void thr_print_locks(void);	/* For debugging */
my_bool thr_upgrade_write_delay_lock(THR_LOCK_DATA *data);
+void thr_downgrade_write_lock(THR_LOCK_DATA *data,
+                              enum thr_lock_type new_lock_type);
my_bool thr_reschedule_write_lock(THR_LOCK_DATA *data);
#ifdef __cplusplus
}

View File

@@ -0,0 +1,747 @@
# include/partition_1.inc
#
# Partitioning tests
#
# Attention: The variable
# $engine -- Storage engine to be tested.
# must be set within the script sourcing this file.
#
--disable_abort_on_error
SET AUTOCOMMIT= 1;
##### Disabled testcases because of open bugs #####
--echo
--echo #------------------------------------------------------------------------
--echo # There are several testcases disabled because of the open bugs
--echo # #15407 , #15408 , #15890 , #15961 , #13447 , #15966 , #15968, #16370
--echo #------------------------------------------------------------------------
# Bug#15407 Partitions: crash if subpartition
let $fixed_bug15407= 0;
# Bug#15408 Partitions: subpartition names are not unique
let $fixed_bug15408= 0;
# Bug#15890 Partitions: Strange interpretation of partition number
let $fixed_bug15890= 0;
# Bug#15961 Partitions: Creation of subpart. table without subpart. rule not rejected
let $fixed_bug15961= 0;
# Bug#13447 Partitions: crash with alter table
let $fixed_bug13447= 0;
# Bug#15966 Partitions: crash if session default engine <> engine used in create table
let $fixed_bug15966= 0;
# Bug#15968 Partitions: crash when INSERT with f1 = -1 into PARTITION BY HASH(f1)
let $fixed_bug15968= 0;
# Bug #16370 Partitions: subpartitions names not mentioned in SHOW CREATE TABLE output
let $fixed_bug16370= 0;
##### Option for displaying files #####
#
# Attention: Displaying the directory content via "ls var/master-data/test/t*"
# is probably not portable.
# let $ls= 0; disables the execution of "ls ....."
let $ls= 0;
################################################################################
# Partitioning syntax
#
# CREATE TABLE .... (column-list ..)
# PARTITION BY
# KEY '(' ( column-list ) ')'
# | RANGE '(' ( expr ) ')'
# | LIST '(' ( expr ) ')'
# | HASH '(' ( expr ) ')'
# [PARTITIONS num ]
# [SUBPARTITION BY
# KEY '(' ( column-list ) ')'
# | HASH '(' ( expr ) ')'
# [SUBPARTITIONS num ]
# ]
# [ '('
# ( PARTITION logical-name
# [ VALUES LESS THAN '(' ( expr | MAXVALUE ) ')' ]
# [ VALUES IN '(' (expr)+ ')' ]
# [ TABLESPACE tablespace-name ]
# [ [ STORAGE ] ENGINE [ '=' ] storage-engine-name ]
# [ NODEGROUP nodegroup-id ]
# [ '('
# ( SUBPARTITION logical-name
# [ TABLESPACE tablespace-name ]
# [ STORAGE ENGINE = storage-engine-name ]
# [ NODEGROUP nodegroup-id ]
# )+
# ')'
# )+
# ')'
# ]
################################################################################
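# For illustration, one statement that matches this grammar (a sketch only,
# not executed by this test; table, partition and value names are examples):
#   CREATE TABLE t_example (f1 INTEGER, f2 CHAR(20))
#   PARTITION BY RANGE (f1)
#   SUBPARTITION BY HASH (f1) SUBPARTITIONS 2
#   ( PARTITION part1 VALUES LESS THAN (10)
#     (SUBPARTITION subpart11, SUBPARTITION subpart12),
#     PARTITION part2 VALUES LESS THAN (2147483647)
#     (SUBPARTITION subpart21, SUBPARTITION subpart22)
#   );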
--echo
--echo #------------------------------------------------------------------------
--echo # 0. Setting of auxiliary variables + Creation of an auxiliary table
--echo # needed in all testcases
--echo #------------------------------------------------------------------------
let $max_row= `SELECT @max_row`;
let $max_row_div2= `SELECT @max_row DIV 2`;
let $max_row_div3= `SELECT @max_row DIV 3`;
let $max_row_div4= `SELECT @max_row DIV 4`;
let $max_int_4= 2147483647;
--disable_warnings
DROP TABLE IF EXISTS t0_template;
--enable_warnings
CREATE TABLE t0_template ( f1 INTEGER, f2 char(20), PRIMARY KEY(f1))
ENGINE = MEMORY;
--echo # Logging of <max_row> INSERTs into t0_template suppressed
--disable_query_log
let $num= $max_row;
while ($num)
{
eval INSERT INTO t0_template SET f1 = $num, f2 = '---$num---';
dec $num;
}
--enable_query_log
--echo
--echo #------------------------------------------------------------------------
--echo # 1. Some syntax checks
--echo #------------------------------------------------------------------------
--echo # 1.1 Subpartitioned table without subpartitioning rule must be rejected
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
if ($fixed_bug15961)
{
# Bug#15961 Partitions: Creation of subpart. table without subpart. rule not rejected
--error 9999
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
( PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11));
}
--echo # FIXME Implement testcases, where it is checked that all create and
--echo # alter table statements
--echo # - with missing mandatory parameters are rejected
--echo # - with optional parameters are accepted
--echo # - with wrong combinations of optional parameters are rejected
--echo # - ............
--echo
--echo #------------------------------------------------------------------------
--echo # 2. Checks where the engine is assigned on all supported (CREATE TABLE
--echo # statement) positions + basic operations on the tables
--echo # Storage engine mixups are currently (2005-12-23) not supported
--echo #------------------------------------------------------------------------
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
--echo # 2.1 non partitioned table (for comparison)
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = $engine;
# MLML Full size (as check of check routine)
--source include/partition_10.inc
DROP TABLE t1;
#
--echo # 2.2 Assignment of storage engine just after column list only
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = $engine
PARTITION BY HASH(f1) PARTITIONS 2;
--source include/partition_10.inc
DROP TABLE t1;
#
--echo # 2.3 Assignment of storage engine just after partition or subpartition
--echo # name only
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1)
( PARTITION part1 STORAGE ENGINE = $engine,
PARTITION part2 STORAGE ENGINE = $engine
);
--source include/partition_10.inc
DROP TABLE t1;
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1)
( PARTITION part1 VALUES LESS THAN ($max_row_div2)
(SUBPARTITION subpart11 STORAGE ENGINE = $engine,
SUBPARTITION subpart12 STORAGE ENGINE = $engine),
PARTITION part2 VALUES LESS THAN ($max_int_4)
(SUBPARTITION subpart21 STORAGE ENGINE = $engine,
SUBPARTITION subpart22 STORAGE ENGINE = $engine)
);
--source include/partition_10.inc
DROP TABLE t1;
#
--echo # 2.4 Some but not all named partitions or subpartitions get a storage
--echo # engine assigned
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1)
( PARTITION part1 STORAGE ENGINE = $engine,
PARTITION part2
);
--source include/partition_10.inc
DROP TABLE t1;
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1)
( PARTITION part1 ,
PARTITION part2 STORAGE ENGINE = $engine
);
--source include/partition_10.inc
DROP TABLE t1;
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1)
( PARTITION part1 VALUES LESS THAN ($max_row_div2)
(SUBPARTITION subpart11,
SUBPARTITION subpart12 STORAGE ENGINE = $engine),
PARTITION part2 VALUES LESS THAN ($max_int_4)
(SUBPARTITION subpart21 STORAGE ENGINE = $engine,
SUBPARTITION subpart22 STORAGE ENGINE = $engine)
);
--source include/partition_10.inc
DROP TABLE t1;
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1)
( PARTITION part1 VALUES LESS THAN ($max_row_div2)
(SUBPARTITION subpart11 STORAGE ENGINE = $engine,
SUBPARTITION subpart12 STORAGE ENGINE = $engine),
PARTITION part2 VALUES LESS THAN ($max_int_4)
(SUBPARTITION subpart21,
SUBPARTITION subpart22 )
);
--source include/partition_10.inc
DROP TABLE t1;
#
--echo # 2.5 Storage engine assignment after partition name + after name of
--echo # subpartitions belonging to another partition
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1)
( PARTITION part1 VALUES LESS THAN ($max_row_div2) ENGINE = $engine
(SUBPARTITION subpart11,
SUBPARTITION subpart12),
PARTITION part2 VALUES LESS THAN ($max_int_4)
(SUBPARTITION subpart21 STORAGE ENGINE = $engine,
SUBPARTITION subpart22 STORAGE ENGINE = $engine)
);
--source include/partition_10.inc
DROP TABLE t1;
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1)
( PARTITION part1 VALUES LESS THAN ($max_row_div2)
(SUBPARTITION subpart11 STORAGE ENGINE = $engine,
SUBPARTITION subpart12 STORAGE ENGINE = $engine),
PARTITION part2 VALUES LESS THAN ($max_int_4) ENGINE = $engine
(SUBPARTITION subpart21,
SUBPARTITION subpart22)
);
--source include/partition_10.inc
DROP TABLE t1;
#
--echo # 2.6 Precedence of storage engine assignments
--echo # 2.6.1 Storage engine assignment after column list + after partition
--echo # or subpartition name
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = $engine
PARTITION BY HASH(f1)
( PARTITION part1 STORAGE ENGINE = $engine,
PARTITION part2 STORAGE ENGINE = $engine
);
--source include/partition_10.inc
DROP TABLE t1;
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = $engine
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1)
( PARTITION part1 VALUES LESS THAN ($max_row_div2)
(SUBPARTITION subpart11 STORAGE ENGINE = $engine,
SUBPARTITION subpart12 STORAGE ENGINE = $engine),
PARTITION part2 VALUES LESS THAN ($max_int_4)
(SUBPARTITION subpart21 STORAGE ENGINE = $engine,
SUBPARTITION subpart22 STORAGE ENGINE = $engine)
);
--source include/partition_10.inc
DROP TABLE t1;
--echo # 2.6.2 Storage engine assignment after partition name + after
--echo # subpartition name
# in the partition part + in the subpartition part
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1)
( PARTITION part1 VALUES LESS THAN ($max_row_div2) STORAGE ENGINE = $engine
(SUBPARTITION subpart11 STORAGE ENGINE = $engine,
SUBPARTITION subpart12 STORAGE ENGINE = $engine),
PARTITION part2 VALUES LESS THAN ($max_int_4)
(SUBPARTITION subpart21 STORAGE ENGINE = $engine,
SUBPARTITION subpart22 STORAGE ENGINE = $engine)
);
--source include/partition_10.inc
DROP TABLE t1;
--echo # 2.7 Session default engine differs from engine used within create table
eval SET SESSION storage_engine=$engine_other;
if ($fixed_bug15966)
{
# Bug#15966 Partitions: crash if session default engine <> engine used in create table
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1) ( PARTITION part1 ENGINE = $engine);
--source include/partition_10.inc
DROP TABLE t1;
# Bug#15966 Partitions: crash if session default engine <> engine used in create table
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1)
( PARTITION part1 VALUES LESS THAN (1000)
(SUBPARTITION subpart11 STORAGE ENGINE = $engine,
SUBPARTITION subpart12 STORAGE ENGINE = $engine));
--source include/partition_10.inc
DROP TABLE t1;
}
eval SET SESSION storage_engine=$engine;
--echo
--echo #------------------------------------------------------------------------
--echo # 3. Check assigning the number of partitions and subpartitions
--echo # with and without named partitions/subpartitions
--echo #------------------------------------------------------------------------
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
--echo # 3.1 (positive) without partition/subpartition number assignment
--echo # 3.1.1 no partition number, no named partitions
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1);
--source include/partition_10.inc
DROP TABLE t1;
--echo # 3.1.2 no partition number, named partitions
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1) (PARTITION part1, PARTITION part2);
--source include/partition_10.inc
DROP TABLE t1;
# Attention: Several combinations are impossible
# If subpartitioning exists
# - partitioning algorithm must be RANGE or LIST
# This implies the assignment of named partitions.
# - subpartitioning algorithm must be HASH or KEY
--echo # 3.1.3 variations on no partition/subpartition number, named partitions,
--echo # different subpartitions are/are not named
#
# Partition name -- "properties"
# part1 -- first/non last
# part2 -- non first/non last
# part3 -- non first/ last
#
# Testpattern:
# named subpartitions in
# Partition part1 part2 part3
# N N N
# N N Y
# N Y N
# N Y Y
# Y N N
# Y N Y
# Y Y N
# Y Y Y
--disable_query_log
let $part0= CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1);
#
eval SET @aux = '(PARTITION part1 VALUES LESS THAN ($max_row_div2),';
let $part1_N= `SELECT @AUX`;
eval SET @aux = '(PARTITION part1 VALUES LESS THAN ($max_row_div2)
(SUBPARTITION subpart11 , SUBPARTITION subpart12 ),';
let $part1_Y= `SELECT @AUX`;
#
eval SET @aux = 'PARTITION part2 VALUES LESS THAN ($max_row),';
let $part2_N= `SELECT @AUX`;
eval SET @aux = 'PARTITION part2 VALUES LESS THAN ($max_row)
(SUBPARTITION subpart21 , SUBPARTITION subpart22 ),';
let $part2_Y= `SELECT @AUX`;
#
eval SET @aux = 'PARTITION part3 VALUES LESS THAN ($max_int_4))';
let $part3_N= `SELECT @AUX`;
eval SET @aux = 'PARTITION part3 VALUES LESS THAN ($max_int_4)
(SUBPARTITION subpart31 , SUBPARTITION subpart32 ))';
let $part3_Y= `SELECT @AUX`;
--enable_query_log
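# For example, with @max_row = 200 (the value set by the sourcing wrapper
# scripts), $part0 $part1_N $part2_N $part3_Y expands to:
#   CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
#   PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
#   (PARTITION part1 VALUES LESS THAN (100),
#    PARTITION part2 VALUES LESS THAN (200),
#    PARTITION part3 VALUES LESS THAN (2147483647)
#    (SUBPARTITION subpart31 , SUBPARTITION subpart32 ));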
eval $part0 $part1_N $part2_N $part3_N ;
DROP TABLE t1;
# Bug#15407 Partitions: crash if subpartition
if ($fixed_bug15407)
{
eval $part0 $part1_N $part2_N $part3_Y ;
--source include/partition_10.inc
DROP TABLE t1;
eval $part0 $part1_N $part2_Y $part3_N ;
--source include/partition_10.inc
DROP TABLE t1;
eval $part0 $part1_N $part2_Y $part3_Y ;
--source include/partition_10.inc
DROP TABLE t1;
eval $part0 $part1_Y $part2_N $part3_N ;
--source include/partition_10.inc
DROP TABLE t1;
eval $part0 $part1_Y $part2_N $part3_Y ;
--source include/partition_10.inc
DROP TABLE t1;
eval $part0 $part1_Y $part2_Y $part3_N ;
--source include/partition_10.inc
DROP TABLE t1;
}
eval $part0 $part1_Y $part2_Y $part3_Y ;
--source include/partition_10.inc
DROP TABLE t1;
--echo # 3.2 partition/subpartition numbers good and bad values and notations
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
--echo # 3.2.1 partition/subpartition numbers INTEGER notation
# ML: "positive/negative" is my private judgement. It need not correspond
# with the server response.
# (positive) number = 2
let $part_number= 2;
--source include/partition_11.inc
# (positive) special case number = 1
let $part_number= 1;
--source include/partition_11.inc
# (negative) 0 is nonsense
let $part_number= 0;
--source include/partition_11.inc
# (negative) -1 is nonsense
let $part_number= -1;
--source include/partition_11.inc
# (negative) 1000000 is too large
let $part_number= 1000000;
--source include/partition_11.inc
if ($fixed_bug15890)
{
--echo # 3.2.2 partition/subpartition numbers DECIMAL notation
# (positive) number = 2.0
let $part_number= 2.0;
--source include/partition_11.inc
# (negative) -2.0 is nonsense
let $part_number= -2.0;
--source include/partition_11.inc
# (negative) case number = 0.0 is nonsense
let $part_number= 0.0;
--source include/partition_11.inc
# Bug#15890 Partitions: Strange interpretation of partition number
# (negative) number = 1.5 is nonsense
let $part_number= 1.5;
--source include/partition_11.inc
# (negative) number is too large
let $part_number= 999999999999999999999999999999.999999999999999999999999999999;
--source include/partition_11.inc
# (negative) number is nearly zero
let $part_number= 0.000000000000000000000000000001;
--source include/partition_11.inc
--echo # 3.2.3 partition/subpartition numbers FLOAT notation
##### FLOAT notation
# (positive) number = 2.0E+0
let $part_number= 2.0E+0;
--source include/partition_11.inc
# Bug#15890 Partitions: Strange interpretation of partition number
# (positive) number = 0.2E+1
let $part_number= 0.2E+1;
--source include/partition_11.inc
# (negative) -2.0E+0 is nonsense
let $part_number= -2.0E+0;
--source include/partition_11.inc
# (negative) 0.15E+1 is nonsense
let $part_number= 0.15E+1;
--source include/partition_11.inc
# (negative) 0.0E+300 is zero
let $part_number= 0.0E+300;
--source include/partition_11.inc
# Bug#15890 Partitions: Strange interpretation of partition number
# (negative) 1E+300 is too large
let $part_number= 1E+300;
--source include/partition_11.inc
# (negative) 1E-300 is nearly zero
let $part_number= 1E-300;
--source include/partition_11.inc
}
--echo # 3.2.4 partition/subpartition numbers STRING notation
##### STRING notation
# (negative?) case number = '2'
let $part_number= '2';
--source include/partition_11.inc
# (negative?) case number = '2.0'
let $part_number= '2.0';
--source include/partition_11.inc
# (negative?) case number = '0.2E+1'
let $part_number= '0.2E+1';
--source include/partition_11.inc
# (negative) String starts with a digit, but 'A' follows
let $part_number= '2A';
--source include/partition_11.inc
# (negative) String starts with 'A', but a digit follows
let $part_number= 'A2';
--source include/partition_11.inc
# (negative) empty string
let $part_number= '';
--source include/partition_11.inc
# (negative) string without any digits
let $part_number= 'GARBAGE';
--source include/partition_11.inc
--echo # 3.2.5 partition/subpartition numbers other notations
# (negative) String starts with a digit, but 'A' follows
let $part_number= 2A;
--source include/partition_11.inc
# (negative) String starts with 'A', but a digit follows
let $part_number= A2;
--source include/partition_11.inc
# (negative) string without any digits
let $part_number= GARBAGE;
--source include/partition_11.inc
# (negative?) double quotes
let $part_number= "2";
--source include/partition_11.inc
# (negative) String starts with a digit, but 'A' follows
let $part_number= "2A";
--source include/partition_11.inc
# (negative) String starts with 'A', but a digit follows
let $part_number= "A2";
--source include/partition_11.inc
# (negative) string without any digits
let $part_number= "GARBAGE";
--source include/partition_11.inc
--echo # 3.3 Mixups of assigned partition/subpartition numbers and names
--echo # 3.3.1 (positive) number of partition/subpartition
--echo # = number of named partition/subpartition
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1, PARTITION part2 ) ;
SHOW CREATE TABLE t1;
DROP TABLE t1;
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1) PARTITIONS 2
SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
( PARTITION part1 VALUES LESS THAN (1000)
(SUBPARTITION subpart11, SUBPARTITION subpart12),
PARTITION part2 VALUES LESS THAN (2147483647)
(SUBPARTITION subpart21, SUBPARTITION subpart22)
);
--source include/partition_layout.inc
DROP TABLE t1;
--echo # 3.3.2 (positive) number of partition/subpartition ,
--echo # 0 (= no) named partition/subpartition
--echo # already checked above
--echo # 3.3.3 (negative) number of partitions/subpartitions
--echo # > number of named partitions/subpartitions
--error 1064
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1 ) ;
# Wrong number of named subpartitions in first partition
--error 1064
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
( PARTITION part1 VALUES LESS THAN (1000)
(SUBPARTITION subpart11 ),
PARTITION part2 VALUES LESS THAN (2147483647)
(SUBPARTITION subpart21, SUBPARTITION subpart22)
);
# Wrong number of named subpartitions in non first/non last partition
--error 1064
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
( PARTITION part1 VALUES LESS THAN (1000)
(SUBPARTITION subpart11, SUBPARTITION subpart12),
PARTITION part2 VALUES LESS THAN (2000)
(SUBPARTITION subpart21 ),
PARTITION part3 VALUES LESS THAN (2147483647)
(SUBPARTITION subpart31, SUBPARTITION subpart32)
);
# Wrong number of named subpartitions in last partition
--error 1064
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1) PARTITIONS 2
SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
( PARTITION part1 VALUES LESS THAN (1000)
(SUBPARTITION subpart11, SUBPARTITION subpart12),
PARTITION part2 VALUES LESS THAN (2147483647)
(SUBPARTITION subpart21 )
);
--echo # 3.3.4 (negative) number of partitions < number of named partitions
--error 1064
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1) PARTITIONS 1 ( PARTITION part1, PARTITION part2 ) ;
# Wrong number of named subpartitions in first partition
--error 1064
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1) SUBPARTITIONS 1
( PARTITION part1 VALUES LESS THAN (1000)
(SUBPARTITION subpart11, SUBPARTITION subpart12),
PARTITION part2 VALUES LESS THAN (2147483647)
(SUBPARTITION subpart21, SUBPARTITION subpart22)
);
# Wrong number of named subpartitions in non first/non last partition
--error 1064
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1) SUBPARTITIONS 1
( PARTITION part1 VALUES LESS THAN (1000)
(SUBPARTITION subpart11, SUBPARTITION subpart12),
PARTITION part2 VALUES LESS THAN (2000)
(SUBPARTITION subpart21 ),
PARTITION part3 VALUES LESS THAN (2147483647)
(SUBPARTITION subpart31, SUBPARTITION subpart32)
);
# Wrong number of named subpartitions in last partition
--error 1064
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1) SUBPARTITIONS 1
( PARTITION part1 VALUES LESS THAN (1000)
(SUBPARTITION subpart11, SUBPARTITION subpart12),
PARTITION part2 VALUES LESS THAN (2147483647)
(SUBPARTITION subpart21, SUBPARTITION subpart22)
);
--echo
--echo #------------------------------------------------------------------------
--echo # 4. Checks of logical partition/subpartition name
--echo # file name clashes during CREATE TABLE
--echo #------------------------------------------------------------------------
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
--echo # 4.1 (negative) A partition name used more than once
--error ER_SAME_NAME_PARTITION
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1) (PARTITION part1, PARTITION part1);
#
if ($fixed_bug15408)
{
# Bug#15408 Partitions: subpartition names are not unique
--error ER_SAME_NAME_PARTITION
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1)
( PARTITION part1 VALUES LESS THAN (1000)
(SUBPARTITION subpart11, SUBPARTITION subpart11)
);
}
--echo # FIXME Implement testcases with filename problems
--echo # existing file of other table --- partition/subpartition file name
--echo # partition/subpartition file name --- file of the same table
--echo
--echo #------------------------------------------------------------------------
--echo # 5. Alter table experiments
--echo #------------------------------------------------------------------------
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
--echo # 5.1 alter table add partition
--echo # 5.1.1 (negative) add partition to non partitioned table
CREATE TABLE t1 ( f1 INTEGER, f2 char(20));
--source include/partition_layout.inc
# MyISAM gets ER_PARTITION_MGMT_ON_NONPARTITIONED and NDB 1005
# The error code of NDB differs, because all NDB tables are partitioned even
# if the CREATE TABLE does not contain a partitioning clause.
--error ER_PARTITION_MGMT_ON_NONPARTITIONED,1005
ALTER TABLE t1 ADD PARTITION (PARTITION part1);
--source include/partition_layout.inc
DROP TABLE t1;
--echo # 5.1.2 Add one partition to a table with one partition
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1);
--source include/partition_layout.inc
eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1;
--disable_query_log
eval SELECT $engine = 'NDB' INTO @aux;
let $my_exit= `SELECT @aux`;
if ($my_exit)
{
exit;
}
--enable_query_log
ALTER TABLE t1 ADD PARTITION (PARTITION part1);
--source include/partition_12.inc
DROP TABLE t1;
--echo # 5.1.3 Several times add one partition to a table with some partitions
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1) (PARTITION part1, PARTITION part3);
--source include/partition_layout.inc
eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1;
# Partition name before first existing partition name
ALTER TABLE t1 ADD PARTITION (PARTITION part0);
--source include/partition_12.inc
DELETE FROM t1;
eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1;
# Partition name between existing partition names
ALTER TABLE t1 ADD PARTITION (PARTITION part2);
--source include/partition_12.inc
DELETE FROM t1;
eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1;
if ($fixed_bug13447)
{
# Partition name after all existing partition names
# Bug#13447 Partitions: crash with alter table
ALTER TABLE t1 ADD PARTITION (PARTITION part4);
}
--source include/partition_12.inc
DROP TABLE t1;
--echo # 5.1.4 Add several partitions to a table with some partitions
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1) (PARTITION part1, PARTITION part3);
--source include/partition_layout.inc
eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1;
if ($fixed_bug13447)
{
# Bug#13447 Partitions: crash with alter table
ALTER TABLE t1 ADD PARTITION (PARTITION part0, PARTITION part2, PARTITION part4);
}
--source include/partition_12.inc
DROP TABLE t1;
--echo # 5.1.5 (negative) Add partitions to a table with some partitions
--echo # clash on new and already existing partition names
CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1) (PARTITION part1, PARTITION part2, PARTITION part3);
# Clash on first/non last partition name
--error ER_SAME_NAME_PARTITION
ALTER TABLE t1 ADD PARTITION (PARTITION part1);
# Clash on non first/non last partition name
--error ER_SAME_NAME_PARTITION
ALTER TABLE t1 ADD PARTITION (PARTITION part2);
# Clash on non first/last partition name
--error ER_SAME_NAME_PARTITION
ALTER TABLE t1 ADD PARTITION (PARTITION part3);
# Clash on all partition names
--error ER_SAME_NAME_PARTITION
ALTER TABLE t1 ADD PARTITION (PARTITION part1, PARTITION part2, PARTITION part3);
DROP TABLE t1;
# FIXME Is there any way to add a subpartition to an already existing partition
--echo # 5.2 alter table add subpartition
--echo # 5.2.1 Add one subpartition to a table with subpartitioning rule and
--echo # no explicit defined subpartitions
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1)
SUBPARTITION BY HASH(f1)
(PARTITION part1 VALUES LESS THAN ($max_row_div2));
if ($fixed_bug16370)
{
--source include/partition_layout.inc
}
eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1;
eval ALTER TABLE t1 ADD PARTITION (PARTITION part2 VALUES LESS THAN ($max_int_4)
(SUBPARTITION subpart21));
if ($fixed_bug16370)
{
--source include/partition_12.inc
}
DROP TABLE t1;

View File

@@ -0,0 +1,73 @@
# include/partition_10.inc
#
# Do some basic checks on a table.
#
# FIXME: Do not write the statements and results if the SQL return code = 0
#        and the result set is as expected. Write a message that all is as
#        expected instead.
#
# All SELECTs are written so that we get my_value = 1 when everything
# is as expected.
#
--source include/partition_layout.inc
####### Variations with multiple records
# Select on empty table
SELECT COUNT(*) = 0 AS my_value FROM t1;
# (mass) Insert of $max_row records
eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row;
# Select
eval SELECT (COUNT(*) = $max_row) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row)
AS my_value FROM t1;
# DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
# (mass) Update $max_row_div4 * 2 + 1 records
eval UPDATE t1 SET f1 = f1 + $max_row
WHERE f1 BETWEEN $max_row_div2 - $max_row_div4 AND $max_row_div2 + $max_row_div4;
# Select
eval SELECT (COUNT(*) = $max_row) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row_div2 + $max_row_div4 + $max_row )
AS my_value FROM t1;
# DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
# (mass) Delete $max_row_div4 * 2 + 1 records
eval DELETE FROM t1
WHERE f1 BETWEEN $max_row_div2 - $max_row_div4 + $max_row AND $max_row_div2 + $max_row_div4 + $max_row;
# Select
eval SELECT (COUNT(*) = $max_row - $max_row_div4 - $max_row_div4 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row)
AS my_value FROM t1;
# DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
####### Variations with single records
# Insert one record at beginning
INSERT INTO t1 SET f1 = 0 , f2 = '#######';
# Select this record
SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
# Insert one record at end
eval INSERT INTO t1 SET f1 = $max_row + 1, f2 = '#######';
# Select this record
eval SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = $max_row + 1 AND f2 = '#######';
# Update one record
eval UPDATE t1 SET f1 = $max_row + 2, f2 = 'ZZZZZZZ'
WHERE f1 = 0 AND f2 = '#######';
# Select
eval SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = $max_row + 2 AND f2 = 'ZZZZZZZ';
if ($fixed_bug15968)
{
# Bug #15968: Partitions: crash when INSERT with f1 = -1 into PARTITION BY HASH(f1)
eval UPDATE t1 SET f1 = 0 - 1, f2 = 'ZZZZZZZ'
WHERE f1 = $max_row + 1 AND f2 = '#######';
# Select
SELECT COUNT(*) AS my_value FROM t1 WHERE f1 = 0 - 1 AND f2 = 'ZZZZZZZ';
}
# Delete
eval DELETE FROM t1 WHERE f1 = $max_row + 2 AND f2 = 'ZZZZZZZ';
if ($fixed_bug15968)
{
DELETE FROM t1 WHERE f1 = 0 - 1 AND f2 = 'ZZZZZZZ';
}
# Select
SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
# Truncate
TRUNCATE t1;
# Select on empty table
SELECT COUNT(*) = 0 AS my_value FROM t1;

View File

@@ -0,0 +1,34 @@
# include/partition_11.inc
#
# Try to create a table with the given partition number
#
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY HASH(f1) PARTITIONS $part_number;
--disable_query_log
eval SET @my_errno= $mysql_errno ;
let $run= `SELECT @my_errno = 0`;
--enable_query_log
#
# If this operation was successful, check + drop this table
if ($run)
{
--source include/partition_10.inc
eval DROP TABLE t1;
}
#### Try to create a table with the given subpartition number
eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
SUBPARTITIONS $part_number
(PARTITION part1 VALUES LESS THAN ($max_row_div2), PARTITION part2 VALUES LESS THAN ($max_int_4));
--disable_query_log
eval SET @my_errno= $mysql_errno ;
let $run= `SELECT @my_errno = 0`;
--enable_query_log
#
# If this operation was successful, check + drop this table
if ($run)
{
--source include/partition_10.inc
eval DROP TABLE t1;
}

View File

@@ -0,0 +1,65 @@
# include/partition_12.inc
#
# Do some basic things on a table if the SQL command executed just before
# sourcing this file was successful.
#
--source include/partition_layout.inc
####### Variations with multiple records
# (mass) Insert max_row_div2 + 1 records
eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN $max_row_div2 AND $max_row;
# Select
eval SELECT (COUNT(*) = $max_row) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row)
AS my_value FROM t1;
# DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
# (mass) Update $max_row_div4 * 2 + 1 records
eval UPDATE t1 SET f1 = f1 + $max_row
WHERE f1 BETWEEN $max_row_div2 - $max_row_div4 AND $max_row_div2 + $max_row_div4;
# Select
eval SELECT (COUNT(*) = $max_row) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row_div2 + $max_row_div4 + $max_row )
AS my_value FROM t1;
# DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
# (mass) Delete $max_row_div4 * 2 + 1 records
eval DELETE FROM t1
WHERE f1 BETWEEN $max_row_div2 - $max_row_div4 + $max_row AND $max_row_div2 + $max_row_div4 + $max_row;
# Select
eval SELECT (COUNT(*) = $max_row - $max_row_div4 - $max_row_div4 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row)
AS my_value FROM t1;
# DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
####### Variations with single records
# Insert one record at beginning
INSERT INTO t1 SET f1 = 0 , f2 = '#######';
# Select this record
SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
# Insert one record at end
eval INSERT INTO t1 SET f1 = $max_row + 1, f2 = '#######';
# Select this record
eval SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = $max_row + 1 AND f2 = '#######';
# Update one record
eval UPDATE t1 SET f1 = $max_row + 2, f2 = 'ZZZZZZZ'
WHERE f1 = 0 AND f2 = '#######';
# Select
eval SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = $max_row + 2 AND f2 = 'ZZZZZZZ';
if ($fixed_bug15968)
{
# Bug #15968: Partitions: crash when INSERT with f1 = -1 into PARTITION BY HASH(f1)
eval UPDATE t1 SET f1 = 0 - 1, f2 = 'ZZZZZZZ'
WHERE f1 = $max_row + 1 AND f2 = '#######';
# Select
SELECT COUNT(*) AS my_value FROM t1 WHERE f1 = 0 - 1 AND f2 = 'ZZZZZZZ';
}
# Delete
eval DELETE FROM t1 WHERE f1 = $max_row + 2 AND f2 = 'ZZZZZZZ';
if ($fixed_bug15968)
{
DELETE FROM t1 WHERE f1 = 0 - 1 AND f2 = 'ZZZZZZZ';
}
# Select
SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
# Truncate
TRUNCATE t1;
# Select on empty table
SELECT COUNT(*) = 0 AS my_value FROM t1;

View File

@@ -0,0 +1,13 @@
# include/partition_layout.inc
#
# Print partitioning-related information about the table t1
#
eval SHOW CREATE TABLE t1;
# Optional (most probably issues with separators and case sensitivity)
# listing of files belonging to the table t1
if ($ls)
{
--exec ls var/master-data/test/t1*
}

View File

@@ -110,7 +110,7 @@ t3 CREATE TABLE `t3` (
`id` int(11) NOT NULL,
`name` char(255) default NULL,
PRIMARY KEY (`id`)
-) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
select * from t3;
id	name
1	Explorer

View File

@@ -9,7 +9,7 @@ t1 CREATE TABLE `t1` (
`pk1` int(11) NOT NULL,
`b` bit(64) default NULL,
PRIMARY KEY (`pk1`)
-) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
insert into t1 values
(0,b'1111111111111111111111111111111111111111111111111111111111111111'),
(1,b'1000000000000000000000000000000000000000000000000000000000000000'),

View File

@@ -13,7 +13,7 @@ Table	Create Table
gis_point	CREATE TABLE `gis_point` (
`fid` int(11) default NULL,
`g` point default NULL
-) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
SHOW FIELDS FROM gis_point;
Field	Type	Null	Key	Default	Extra
fid	int(11)	YES	NULL
@@ -471,7 +471,7 @@ Table	Create Table
gis_point	CREATE TABLE `gis_point` (
`fid` int(11) default NULL,
`g` point default NULL
-) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
SHOW FIELDS FROM gis_point;
Field	Type	Null	Key	Default	Extra
fid	int(11)	YES	NULL

View File

@@ -80,3 +80,12 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY USING HASH (`a`,`b`,`c`)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (b)
DROP TABLE t1;
CREATE TABLE t1 (a int not null primary key)
PARTITION BY KEY(a)
(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
drop table t1;
CREATE TABLE t1 (a int not null primary key);
ALTER TABLE t1
PARTITION BY KEY(a)
(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
drop table t1;

View File

@@ -65,6 +65,8 @@ partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
CREATE TABLE t2 LIKE t1;
drop table t2;
drop table t1;
CREATE TABLE t1 (
a int not null,
@@ -108,6 +110,127 @@ insert into t1 values (3);
insert into t1 values (4);
UNLOCK TABLES;
drop table t1;
CREATE TABLE t1 (a int, name VARCHAR(50), purchased DATE)
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (7),
PARTITION p2 VALUES LESS THAN (9),
PARTITION p3 VALUES LESS THAN (11));
INSERT INTO t1 VALUES
(1, 'desk organiser', '2003-10-15'),
(2, 'CD player', '1993-11-05'),
(3, 'TV set', '1996-03-10'),
(4, 'bookcase', '1982-01-10'),
(5, 'exercise bike', '2004-05-09'),
(6, 'sofa', '1987-06-05'),
(7, 'popcorn maker', '2001-11-22'),
(8, 'acquarium', '1992-08-04'),
(9, 'study desk', '1984-09-16'),
(10, 'lava lamp', '1998-12-25');
SELECT * from t1 ORDER BY a;
a name purchased
1 desk organiser 2003-10-15
2 CD player 1993-11-05
3 TV set 1996-03-10
4 bookcase 1982-01-10
5 exercise bike 2004-05-09
6 sofa 1987-06-05
7 popcorn maker 2001-11-22
8 acquarium 1992-08-04
9 study desk 1984-09-16
10 lava lamp 1998-12-25
ALTER TABLE t1 DROP PARTITION p0;
SELECT * from t1 ORDER BY a;
a name purchased
3 TV set 1996-03-10
4 bookcase 1982-01-10
5 exercise bike 2004-05-09
6 sofa 1987-06-05
7 popcorn maker 2001-11-22
8 acquarium 1992-08-04
9 study desk 1984-09-16
10 lava lamp 1998-12-25
drop table t1;
CREATE TABLE t1 (a int)
PARTITION BY LIST (a)
(PARTITION p0 VALUES IN (1,2,3), PARTITION p1 VALUES IN (4,5,6));
insert into t1 values (1),(2),(3),(4),(5),(6);
select * from t1;
a
1
2
3
4
5
6
truncate t1;
select * from t1;
a
truncate t1;
select * from t1;
a
drop table t1;
CREATE TABLE t1 (a int, b int, primary key(a,b))
PARTITION BY KEY(b,a) PARTITIONS 4;
insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
select * from t1 where a = 4;
a b
4 4
drop table t1;
CREATE TABLE t1 (a int)
PARTITION BY LIST (a)
PARTITIONS 1
(PARTITION x1 VALUES IN (1) ENGINE=MEMORY);
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) default NULL
) ENGINE=MEMORY DEFAULT CHARSET=latin1 PARTITION BY LIST (a) (PARTITION x1 VALUES IN (1) ENGINE = MEMORY)
drop table t1;
CREATE TABLE t1 (a int, unique(a))
PARTITION BY LIST (a)
(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
REPLACE t1 SET a = 4;
ERROR HY000: Table has no partition for value 4
drop table t1;
CREATE TABLE t1 (a int)
PARTITION BY LIST (a)
(PARTITION x1 VALUES IN (2), PARTITION x2 VALUES IN (3));
insert into t1 values (2), (3);
insert into t1 values (4);
ERROR HY000: Table has no partition for value 4
insert into t1 values (1);
ERROR HY000: Table has no partition for value 1
drop table t1;
CREATE TABLE t1 (a int)
PARTITION BY HASH(a)
PARTITIONS 5;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (a) PARTITIONS 5
drop table t1;
CREATE TABLE t1 (a int)
PARTITION BY RANGE (a)
(PARTITION x1 VALUES LESS THAN (2));
insert into t1 values (1);
update t1 set a = 5;
ERROR HY000: Table has no partition for value 5
drop table t1;
CREATE TABLE t1 (a int)
PARTITION BY LIST (a)
(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
drop table t1;
CREATE TABLE `t1` (
`id` int(11) default NULL
) ENGINE=BLACKHOLE DEFAULT CHARSET=latin1 PARTITION BY HASH (id) ;
SELECT * FROM t1;
id
drop table t1;
CREATE TABLE `t1` (
`id` int(11) default NULL
) ENGINE=BLACKHOLE DEFAULT CHARSET=latin1 PARTITION BY HASH (id) ;
@@ -119,8 +242,8 @@ create table t1
partition by range (a)
( partition p0 values less than(10),
partition p1 values less than (20),
-partition p2 values less than maxvalue);
-alter table t1 reorganise partition p2 into (partition p2 values less than (30));
+partition p2 values less than (25));
+alter table t1 reorganize partition p2 into (partition p2 values less than (30));
show create table t1;
Table	Create Table
t1 CREATE TABLE `t1` (
@@ -139,7 +262,7 @@ PARTITION x6 VALUES LESS THAN (14),
PARTITION x7 VALUES LESS THAN (16),
PARTITION x8 VALUES LESS THAN (18),
PARTITION x9 VALUES LESS THAN (20));
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
(PARTITION x1 VALUES LESS THAN (6));
show create table t1;
Table	Create Table

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,3 +1,4 @@
drop table if exists t1;
partition by list (a)
partitions 3
(partition x1 values in (1,2,9,4) tablespace ts1,
@@ -544,6 +545,10 @@ partitions 2
partition x2 values in (5));
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '4,
partition x2 values in (5))' at line 8
CREATE TABLE t1 (a int)
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (x1));
ERROR 42S22: Unknown column 'x1' in 'partition function'
CREATE TABLE t1(a int)
PARTITION BY RANGE (a) (PARTITION p1 VALUES LESS THAN(5));
insert into t1 values (10);

View File

@@ -1,3 +1,4 @@
drop table if exists t1;
CREATE TABLE t1 (a int, b int)
PARTITION BY RANGE (a)
(PARTITION x0 VALUES LESS THAN (2),
@@ -10,48 +11,52 @@ PARTITION x6 VALUES LESS THAN (14),
PARTITION x7 VALUES LESS THAN (16),
PARTITION x8 VALUES LESS THAN (18),
PARTITION x9 VALUES LESS THAN (20));
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (2),
PARTITION x11 VALUES LESS THAN (5));
-ERROR HY000: The new partitions cover a bigger range then the reorganised partitions do
+ERROR HY000: Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range
ALTER TABLE t1 DROP PARTITION x0, x1, x2, x3, x3;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
ALTER TABLE t1 DROP PARTITION x0, x1, x2, x10;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
ALTER TABLE t1 DROP PARTITION x10, x1, x2, x1;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
ALTER TABLE t1 DROP PARTITION x10, x1, x2, x3;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO
(PARTITION x11 VALUES LESS THAN (22));
ERROR HY000: More partitions to reorganise than there are partitions
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
(PARTITION x3 VALUES LESS THAN (6));
-ERROR HY000: All partitions must have unique names in the table
+ERROR HY000: Duplicate partition name x3
-ALTER TABLE t1 REORGANISE PARTITION x0, x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0, x2 INTO
(PARTITION x11 VALUES LESS THAN (2));
ERROR HY000: When reorganising a set of partitions they must be in consecutive order
-ALTER TABLE t1 REORGANISE PARTITION x0, x1, x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0, x1, x1 INTO
(PARTITION x11 VALUES LESS THAN (4));
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to REORGANIZE
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (5));
-ERROR HY000: The new partitions cover a bigger range then the reorganised partitions do
+ERROR HY000: Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (4),
PARTITION x11 VALUES LESS THAN (2));
+ERROR HY000: Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
+(PARTITION x01 VALUES LESS THAN (6),
+PARTITION x11 VALUES LESS THAN (4));
ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition
DROP TABLE t1;
CREATE TABLE t1 (a int)
PARTITION BY KEY (a)
PARTITIONS 2;
ALTER TABLE t1 ADD PARTITION (PARTITION p1);
-ERROR HY000: All partitions must have unique names in the table
+ERROR HY000: Duplicate partition name p1
DROP TABLE t1;
CREATE TABLE t1 (a int)
PARTITION BY KEY (a)
(PARTITION x0, PARTITION x1, PARTITION x2, PARTITION x3, PARTITION x3);
-ERROR HY000: All partitions must have unique names in the table
+ERROR HY000: Duplicate partition name x3
CREATE TABLE t1 (a int)
PARTITION BY RANGE (a)
SUBPARTITION BY KEY (a)
@@ -100,7 +105,7 @@ PARTITION x1 VALUES LESS THAN (8));
ALTER TABLE t1 ADD PARTITION PARTITIONS 1;
ERROR HY000: For RANGE partitions each partition must be defined
ALTER TABLE t1 DROP PARTITION x2;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
ALTER TABLE t1 COALESCE PARTITION 1;
ERROR HY000: COALESCE PARTITION can only be used on HASH/KEY partitions
ALTER TABLE t1 DROP PARTITION x1;

View File

@@ -19,6 +19,7 @@ innodb_concurrent : Results are not deterministic, Elliot will fix (BUG#3300)
subselect : Bug#15706
ps_7ndb : dbug assert in RBR mode when executing test suite
rpl_ddl : Bug#15963 SBR does not show "Definer" correctly
partition_03ndb : Bug#16385
events : Affects flush test case. A table lock not released somewhere
ndb_binlog_basic : Results are not deterministic, Tomas will fix
rpl_ndb_basic : Bug#16228

View File

@@ -63,3 +63,19 @@ insert into t1 values (1,"a",1,1),(2,"a",1,1),(3,"a",1,1);
show create table t1;
DROP TABLE t1;
#
# Bug #13155: Problem in Create Table using SHOW CREATE TABLE syntax
#
CREATE TABLE t1 (a int not null primary key)
PARTITION BY KEY(a)
(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
drop table t1;
CREATE TABLE t1 (a int not null primary key);
ALTER TABLE t1
PARTITION BY KEY(a)
(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
drop table t1;

View File

@@ -8,6 +8,7 @@
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# Partition by key no partition defined => OK
#
@@ -97,6 +98,9 @@ partitions 3
partition x2 tablespace ts2,
partition x3 tablespace ts3);
CREATE TABLE t2 LIKE t1;
drop table t2;
drop table t1;
#
@@ -162,6 +166,141 @@ UNLOCK TABLES;
drop table t1;
#
# Bug #13644 DROP PARTITION NULL's DATE column
#
CREATE TABLE t1 (a int, name VARCHAR(50), purchased DATE)
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (7),
PARTITION p2 VALUES LESS THAN (9),
PARTITION p3 VALUES LESS THAN (11));
INSERT INTO t1 VALUES
(1, 'desk organiser', '2003-10-15'),
(2, 'CD player', '1993-11-05'),
(3, 'TV set', '1996-03-10'),
(4, 'bookcase', '1982-01-10'),
(5, 'exercise bike', '2004-05-09'),
(6, 'sofa', '1987-06-05'),
(7, 'popcorn maker', '2001-11-22'),
(8, 'acquarium', '1992-08-04'),
(9, 'study desk', '1984-09-16'),
(10, 'lava lamp', '1998-12-25');
SELECT * from t1 ORDER BY a;
ALTER TABLE t1 DROP PARTITION p0;
SELECT * from t1 ORDER BY a;
drop table t1;
#
# Bug #13442: Truncate Partitioned table doesn't work
#
CREATE TABLE t1 (a int)
PARTITION BY LIST (a)
(PARTITION p0 VALUES IN (1,2,3), PARTITION p1 VALUES IN (4,5,6));
insert into t1 values (1),(2),(3),(4),(5),(6);
select * from t1;
truncate t1;
select * from t1;
truncate t1;
select * from t1;
drop table t1;
#
# Bug #13445 Partition by KEY method crashes server
#
CREATE TABLE t1 (a int, b int, primary key(a,b))
PARTITION BY KEY(b,a) PARTITIONS 4;
insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
select * from t1 where a = 4;
drop table t1;
#
# Bug #13438: Engine clause in PARTITION clause causes crash
#
CREATE TABLE t1 (a int)
PARTITION BY LIST (a)
PARTITIONS 1
(PARTITION x1 VALUES IN (1) ENGINE=MEMORY);
show create table t1;
drop table t1;
#
# Bug #13440: REPLACE causes crash in partitioned table
#
CREATE TABLE t1 (a int, unique(a))
PARTITION BY LIST (a)
(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
--error ER_NO_PARTITION_FOR_GIVEN_VALUE
REPLACE t1 SET a = 4;
drop table t1;
#
# Bug #14365: Crash if value too small in list partitioned table
#
CREATE TABLE t1 (a int)
PARTITION BY LIST (a)
(PARTITION x1 VALUES IN (2), PARTITION x2 VALUES IN (3));
insert into t1 values (2), (3);
--error ER_NO_PARTITION_FOR_GIVEN_VALUE
insert into t1 values (4);
--error ER_NO_PARTITION_FOR_GIVEN_VALUE
insert into t1 values (1);
drop table t1;
#
# Bug 14327: PARTITIONS clause gets lost in SHOW CREATE TABLE
#
CREATE TABLE t1 (a int)
PARTITION BY HASH(a)
PARTITIONS 5;
SHOW CREATE TABLE t1;
drop table t1;
#
# Bug #13446: Update to value outside of list values doesn't give error
#
CREATE TABLE t1 (a int)
PARTITION BY RANGE (a)
(PARTITION x1 VALUES LESS THAN (2));
insert into t1 values (1);
--error ER_NO_PARTITION_FOR_GIVEN_VALUE
update t1 set a = 5;
drop table t1;
#
# Bug #13441: Analyze on partitioned table didn't work
#
CREATE TABLE t1 (a int)
PARTITION BY LIST (a)
(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
analyze table t1;
drop table t1;
#
# BUG 14524
#
CREATE TABLE `t1` (
`id` int(11) default NULL
) ENGINE=BLACKHOLE DEFAULT CHARSET=latin1 PARTITION BY HASH (id) ;
SELECT * FROM t1;
drop table t1;
#
# BUG 14524
#
@@ -180,9 +319,9 @@ create table t1
partition by range (a)
( partition p0 values less than(10),
partition p1 values less than (20),
-partition p2 values less than maxvalue);
-alter table t1 reorganise partition p2 into (partition p2 values less than (30));
+partition p2 values less than (25));
+alter table t1 reorganize partition p2 into (partition p2 values less than (30));
show create table t1;
drop table t1;
@@ -199,7 +338,8 @@ PARTITION BY RANGE (a)
PARTITION x8 VALUES LESS THAN (18),
PARTITION x9 VALUES LESS THAN (20));
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
(PARTITION x1 VALUES LESS THAN (6));
show create table t1;
drop table t1;

View File

@@ -0,0 +1,25 @@
###############################################
# #
# Partition tests MyISAM tables #
# #
###############################################
#
# NOTE: PLEASE DO NOT ADD NON-MYISAM-SPECIFIC TESTCASES HERE!
# NON-STORAGE-SPECIFIC TESTCASES SHOULD BE ADDED TO
# THE SOURCED FILES ONLY.
#
# Storage engine to be tested
let $engine= 'MYISAM';
eval SET SESSION storage_engine=$engine;
# Other storage engine <> storage engine to be tested
let $engine_other= 'MEMORY';
# number of rows for the INSERT/UPDATE/DELETE/SELECT experiments
# on partitioned tables
# Attention: At the moment the result files fit @max_row = 200 only
SET @max_row = 200;
-- source include/partition_1.inc
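# A wrapper for another storage engine follows the same pattern; e.g. an
# InnoDB variant (a sketch, no such file is part of this commit) would only
# change the two engine variables:
#   let $engine= 'INNODB';
#   let $engine_other= 'MEMORY';
#   SET @max_row = 200;
#   -- source include/partition_1.inc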

View File

@@ -0,0 +1,26 @@
###############################################
# #
# Partition tests NDB tables #
# #
###############################################
#
# NOTE: PLEASE DO NOT ADD NON-NDB-SPECIFIC TESTCASES HERE!
# NON-STORAGE-SPECIFIC TESTCASES SHOULD BE ADDED TO
# THE SOURCED FILES ONLY.
#
# Storage engine to be tested
let $engine= 'NDB' ;
-- source include/have_ndb.inc
eval SET SESSION storage_engine=$engine;
# Other storage engine <> storage engine to be tested
let $engine_other= 'MEMORY';
# number of rows for the INSERT/UPDATE/DELETE/SELECT experiments
# on partitioned tables
# Attention: At the moment the result files fit @max_row = 200 only
SET @max_row = 200;
-- source include/partition_1.inc

View File

@@ -4,6 +4,10 @@
#
-- source include/have_partition.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# Partition by key stand-alone error
#
@@ -727,6 +731,14 @@ partitions 2
(partition x1 values in 4,
partition x2 values in (5));
#
# Bug #13439: Crash when LESS THAN (non-literal)
#
--error 1054
CREATE TABLE t1 (a int)
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (x1));
#
# No partition for the given value
#

View File

@@ -4,6 +4,10 @@
#
-- source include/have_partition.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# Try faulty DROP PARTITION and COALESCE PARTITION
#
@@ -21,7 +25,7 @@ PARTITION BY RANGE (a)
PARTITION x9 VALUES LESS THAN (20));
--error ER_REORG_OUTSIDE_RANGE
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (2),
PARTITION x11 VALUES LESS THAN (5));
@@ -38,30 +42,35 @@ ALTER TABLE t1 DROP PARTITION x10, x1, x2, x1;
ALTER TABLE t1 DROP PARTITION x10, x1, x2, x3;
--error ER_REORG_PARTITION_NOT_EXIST
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO
(PARTITION x11 VALUES LESS THAN (22));
--error ER_SAME_NAME_PARTITION
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
(PARTITION x3 VALUES LESS THAN (6));
--error ER_CONSECUTIVE_REORG_PARTITIONS
-ALTER TABLE t1 REORGANISE PARTITION x0, x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0, x2 INTO
(PARTITION x11 VALUES LESS THAN (2));
--error ER_DROP_PARTITION_NON_EXISTENT
-ALTER TABLE t1 REORGANISE PARTITION x0, x1, x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0, x1, x1 INTO
(PARTITION x11 VALUES LESS THAN (4));
--error ER_REORG_OUTSIDE_RANGE
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (5));
---error ER_RANGE_NOT_INCREASING_ERROR
+--error ER_REORG_OUTSIDE_RANGE
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (4),
PARTITION x11 VALUES LESS THAN (2));
--error ER_RANGE_NOT_INCREASING_ERROR
ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (6),
PARTITION x11 VALUES LESS THAN (4));
DROP TABLE t1; DROP TABLE t1;
CREATE TABLE t1 (a int) CREATE TABLE t1 (a int)

View File

@ -1009,7 +1009,7 @@ void thr_multi_unlock(THR_LOCK_DATA **data,uint count)
TL_WRITE_ONLY to abort any new accesses to the lock TL_WRITE_ONLY to abort any new accesses to the lock
*/ */
void thr_abort_locks(THR_LOCK *lock) void thr_abort_locks(THR_LOCK *lock, bool upgrade_lock)
{ {
THR_LOCK_DATA *data; THR_LOCK_DATA *data;
DBUG_ENTER("thr_abort_locks"); DBUG_ENTER("thr_abort_locks");
@ -1031,7 +1031,7 @@ void thr_abort_locks(THR_LOCK *lock)
lock->read_wait.last= &lock->read_wait.data; lock->read_wait.last= &lock->read_wait.data;
lock->write_wait.last= &lock->write_wait.data; lock->write_wait.last= &lock->write_wait.data;
lock->read_wait.data=lock->write_wait.data=0; lock->read_wait.data=lock->write_wait.data=0;
if (lock->write.data) if (upgrade_lock && lock->write.data)
lock->write.data->type=TL_WRITE_ONLY; lock->write.data->type=TL_WRITE_ONLY;
pthread_mutex_unlock(&lock->mutex); pthread_mutex_unlock(&lock->mutex);
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
@ -1089,6 +1089,213 @@ my_bool thr_abort_locks_for_thread(THR_LOCK *lock, pthread_t thread)
} }
/*
Downgrade a WRITE_* to a lower WRITE level
SYNOPSIS
thr_downgrade_write_lock()
in_data Lock data of thread downgrading its lock
new_lock_type New write lock type
RETURN VALUE
NONE
DESCRIPTION
This can be used to downgrade a lock already owned. When the downgrade
occurs, other waiters, both readers and writers, can be allowed to
start.
The previous lock is often TL_WRITE_ONLY but can also be
TL_WRITE and TL_WRITE_ALLOW_READ. The normal downgrade variants are
TL_WRITE_ONLY => TL_WRITE_ALLOW_READ After a short exclusive lock
TL_WRITE_ALLOW_READ => TL_WRITE_ALLOW_WRITE After discovering that the
operation didn't need such a high lock.
TL_WRITE_ONLY => TL_WRITE after a short exclusive lock while holding a
write table lock
TL_WRITE_ONLY => TL_WRITE_ALLOW_WRITE After a short exclusive lock,
having already downgraded the lock to TL_WRITE_ALLOW_WRITE earlier
The implementation is conservative: rather than go down unknown paths
it prefers not to start waiters; only the common cases are handled.
NOTE:
In its current implementation it is only allowed to downgrade from
TL_WRITE_ONLY. In this case there are no waiters. Thus no wake up
logic is required.
*/
void thr_downgrade_write_lock(THR_LOCK_DATA *in_data,
enum thr_lock_type new_lock_type)
{
THR_LOCK *lock=in_data->lock;
THR_LOCK_DATA *data, *next;
enum thr_lock_type old_lock_type= in_data->type;
bool start_writers= FALSE;
bool start_readers= FALSE;
DBUG_ENTER("thr_downgrade_write_only_lock");
pthread_mutex_lock(&lock->mutex);
DBUG_ASSERT(old_lock_type == TL_WRITE_ONLY);
DBUG_ASSERT(old_lock_type > new_lock_type);
in_data->type= new_lock_type;
check_locks(lock,"after downgrading lock",0);
#if 0
switch (old_lock_type)
{
case TL_WRITE_ONLY:
case TL_WRITE:
case TL_WRITE_LOW_PRIORITY:
/*
Previous lock was exclusive we are now ready to start up most waiting
threads.
*/
switch (new_lock_type)
{
case TL_WRITE_ALLOW_READ:
/* Still cannot start WRITE operations. Can only start readers. */
start_readers= TRUE;
break;
case TL_WRITE:
case TL_WRITE_LOW_PRIORITY:
/*
Still cannot start anything, but new requests are no longer
aborted.
*/
break;
case TL_WRITE_ALLOW_WRITE:
/*
We can start both writers and readers.
*/
start_writers= TRUE;
start_readers= TRUE;
break;
case TL_WRITE_CONCURRENT_INSERT:
case TL_WRITE_DELAYED:
/*
This routine is not designed for those. The lock will be downgraded
but no waiters will be started. This is not optimal but
should be correct behaviour.
*/
break;
default:
DBUG_ASSERT(0);
}
break;
case TL_WRITE_DELAYED:
case TL_WRITE_CONCURRENT_INSERT:
/*
This routine is not designed for those. The lock will be downgraded
but no waiters will be started. This is not optimal but
should be correct behaviour.
*/
break;
case TL_WRITE_ALLOW_READ:
DBUG_ASSERT(new_lock_type == TL_WRITE_ALLOW_WRITE);
/*
Previously writers were not allowed to start, now it is ok to
start them again. Readers are already allowed so no reason to
handle them.
*/
start_writers= TRUE;
break;
default:
DBUG_ASSERT(0);
break;
}
if (start_writers)
{
/*
At this time the only active writer can be ourselves. Thus we need
not worry about other concurrent write operations being active on
the table; we only need to worry about starting waiting operations.
We also only come here with TL_WRITE_ALLOW_WRITE as the new
lock type, thus we can also start other writers of the same type.
If we find a lock at exclusive level >= TL_WRITE_LOW_PRIORITY we
don't start any more operations, since that would mean those
operations would have to wait for requests started after them.
*/
DBUG_ASSERT(new_lock_type == TL_WRITE_ALLOW_WRITE);
for (data=lock->write_wait.data; data ; data= next)
{
/*
All WRITE requests compatible with new lock type are also
started
*/
next= data->next;
if (start_writers && data->type == new_lock_type)
{
pthread_cond_t *cond= data->cond;
/*
It is ok to start this waiter.
Move from being first in wait queue to be last in write queue.
*/
if (((*data->prev)= data->next))
data->next->prev= data->prev;
else
lock->write_wait.last= data->prev;
data->prev= lock->write.last;
lock->write.last= &data->next;
data->next= 0;
check_locks(lock, "Started write lock after downgrade",0);
data->cond= 0;
pthread_cond_signal(cond);
}
else
{
/*
We found an incompatible lock, we won't start any more write
requests to avoid letting writers pass other writers in the
queue.
*/
start_writers= FALSE;
if (data->type >= TL_WRITE_LOW_PRIORITY)
{
/*
We have an exclusive writer in the queue so we won't start
readers either.
*/
start_readers= FALSE;
}
}
}
}
if (start_readers)
{
DBUG_ASSERT(new_lock_type == TL_WRITE_ALLOW_WRITE ||
new_lock_type == TL_WRITE_ALLOW_READ);
/*
When we come here we know that the write locks are
TL_WRITE_ALLOW_WRITE or TL_WRITE_ALLOW_READ. This means that reads
are ok
*/
for (data=lock->read_wait.data; data ; data=next)
{
next= data->next;
/*
All reads are ok to start now except TL_READ_NO_INSERT when
write lock is TL_WRITE_ALLOW_READ.
*/
if (new_lock_type != TL_WRITE_ALLOW_READ ||
data->type != TL_READ_NO_INSERT)
{
pthread_cond_t *cond= data->cond;
if (((*data->prev)= data->next))
data->next->prev= data->prev;
else
lock->read_wait.last= data->prev;
data->prev= lock->read.last;
lock->read.last= &data->next;
data->next= 0;
if (data->type == TL_READ_NO_INSERT)
lock->read_no_write_count++;
check_locks(lock, "Started read lock after downgrade",0);
data->cond= 0;
pthread_cond_signal(cond);
}
}
}
check_locks(lock,"after starting waiters after downgrading lock",0);
#endif
pthread_mutex_unlock(&lock->mutex);
DBUG_VOID_RETURN;
}
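The #if 0 branch above encodes, for each downgrade target, which waiters may be started. Below is a minimal stand-alone sketch of just that decision table; the _M-suffixed names are simplified models invented here, not the real THR_LOCK API, though the enum ordering mirrors thr_lock.h.
#include <cassert>
/* Model of the lock-type ladder; order matters: higher means more exclusive */
enum thr_lock_type_model { TL_READ_M, TL_READ_NO_INSERT_M,
                           TL_WRITE_ALLOW_WRITE_M, TL_WRITE_ALLOW_READ_M,
                           TL_WRITE_CONCURRENT_INSERT_M, TL_WRITE_DELAYED_M,
                           TL_WRITE_LOW_PRIORITY_M, TL_WRITE_M, TL_WRITE_ONLY_M };
struct start_decision { bool start_readers; bool start_writers; };
/* Which waiters may be woken after downgrading from an exclusive level */
static start_decision downgrade_decision(thr_lock_type_model new_type)
{
  start_decision d= { false, false };
  switch (new_type)
  {
  case TL_WRITE_ALLOW_READ_M:   /* readers may start, writers stay queued */
    d.start_readers= true;
    break;
  case TL_WRITE_ALLOW_WRITE_M:  /* both readers and writers may start */
    d.start_readers= true;
    d.start_writers= true;
    break;
  default:                      /* TL_WRITE etc.: nobody is started yet */
    break;
  }
  return d;
}
int main()
{
  assert(downgrade_decision(TL_WRITE_ALLOW_READ_M).start_readers);
  assert(!downgrade_decision(TL_WRITE_M).start_writers);
  return 0;
}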
/* Upgrade a WRITE_DELAY lock to a WRITE_LOCK */ /* Upgrade a WRITE_DELAY lock to a WRITE_LOCK */

View File

@ -170,6 +170,8 @@ handlerton archive_hton = {
NULL, /* Start Consistent Snapshot */ NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */ NULL, /* Flush logs */
NULL, /* Show status */ NULL, /* Show status */
NULL, /* Partition flags */
NULL, /* Alter table flags */
NULL, /* Alter interface */ NULL, /* Alter interface */
HTON_NO_FLAGS HTON_NO_FLAGS
}; };

View File

@ -149,6 +149,8 @@ handlerton berkeley_hton = {
NULL, /* Start Consistent Snapshot */ NULL, /* Start Consistent Snapshot */
berkeley_flush_logs, /* Flush logs */ berkeley_flush_logs, /* Flush logs */
berkeley_show_status, /* Show status */ berkeley_show_status, /* Show status */
NULL, /* Partition flags */
NULL, /* Alter table flags */
NULL, /* Alter Tablespace */ NULL, /* Alter Tablespace */
HTON_CLOSE_CURSORS_AT_COMMIT | HTON_FLUSH_AFTER_RENAME HTON_CLOSE_CURSORS_AT_COMMIT | HTON_FLUSH_AFTER_RENAME
}; };

View File

@ -57,6 +57,8 @@ handlerton blackhole_hton= {
NULL, /* Start Consistent Snapshot */ NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */ NULL, /* Flush logs */
NULL, /* Show status */ NULL, /* Show status */
NULL, /* Partition flags */
NULL, /* Alter table flags */
NULL, /* Alter Tablespace */ NULL, /* Alter Tablespace */
HTON_CAN_RECREATE HTON_CAN_RECREATE
}; };

View File

@ -394,6 +394,8 @@ handlerton federated_hton= {
NULL, /* Start Consistent Snapshot */ NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */ NULL, /* Flush logs */
NULL, /* Show status */ NULL, /* Show status */
NULL, /* Partition flags */
NULL, /* Alter table flags */
NULL, /* Alter Tablespace */ NULL, /* Alter Tablespace */
HTON_ALTER_NOT_SUPPORTED HTON_ALTER_NOT_SUPPORTED
}; };

View File

@ -54,6 +54,8 @@ handlerton heap_hton= {
NULL, /* Start Consistent Snapshot */ NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */ NULL, /* Flush logs */
NULL, /* Show status */ NULL, /* Show status */
NULL, /* Partition flags */
NULL, /* Alter table flags */
NULL, /* Alter Tablespace */ NULL, /* Alter Tablespace */
HTON_CAN_RECREATE HTON_CAN_RECREATE
}; };

View File

@ -235,6 +235,8 @@ handlerton innobase_hton = {
innobase_start_trx_and_assign_read_view, /* Start Consistent Snapshot */ innobase_start_trx_and_assign_read_view, /* Start Consistent Snapshot */
innobase_flush_logs, /* Flush logs */ innobase_flush_logs, /* Flush logs */
innobase_show_status, /* Show status */ innobase_show_status, /* Show status */
NULL, /* Partition flags */
NULL, /* Alter table flags */
HTON_NO_FLAGS HTON_NO_FLAGS
}; };

View File

@ -86,6 +86,8 @@ handlerton myisam_hton= {
NULL, /* Start Consistent Snapshot */ NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */ NULL, /* Flush logs */
NULL, /* Show status */ NULL, /* Show status */
NULL, /* Partition flags */
NULL, /* Alter table flags */
NULL, /* Alter Tablespace */ NULL, /* Alter Tablespace */
HTON_CAN_RECREATE HTON_CAN_RECREATE
}; };

View File

@ -64,6 +64,8 @@ handlerton myisammrg_hton= {
NULL, /* Start Consistent Snapshot */ NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */ NULL, /* Flush logs */
NULL, /* Show status */ NULL, /* Show status */
NULL, /* Partition flags */
NULL, /* Alter table flags */
NULL, /* Alter Tablespace */ NULL, /* Alter Tablespace */
HTON_CAN_RECREATE HTON_CAN_RECREATE
}; };

View File

@ -52,6 +52,8 @@ static const int parallelism= 0;
// createable against NDB from this handler // createable against NDB from this handler
static const int max_transactions= 3; // should really be 2, but one transaction too many is allocated when LOCK TABLES is used static const int max_transactions= 3; // should really be 2, but one transaction too many is allocated when LOCK TABLES is used
static uint ndbcluster_partition_flags();
static uint ndbcluster_alter_table_flags(uint flags);
static bool ndbcluster_init(void); static bool ndbcluster_init(void);
static int ndbcluster_end(ha_panic_function flag); static int ndbcluster_end(ha_panic_function flag);
static bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type); static bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type);
@ -72,6 +74,23 @@ static handler *ndbcluster_create_handler(TABLE_SHARE *table)
return new ha_ndbcluster(table); return new ha_ndbcluster(table);
} }
static uint ndbcluster_partition_flags()
{
return (HA_CAN_PARTITION | HA_CAN_UPDATE_PARTITION_KEY |
HA_CAN_PARTITION_UNIQUE | HA_USE_AUTO_PARTITION);
}
static uint ndbcluster_alter_table_flags(uint flags)
{
if (flags & ALTER_DROP_PARTITION)
return 0;
else
return (HA_ONLINE_ADD_INDEX | HA_ONLINE_DROP_INDEX |
HA_ONLINE_ADD_UNIQUE_INDEX | HA_ONLINE_DROP_UNIQUE_INDEX |
HA_PARTITION_FUNCTION_SUPPORTED);
}
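For context, this is roughly how a caller could consult the two new handlerton hooks before attempting a fast partition ALTER. Everything below (handlerton_model, can_fast_alter_partition) is invented for illustration and is not the server's actual decision path.
/* Hypothetical caller-side sketch; NULL hooks mean "not partition-aware" */
struct handlerton_model
{
  unsigned (*partition_flags)();
  unsigned (*alter_table_flags)(unsigned flags);
};
static bool can_fast_alter_partition(const handlerton_model *hton,
                                     unsigned requested_alter_flags)
{
  if (!hton->partition_flags || !hton->alter_table_flags)
    return false;             /* engine does not support partitioning */
  /* Engine must support partitioning and accept this ALTER variant */
  return hton->partition_flags() != 0 &&
         hton->alter_table_flags(requested_alter_flags) != 0;
}
int main()
{
  handlerton_model no_support= { 0, 0 };
  return can_fast_alter_partition(&no_support, 1) ? 1 : 0;
}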
#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8 #define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
#define NDB_FAILED_AUTO_INCREMENT ~(Uint64)0 #define NDB_FAILED_AUTO_INCREMENT ~(Uint64)0
@ -117,10 +136,6 @@ static int rename_share(NDB_SHARE *share, const char *new_key);
#endif #endif
static void ndb_set_fragmentation(NDBTAB &tab, TABLE *table, uint pk_len); static void ndb_set_fragmentation(NDBTAB &tab, TABLE *table, uint pk_len);
static int packfrm(const void *data, uint len, const void **pack_data, uint *pack_len);
static int unpackfrm(const void **data, uint *len,
const void* pack_data);
static int ndb_get_table_statistics(Ndb*, const char *, static int ndb_get_table_statistics(Ndb*, const char *,
struct Ndb_statistics *); struct Ndb_statistics *);
@ -348,7 +363,7 @@ struct Ndb_local_table_statistics {
void ha_ndbcluster::set_rec_per_key() void ha_ndbcluster::set_rec_per_key()
{ {
DBUG_ENTER("ha_ndbcluster::get_status_const"); DBUG_ENTER("ha_ndbcluster::get_status_const");
for (uint i=0 ; i < table->s->keys ; i++) for (uint i=0 ; i < table_share->keys ; i++)
{ {
table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= 1; table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= 1;
} }
@ -447,7 +462,7 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
*/ */
void void
ha_ndbcluster::invalidate_dictionary_cache(TABLE *table, Ndb *ndb, ha_ndbcluster::invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb,
const char *tabname, bool global) const char *tabname, bool global)
{ {
NDBDICT *dict= ndb->getDictionary(); NDBDICT *dict= ndb->getDictionary();
@ -470,16 +485,16 @@ ha_ndbcluster::invalidate_dictionary_cache(TABLE *table, Ndb *ndb,
} }
else else
dict->removeCachedTable(tabname); dict->removeCachedTable(tabname);
table->s->version=0L; /* Free when thread is ready */ share->version=0L; /* Free when thread is ready */
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
} }
void ha_ndbcluster::invalidate_dictionary_cache(bool global) void ha_ndbcluster::invalidate_dictionary_cache(bool global)
{ {
NDBDICT *dict= get_ndb()->getDictionary(); NDBDICT *dict= get_ndb()->getDictionary();
invalidate_dictionary_cache(table, get_ndb(), m_tabname, global); invalidate_dictionary_cache(table_share, get_ndb(), m_tabname, global);
/* Invalidate indexes */ /* Invalidate indexes */
for (uint i= 0; i < table->s->keys; i++) for (uint i= 0; i < table_share->keys; i++)
{ {
NDBINDEX *index = (NDBINDEX *) m_index[i].index; NDBINDEX *index = (NDBINDEX *) m_index[i].index;
NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index; NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
@ -549,7 +564,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
if (res == HA_ERR_FOUND_DUPP_KEY) if (res == HA_ERR_FOUND_DUPP_KEY)
{ {
if (m_rows_to_insert == 1) if (m_rows_to_insert == 1)
m_dupkey= table->s->primary_key; m_dupkey= table_share->primary_key;
else else
{ {
/* We are batching inserts, offending key is not available */ /* We are batching inserts, offending key is not available */
@ -788,7 +803,7 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob)
for (int loop= 0; loop <= 1; loop++) for (int loop= 0; loop <= 1; loop++)
{ {
uint32 offset= 0; uint32 offset= 0;
for (uint i= 0; i < table->s->fields; i++) for (uint i= 0; i < table_share->fields; i++)
{ {
Field *field= table->field[i]; Field *field= table->field[i];
NdbValue value= m_value[i]; NdbValue value= m_value[i];
@ -892,10 +907,10 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
*/ */
bool ha_ndbcluster::uses_blob_value() bool ha_ndbcluster::uses_blob_value()
{ {
if (table->s->blob_fields == 0) if (table_share->blob_fields == 0)
return FALSE; return FALSE;
{ {
uint no_fields= table->s->fields; uint no_fields= table_share->fields;
int i; int i;
// They always put blobs at the end.. // They always put blobs at the end..
for (i= no_fields - 1; i >= 0; i--) for (i= no_fields - 1; i >= 0; i--)
@ -1423,7 +1438,7 @@ static void shrink_varchar(Field* field, const byte* & ptr, char* buf)
int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key) int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key)
{ {
KEY* key_info= table->key_info + table->s->primary_key; KEY* key_info= table->key_info + table_share->primary_key;
KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* key_part= key_info->key_part;
KEY_PART_INFO* end= key_part+key_info->key_parts; KEY_PART_INFO* end= key_part+key_info->key_parts;
DBUG_ENTER("set_primary_key"); DBUG_ENTER("set_primary_key");
@ -1445,7 +1460,7 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key)
int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *record) int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *record)
{ {
KEY* key_info= table->key_info + table->s->primary_key; KEY* key_info= table->key_info + table_share->primary_key;
KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* key_part= key_info->key_part;
KEY_PART_INFO* end= key_part+key_info->key_parts; KEY_PART_INFO* end= key_part+key_info->key_parts;
DBUG_ENTER("set_primary_key_from_record"); DBUG_ENTER("set_primary_key_from_record");
@ -1490,7 +1505,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
DBUG_ENTER("define_read_attrs"); DBUG_ENTER("define_read_attrs");
// Define attributes to read // Define attributes to read
for (i= 0; i < table->s->fields; i++) for (i= 0; i < table_share->fields; i++)
{ {
Field *field= table->field[i]; Field *field= table->field[i];
if (ha_get_bit_in_read_set(i+1) || if (ha_get_bit_in_read_set(i+1) ||
@ -1505,11 +1520,11 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
} }
} }
if (table->s->primary_key == MAX_KEY) if (table_share->primary_key == MAX_KEY)
{ {
DBUG_PRINT("info", ("Getting hidden key")); DBUG_PRINT("info", ("Getting hidden key"));
// Scanning table with no primary key // Scanning table with no primary key
int hidden_no= table->s->fields; int hidden_no= table_share->fields;
#ifndef DBUG_OFF #ifndef DBUG_OFF
const NDBTAB *tab= (const NDBTAB *) m_table; const NDBTAB *tab= (const NDBTAB *) m_table;
if (!tab->getColumn(hidden_no)) if (!tab->getColumn(hidden_no))
@ -1529,7 +1544,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf, int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
uint32 part_id) uint32 part_id)
{ {
uint no_fields= table->s->fields; uint no_fields= table_share->fields;
NdbConnection *trans= m_active_trans; NdbConnection *trans= m_active_trans;
NdbOperation *op; NdbOperation *op;
@ -1547,7 +1562,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
if (m_use_partition_function) if (m_use_partition_function)
op->setPartitionId(part_id); op->setPartitionId(part_id);
if (table->s->primary_key == MAX_KEY) if (table_share->primary_key == MAX_KEY)
{ {
// This table has no primary key, use "hidden" primary key // This table has no primary key, use "hidden" primary key
DBUG_PRINT("info", ("Using hidden key")); DBUG_PRINT("info", ("Using hidden key"));
@ -1587,7 +1602,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data, int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data,
uint32 old_part_id) uint32 old_part_id)
{ {
uint no_fields= table->s->fields, i; uint no_fields= table_share->fields, i;
NdbTransaction *trans= m_active_trans; NdbTransaction *trans= m_active_trans;
NdbOperation *op; NdbOperation *op;
DBUG_ENTER("complemented_pk_read"); DBUG_ENTER("complemented_pk_read");
@ -2135,13 +2150,13 @@ int ha_ndbcluster::write_row(byte *record)
DBUG_ENTER("write_row"); DBUG_ENTER("write_row");
if (!m_use_write && m_ignore_dup_key && table->s->primary_key != MAX_KEY) if (!m_use_write && m_ignore_dup_key && table_share->primary_key != MAX_KEY)
{ {
int peek_res= peek_row(record); int peek_res= peek_row(record);
if (!peek_res) if (!peek_res)
{ {
m_dupkey= table->s->primary_key; m_dupkey= table_share->primary_key;
DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
} }
if (peek_res != HA_ERR_KEY_NOT_FOUND) if (peek_res != HA_ERR_KEY_NOT_FOUND)
@ -2171,7 +2186,7 @@ int ha_ndbcluster::write_row(byte *record)
op->setPartitionId(part_id); op->setPartitionId(part_id);
} }
if (table->s->primary_key == MAX_KEY) if (table_share->primary_key == MAX_KEY)
{ {
// Table has hidden primary key // Table has hidden primary key
Ndb *ndb= get_ndb(); Ndb *ndb= get_ndb();
@ -2184,7 +2199,7 @@ int ha_ndbcluster::write_row(byte *record)
ndb->getNdbError().status == NdbError::TemporaryError); ndb->getNdbError().status == NdbError::TemporaryError);
if (auto_value == NDB_FAILED_AUTO_INCREMENT) if (auto_value == NDB_FAILED_AUTO_INCREMENT)
ERR_RETURN(ndb->getNdbError()); ERR_RETURN(ndb->getNdbError());
if (set_hidden_key(op, table->s->fields, (const byte*)&auto_value)) if (set_hidden_key(op, table_share->fields, (const byte*)&auto_value))
ERR_RETURN(op->getNdbError()); ERR_RETURN(op->getNdbError());
} }
else else
@ -2208,7 +2223,7 @@ int ha_ndbcluster::write_row(byte *record)
// Set non-key attribute(s) // Set non-key attribute(s)
bool set_blob_value= FALSE; bool set_blob_value= FALSE;
for (i= 0; i < table->s->fields; i++) for (i= 0; i < table_share->fields; i++)
{ {
Field *field= table->field[i]; Field *field= table->field[i];
if (!(field->flags & PRI_KEY_FLAG) && if (!(field->flags & PRI_KEY_FLAG) &&
@ -2349,8 +2364,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
} }
/* Check for update of primary key for special handling */ /* Check for update of primary key for special handling */
if ((table->s->primary_key != MAX_KEY) && if ((table_share->primary_key != MAX_KEY) &&
(key_cmp(table->s->primary_key, old_data, new_data)) || (key_cmp(table_share->primary_key, old_data, new_data)) ||
(old_part_id != new_part_id)) (old_part_id != new_part_id))
{ {
int read_res, insert_res, delete_res, undo_res; int read_res, insert_res, delete_res, undo_res;
@ -2424,14 +2439,14 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
if (m_use_partition_function) if (m_use_partition_function)
op->setPartitionId(new_part_id); op->setPartitionId(new_part_id);
if (table->s->primary_key == MAX_KEY) if (table_share->primary_key == MAX_KEY)
{ {
// This table has no primary key, use "hidden" primary key // This table has no primary key, use "hidden" primary key
DBUG_PRINT("info", ("Using hidden key")); DBUG_PRINT("info", ("Using hidden key"));
// Require that the PK for this record has previously been // Require that the PK for this record has previously been
// read into m_value // read into m_value
uint no_fields= table->s->fields; uint no_fields= table_share->fields;
const NdbRecAttr* rec= m_value[no_fields].rec; const NdbRecAttr* rec= m_value[no_fields].rec;
DBUG_ASSERT(rec); DBUG_ASSERT(rec);
DBUG_DUMP("key", (char*)rec->aRef(), NDB_HIDDEN_PRIMARY_KEY_LENGTH); DBUG_DUMP("key", (char*)rec->aRef(), NDB_HIDDEN_PRIMARY_KEY_LENGTH);
@ -2450,7 +2465,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
m_rows_changed++; m_rows_changed++;
// Set non-key attribute(s) // Set non-key attribute(s)
for (i= 0; i < table->s->fields; i++) for (i= 0; i < table_share->fields; i++)
{ {
Field *field= table->field[i]; Field *field= table->field[i];
if (ha_get_bit_in_write_set(i+1) && if (ha_get_bit_in_write_set(i+1) &&
@ -2529,11 +2544,11 @@ int ha_ndbcluster::delete_row(const byte *record)
no_uncommitted_rows_update(-1); no_uncommitted_rows_update(-1);
if (table->s->primary_key == MAX_KEY) if (table_share->primary_key == MAX_KEY)
{ {
// This table has no primary key, use "hidden" primary key // This table has no primary key, use "hidden" primary key
DBUG_PRINT("info", ("Using hidden key")); DBUG_PRINT("info", ("Using hidden key"));
uint no_fields= table->s->fields; uint no_fields= table_share->fields;
const NdbRecAttr* rec= m_value[no_fields].rec; const NdbRecAttr* rec= m_value[no_fields].rec;
DBUG_ASSERT(rec != NULL); DBUG_ASSERT(rec != NULL);
@ -2656,10 +2671,10 @@ void ha_ndbcluster::unpack_record(byte *buf)
ndb_unpack_record(table, m_value, 0, buf); ndb_unpack_record(table, m_value, 0, buf);
#ifndef DBUG_OFF #ifndef DBUG_OFF
// Read and print all values that was fetched // Read and print all values that was fetched
if (table->s->primary_key == MAX_KEY) if (table_share->primary_key == MAX_KEY)
{ {
// Table with hidden primary key // Table with hidden primary key
int hidden_no= table->s->fields; int hidden_no= table_share->fields;
const NDBTAB *tab= (const NDBTAB *) m_table; const NDBTAB *tab= (const NDBTAB *) m_table;
const NDBCOL *hidden_col= tab->getColumn(hidden_no); const NDBCOL *hidden_col= tab->getColumn(hidden_no);
const NdbRecAttr* rec= m_value[hidden_no].rec; const NdbRecAttr* rec= m_value[hidden_no].rec;
@ -2686,7 +2701,7 @@ void ha_ndbcluster::print_results()
char buf_type[MAX_FIELD_WIDTH], buf_val[MAX_FIELD_WIDTH]; char buf_type[MAX_FIELD_WIDTH], buf_val[MAX_FIELD_WIDTH];
String type(buf_type, sizeof(buf_type), &my_charset_bin); String type(buf_type, sizeof(buf_type), &my_charset_bin);
String val(buf_val, sizeof(buf_val), &my_charset_bin); String val(buf_val, sizeof(buf_val), &my_charset_bin);
for (uint f= 0; f < table->s->fields; f++) for (uint f= 0; f < table_share->fields; f++)
{ {
/* Use DBUG_PRINT since DBUG_FILE cannot be filtered out */ /* Use DBUG_PRINT since DBUG_FILE cannot be filtered out */
char buf[2000]; char buf[2000];
@ -2953,7 +2968,7 @@ int ha_ndbcluster::rnd_init(bool scan)
DBUG_RETURN(-1); DBUG_RETURN(-1);
} }
} }
index_init(table->s->primary_key, 0); index_init(table_share->primary_key, 0);
DBUG_RETURN(0); DBUG_RETURN(0);
} }
@ -3051,9 +3066,9 @@ void ha_ndbcluster::position(const byte *record)
byte *buff; byte *buff;
DBUG_ENTER("position"); DBUG_ENTER("position");
if (table->s->primary_key != MAX_KEY) if (table_share->primary_key != MAX_KEY)
{ {
key_info= table->key_info + table->s->primary_key; key_info= table->key_info + table_share->primary_key;
key_part= key_info->key_part; key_part= key_info->key_part;
end= key_part + key_info->key_parts; end= key_part + key_info->key_parts;
buff= ref; buff= ref;
@ -3095,7 +3110,7 @@ void ha_ndbcluster::position(const byte *record)
{ {
// No primary key, get hidden key // No primary key, get hidden key
DBUG_PRINT("info", ("Getting hidden key")); DBUG_PRINT("info", ("Getting hidden key"));
int hidden_no= table->s->fields; int hidden_no= table_share->fields;
const NdbRecAttr* rec= m_value[hidden_no].rec; const NdbRecAttr* rec= m_value[hidden_no].rec;
memcpy(ref, (const void*)rec->aRef(), ref_length); memcpy(ref, (const void*)rec->aRef(), ref_length);
#ifndef DBUG_OFF #ifndef DBUG_OFF
@ -4057,7 +4072,7 @@ int ha_ndbcluster::create(const char *name,
caller. caller.
Do Ndb specific stuff, such as create a .ndb file Do Ndb specific stuff, such as create a .ndb file
*/ */
if ((my_errno= write_ndb_file())) if ((my_errno= write_ndb_file(name)))
DBUG_RETURN(my_errno); DBUG_RETURN(my_errno);
#ifdef HAVE_NDB_BINLOG #ifdef HAVE_NDB_BINLOG
if (ndb_binlog_thread_running > 0) if (ndb_binlog_thread_running > 0)
@ -4164,20 +4179,10 @@ int ha_ndbcluster::create(const char *name,
// Check partition info // Check partition info
partition_info *part_info= form->part_info; partition_info *part_info= form->part_info;
if (part_info) if ((my_errno= set_up_partition_info(part_info, form, (void*)&tab)))
{ {
int error; DBUG_RETURN(my_errno);
if ((error= set_up_partition_info(part_info, form, (void*)&tab)))
{
DBUG_RETURN(error);
}
} }
else
{
ndb_set_fragmentation(tab, form, pk_length);
}
if ((my_errno= check_ndb_connection())) if ((my_errno= check_ndb_connection()))
DBUG_RETURN(my_errno); DBUG_RETURN(my_errno);
@ -4199,7 +4204,7 @@ int ha_ndbcluster::create(const char *name,
my_errno= create_indexes(ndb, form); my_errno= create_indexes(ndb, form);
if (!my_errno) if (!my_errno)
my_errno= write_ndb_file(); my_errno= write_ndb_file(name);
else else
{ {
/* /*
@ -4921,9 +4926,9 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
primary key to be written in the ref variable primary key to be written in the ref variable
*/ */
if (table->s->primary_key != MAX_KEY) if (table_share->primary_key != MAX_KEY)
{ {
key= table->key_info+table->s->primary_key; key= table->key_info+table_share->primary_key;
ref_length= key->key_length; ref_length= key->key_length;
DBUG_PRINT("info", (" ref_length: %d", ref_length)); DBUG_PRINT("info", (" ref_length: %d", ref_length));
} }
@ -4945,10 +4950,23 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
if (!res) if (!res)
info(HA_STATUS_VARIABLE | HA_STATUS_CONST); info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
DBUG_RETURN(res); DBUG_RETURN(res);
} }
/*
Set partition info
SYNOPSIS
set_part_info()
part_info Partition info structure to use
RETURN VALUE
NONE
DESCRIPTION
Set up partition info when the handler object is created
*/
void ha_ndbcluster::set_part_info(partition_info *part_info) void ha_ndbcluster::set_part_info(partition_info *part_info)
{ {
m_part_info= part_info; m_part_info= part_info;
@ -5570,6 +5588,8 @@ static bool ndbcluster_init()
h.panic= ndbcluster_end; /* Panic call */ h.panic= ndbcluster_end; /* Panic call */
h.show_status= ndbcluster_show_status; /* Show status */ h.show_status= ndbcluster_show_status; /* Show status */
h.alter_tablespace= ndbcluster_alter_tablespace; /* Show status */ h.alter_tablespace= ndbcluster_alter_tablespace; /* Show status */
h.partition_flags= ndbcluster_partition_flags; /* Partition flags */
h.alter_table_flags=ndbcluster_alter_table_flags; /* Alter table flags */
#ifdef HAVE_NDB_BINLOG #ifdef HAVE_NDB_BINLOG
ndbcluster_binlog_init_handlerton(); ndbcluster_binlog_init_handlerton();
#endif #endif
@ -5721,6 +5741,20 @@ static int ndbcluster_end(ha_panic_function type)
DBUG_RETURN(0); DBUG_RETURN(0);
} }
void ha_ndbcluster::print_error(int error, myf errflag)
{
DBUG_ENTER("ha_ndbcluster::print_error");
DBUG_PRINT("enter", ("error = %d", error));
if (error == HA_ERR_NO_PARTITION_FOUND)
my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
(int)m_part_info->part_expr->val_int());
else
handler::print_error(error, errflag);
DBUG_VOID_RETURN;
}
/* /*
Static error print function called from Static error print function called from
static handler method ndbcluster_commit static handler method ndbcluster_commit
@ -5747,8 +5781,10 @@ void ndbcluster_print_error(int error, const NdbOperation *error_op)
*/ */
void ha_ndbcluster::set_dbname(const char *path_name, char *dbname) void ha_ndbcluster::set_dbname(const char *path_name, char *dbname)
{ {
char *end, *ptr; char *end, *ptr, *tmp_name;
char tmp_buff[FN_REFLEN];
tmp_name= tmp_buff;
/* Scan name from the end */ /* Scan name from the end */
ptr= strend(path_name)-1; ptr= strend(path_name)-1;
while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
@ -5760,18 +5796,19 @@ void ha_ndbcluster::set_dbname(const char *path_name, char *dbname)
ptr--; ptr--;
} }
uint name_len= end - ptr; uint name_len= end - ptr;
memcpy(dbname, ptr + 1, name_len); memcpy(tmp_name, ptr + 1, name_len);
dbname[name_len]= '\0'; tmp_name[name_len]= '\0';
#ifdef __WIN__ #ifdef __WIN__
/* Put to lower case */ /* Put to lower case */
ptr= dbname; ptr= tmp_name;
while (*ptr != '\0') { while (*ptr != '\0') {
*ptr= tolower(*ptr); *ptr= tolower(*ptr);
ptr++; ptr++;
} }
#endif #endif
filename_to_tablename(tmp_name, dbname, FN_REFLEN);
} }
/* /*
@ -5790,8 +5827,10 @@ void ha_ndbcluster::set_dbname(const char *path_name)
void void
ha_ndbcluster::set_tabname(const char *path_name, char * tabname) ha_ndbcluster::set_tabname(const char *path_name, char * tabname)
{ {
char *end, *ptr; char *end, *ptr, *tmp_name;
char tmp_buff[FN_REFLEN];
tmp_name= tmp_buff;
/* Scan name from the end */ /* Scan name from the end */
end= strend(path_name)-1; end= strend(path_name)-1;
ptr= end; ptr= end;
@ -5799,17 +5838,18 @@ ha_ndbcluster::set_tabname(const char *path_name, char * tabname)
ptr--; ptr--;
} }
uint name_len= end - ptr; uint name_len= end - ptr;
memcpy(tabname, ptr + 1, end - ptr); memcpy(tmp_name, ptr + 1, end - ptr);
tabname[name_len]= '\0'; tmp_name[name_len]= '\0';
#ifdef __WIN__ #ifdef __WIN__
/* Put to lower case */ /* Put to lower case */
ptr= tabname; ptr= tmp_name;
while (*ptr != '\0') { while (*ptr != '\0') {
*ptr= tolower(*ptr); *ptr= tolower(*ptr);
ptr++; ptr++;
} }
#endif #endif
filename_to_tablename(tmp_name, tabname, FN_REFLEN);
} }
/* /*
@ -6576,104 +6616,6 @@ void ndbcluster_free_share(NDB_SHARE **share, bool have_lock)
} }
/*
Internal representation of the frm blob
*/
struct frm_blob_struct
{
struct frm_blob_header
{
uint ver; // Version of header
uint orglen; // Original length of compressed data
uint complen; // Compressed length of data, 0=uncompressed
} head;
char data[1];
};
static int packfrm(const void *data, uint len,
const void **pack_data, uint *pack_len)
{
int error;
ulong org_len, comp_len;
uint blob_len;
frm_blob_struct* blob;
DBUG_ENTER("packfrm");
DBUG_PRINT("enter", ("data: 0x%lx len: %d", data, len));
error= 1;
org_len= len;
if (my_compress((byte*)data, &org_len, &comp_len))
goto err;
DBUG_PRINT("info", ("org_len: %d comp_len: %d", org_len, comp_len));
DBUG_DUMP("compressed", (char*)data, org_len);
error= 2;
blob_len= sizeof(frm_blob_struct::frm_blob_header)+org_len;
if (!(blob= (frm_blob_struct*) my_malloc(blob_len,MYF(MY_WME))))
goto err;
// Store compressed blob in machine independent format
int4store((char*)(&blob->head.ver), 1);
int4store((char*)(&blob->head.orglen), comp_len);
int4store((char*)(&blob->head.complen), org_len);
// Copy frm data into blob, already in machine independent format
memcpy(blob->data, data, org_len);
*pack_data= blob;
*pack_len= blob_len;
error= 0;
DBUG_PRINT("exit",
("pack_data: 0x%lx pack_len: %d", *pack_data, *pack_len));
err:
DBUG_RETURN(error);
}
static int unpackfrm(const void **unpack_data, uint *unpack_len,
const void *pack_data)
{
const frm_blob_struct *blob= (frm_blob_struct*)pack_data;
byte *data;
ulong complen, orglen, ver;
DBUG_ENTER("unpackfrm");
DBUG_PRINT("enter", ("pack_data: 0x%lx", pack_data));
complen= uint4korr((char*)&blob->head.complen);
orglen= uint4korr((char*)&blob->head.orglen);
ver= uint4korr((char*)&blob->head.ver);
DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d",
ver,complen,orglen));
DBUG_DUMP("blob->data", (char*) blob->data, complen);
if (ver != 1)
DBUG_RETURN(1);
if (!(data= my_malloc(max(orglen, complen), MYF(MY_WME))))
DBUG_RETURN(2);
memcpy(data, blob->data, complen);
if (my_uncompress(data, &complen, &orglen))
{
my_free((char*)data, MYF(0));
DBUG_RETURN(3);
}
*unpack_data= data;
*unpack_len= complen;
DBUG_PRINT("exit", ("frmdata: 0x%lx, len: %d", *unpack_data, *unpack_len));
DBUG_RETURN(0);
}
static static
int int
ndb_get_table_statistics(Ndb* ndb, const char * table, ndb_get_table_statistics(Ndb* ndb, const char * table,
@ -6756,17 +6698,17 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
that the table with this name is a ndb table that the table with this name is a ndb table
*/ */
int ha_ndbcluster::write_ndb_file() int ha_ndbcluster::write_ndb_file(const char *name)
{ {
File file; File file;
bool error=1; bool error=1;
char path[FN_REFLEN]; char path[FN_REFLEN];
DBUG_ENTER("write_ndb_file"); DBUG_ENTER("write_ndb_file");
DBUG_PRINT("enter", ("db: %s, name: %s", m_dbname, m_tabname)); DBUG_PRINT("enter", ("name: %s", name));
(void)strxnmov(path, FN_REFLEN-1, (void)strxnmov(path, FN_REFLEN-1,
mysql_data_home,"/",m_dbname,"/",m_tabname,ha_ndb_ext,NullS); mysql_data_home,"/",name,ha_ndb_ext,NullS);
if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0) if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0)
{ {
@ -6790,7 +6732,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
int res; int res;
KEY* key_info= table->key_info + active_index; KEY* key_info= table->key_info + active_index;
NDB_INDEX_TYPE index_type= get_index_type(active_index); NDB_INDEX_TYPE index_type= get_index_type(active_index);
ulong reclength= table->s->reclength; ulong reclength= table_share->reclength;
NdbOperation* op; NdbOperation* op;
if (uses_blob_value()) if (uses_blob_value())
@ -6997,7 +6939,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
int res; int res;
int range_no; int range_no;
ulong reclength= table->s->reclength; ulong reclength= table_share->reclength;
const NdbOperation* op= m_current_multi_operation; const NdbOperation* op= m_current_multi_operation;
for (;multi_range_curr < m_multi_range_defined; multi_range_curr++) for (;multi_range_curr < m_multi_range_defined; multi_range_curr++)
{ {
@ -7146,7 +7088,7 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr)
Field **field, **end; Field **field, **end;
NdbValue *value= m_value; NdbValue *value= m_value;
end= table->field + table->s->fields; end= table->field + table_share->fields;
for (field= table->field; field < end; field++, value++) for (field= table->field; field < end; field++, value++)
{ {
@ -8867,11 +8809,116 @@ int ha_ndbcluster::get_default_no_partitions(ulonglong max_rows)
uint reported_frags; uint reported_frags;
uint no_fragments= get_no_fragments(max_rows); uint no_fragments= get_no_fragments(max_rows);
uint no_nodes= g_ndb_cluster_connection->no_db_nodes(); uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
adjusted_frag_count(no_fragments, no_nodes, reported_frags); if (adjusted_frag_count(no_fragments, no_nodes, reported_frags))
{
push_warning(current_thd,
MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
"Ndb might have problems storing the max amount of rows specified");
}
return (int)reported_frags; return (int)reported_frags;
} }
/*
Set-up auto-partitioning for NDB Cluster
SYNOPSIS
set_auto_partitions()
part_info Partition info struct to set-up
RETURN VALUE
NONE
DESCRIPTION
Set-up auto partitioning scheme for tables that didn't define any
partitioning. We'll use PARTITION BY KEY() in this case which
translates into partition by primary key if a primary key exists
and partition by hidden key otherwise.
*/
void ha_ndbcluster::set_auto_partitions(partition_info *part_info)
{
DBUG_ENTER("ha_ndbcluster::set_auto_partitions");
part_info->list_of_part_fields= TRUE;
part_info->part_type= HASH_PARTITION;
switch (opt_ndb_distribution_id)
{
case ND_KEYHASH:
part_info->linear_hash_ind= FALSE;
break;
case ND_LINHASH:
part_info->linear_hash_ind= TRUE;
break;
}
DBUG_VOID_RETURN;
}
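Restated as a toy model: with no PARTITION BY clause, the table becomes hash-partitioned on the (possibly hidden) primary key, and a server option picks plain versus linear hashing. All names below are simplified stand-ins for the real partition_info fields.
enum distribution_model { ND_KEYHASH_M, ND_LINHASH_M };
struct part_info_model
{
  bool list_of_part_fields;   /* KEY()-style partitioning on a field list */
  bool linear_hash_ind;       /* linear hash instead of perfect spread */
};
static void set_auto_partitions_model(part_info_model *pi,
                                      distribution_model dist)
{
  pi->list_of_part_fields= true;                 /* PARTITION BY KEY() */
  pi->linear_hash_ind= (dist == ND_LINHASH_M);
}
int main()
{
  part_info_model pi= { false, false };
  set_auto_partitions_model(&pi, ND_LINHASH_M);
  return pi.linear_hash_ind ? 0 : 1;
}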
int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info)
{
NDBTAB *tab= (NDBTAB*)tab_ref;
int32 *range_data= (int32*)my_malloc(part_info->no_parts*sizeof(int32),
MYF(0));
uint i;
int error= 0;
DBUG_ENTER("set_range_data");
if (!range_data)
{
mem_alloc_error(part_info->no_parts*sizeof(int32));
DBUG_RETURN(1);
}
for (i= 0; i < part_info->no_parts; i++)
{
longlong range_val= part_info->range_int_array[i];
if (range_val < INT_MIN32 || range_val > INT_MAX32)
{
my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB");
error= 1;
goto error;
}
range_data[i]= (int32)range_val;
}
tab->setRangeListData(range_data, sizeof(int32)*part_info->no_parts);
error:
my_free((char*)range_data, MYF(0));
DBUG_RETURN(error);
}
int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info)
{
NDBTAB *tab= (NDBTAB*)tab_ref;
int32 *list_data= (int32*)my_malloc(part_info->no_list_values * 2
* sizeof(int32), MYF(0));
uint32 *part_id, i;
int error= 0;
DBUG_ENTER("set_list_data");
if (!list_data)
{
mem_alloc_error(part_info->no_list_values*2*sizeof(int32));
DBUG_RETURN(1);
}
for (i= 0; i < part_info->no_list_values; i++)
{
LIST_PART_ENTRY *list_entry= &part_info->list_array[i];
longlong list_val= list_entry->list_value;
if (list_val < INT_MIN32 || list_val > INT_MAX32)
{
my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB");
error= 1;
goto error;
}
list_data[2*i]= (int32)list_val;
part_id= (uint32*)&list_data[2*i+1];
*part_id= list_entry->partition_id;
}
tab->setRangeListData(list_data, 2*sizeof(int32)*part_info->no_list_values);
error:
my_free((char*)list_data, MYF(0));
DBUG_RETURN(error);
}
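set_list_data() above flattens the LIST metadata into an int32 array of (value, partition id) pairs before handing it to setRangeListData(). Here is a minimal sketch of the same layout and 32-bit range check; the model types are invented, and the error path is reduced to a bool.
#include <cstddef>
#include <cstdint>
#include <vector>
struct list_entry_model { long long value; uint32_t partition_id; };
/* Layout: out[2*i] = list value, out[2*i + 1] = owning partition id */
static bool build_list_data(const std::vector<list_entry_model> &entries,
                            std::vector<int32_t> &out)
{
  out.resize(entries.size() * 2);
  for (size_t i= 0; i < entries.size(); i++)
  {
    long long v= entries[i].value;
    if (v < INT32_MIN || v > INT32_MAX)  /* NDB stores 32-bit values only */
      return false;                      /* real code raises ER_LIMITED_PART_RANGE */
    out[2*i]= (int32_t) v;
    out[2*i + 1]= (int32_t) entries[i].partition_id;
  }
  return true;
}
int main()
{
  std::vector<list_entry_model> e= { { 5, 0 }, { 10, 1 } };
  std::vector<int32_t> flat;
  return (build_list_data(e, flat) && flat[3] == 1) ? 0 : 1;
}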
/* /*
User defined partitioning set-up. We need to check how many fragments the User defined partitioning set-up. We need to check how many fragments the
user wants defined and which node groups to put those into. Later we also user wants defined and which node groups to put those into. Later we also
@ -8889,12 +8936,18 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
TABLE *table, TABLE *table,
void *tab_par) void *tab_par)
{ {
DBUG_ENTER("ha_ndbcluster::set_up_partition_info"); uint16 frag_data[MAX_PARTITIONS];
ushort node_group[MAX_PARTITIONS]; char *ts_names[MAX_PARTITIONS];
ulong ng_index= 0, i, j; ulong ts_index= 0, fd_index= 0, i, j;
NDBTAB *tab= (NDBTAB*)tab_par; NDBTAB *tab= (NDBTAB*)tab_par;
NDBTAB::FragmentType ftype= NDBTAB::UserDefined; NDBTAB::FragmentType ftype= NDBTAB::UserDefined;
partition_element *part_elem; partition_element *part_elem;
bool first= TRUE;
uint ts_id, ts_version, part_count= 0, tot_ts_name_len;
List_iterator<partition_element> part_it(part_info->partitions);
int error;
char *name_ptr;
DBUG_ENTER("ha_ndbcluster::set_up_partition_info");
if (part_info->part_type == HASH_PARTITION && if (part_info->part_type == HASH_PARTITION &&
part_info->list_of_part_fields == TRUE) part_info->list_of_part_fields == TRUE)
@ -8913,93 +8966,60 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
col->setPartitionKey(TRUE); col->setPartitionKey(TRUE);
} }
} }
List_iterator<partition_element> part_it(part_info->partitions); else if (part_info->part_type == RANGE_PARTITION)
for (i= 0; i < part_info->no_parts; i++)
{ {
if ((error= set_range_data((void*)tab, part_info)))
{
DBUG_RETURN(error);
}
}
else if (part_info->part_type == LIST_PARTITION)
{
if ((error= set_list_data((void*)tab, part_info)))
{
DBUG_RETURN(error);
}
}
tab->setFragmentType(ftype);
i= 0;
tot_ts_name_len= 0;
do
{
uint ng;
part_elem= part_it++; part_elem= part_it++;
if (!is_sub_partitioned(part_info)) if (!is_sub_partitioned(part_info))
{ {
node_group[ng_index++]= part_elem->nodegroup_id; ng= part_elem->nodegroup_id;
//Here we should insert tablespace id based on tablespace name if (first && ng == UNDEF_NODEGROUP)
ng= 0;
ts_names[fd_index]= part_elem->tablespace_name;
frag_data[fd_index++]= ng;
} }
else else
{ {
List_iterator<partition_element> sub_it(part_elem->subpartitions); List_iterator<partition_element> sub_it(part_elem->subpartitions);
for (j= 0; j < part_info->no_subparts; j++) j= 0;
do
{ {
part_elem= sub_it++; part_elem= sub_it++;
node_group[ng_index++]= part_elem->nodegroup_id; ng= part_elem->nodegroup_id;
//Here we should insert tablespace id based on tablespace name if (first && ng == UNDEF_NODEGROUP)
} ng= 0;
ts_names[fd_index]= part_elem->tablespace_name;
frag_data[fd_index++]= ng;
} while (++j < part_info->no_subparts);
} }
} first= FALSE;
{ } while (++i < part_info->no_parts);
uint no_nodes= g_ndb_cluster_connection->no_db_nodes(); tab->setDefaultNoPartitionsFlag(part_info->use_default_no_partitions);
if (ng_index > 4 * no_nodes) tab->setMaxRows(table->s->max_rows);
{ tab->setTablespaceNames(ts_names, fd_index*sizeof(char*));
DBUG_RETURN(1300); tab->setFragmentCount(fd_index);
} tab->setFragmentData(&frag_data, fd_index*2);
}
tab->setNodeGroupIds(&node_group, ng_index);
tab->setFragmentType(ftype);
DBUG_RETURN(0); DBUG_RETURN(0);
} }
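The rewritten loop above emits one fragment entry per partition, or per subpartition when subpartitioning is used, forcing an undefined node group in the first partition to group 0. A toy version of just that flattening follows; UNDEF_NG_M and part_model are invented stand-ins for UNDEF_NODEGROUP and partition_element.
#include <cstdint>
#include <vector>
static const uint16_t UNDEF_NG_M= 0xFFFF;  /* stand-in for UNDEF_NODEGROUP */
struct part_model
{
  uint16_t nodegroup;
  std::vector<uint16_t> sub_nodegroups;    /* empty unless subpartitioned */
};
static std::vector<uint16_t>
flatten_fragments(const std::vector<part_model> &parts)
{
  std::vector<uint16_t> frag_data;
  bool first= true;
  for (size_t i= 0; i < parts.size(); i++)
  {
    std::vector<uint16_t> groups= parts[i].sub_nodegroups;
    if (groups.empty())
      groups.push_back(parts[i].nodegroup);
    for (size_t j= 0; j < groups.size(); j++)
    {
      uint16_t ng= groups[j];
      if (first && ng == UNDEF_NG_M)  /* first partition defaults to group 0 */
        ng= 0;
      frag_data.push_back(ng);
    }
    first= false;                     /* cleared once per partition */
  }
  return frag_data;
}
int main()
{
  std::vector<part_model> parts(2);
  parts[0].nodegroup= UNDEF_NG_M;
  parts[1].nodegroup= 1;
  return flatten_fragments(parts)[0] == 0 ? 0 : 1;
}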
/*
This routine is used to set up fragmentation when the user has only specified
ENGINE = NDB and no user-defined partitioning whatsoever. Thus all values
will be based on defaults. We will choose Linear Hash or Hash with
perfect spread depending on a session variable defined in MySQL.
*/
static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
{
NDBTAB::FragmentType ftype= NDBTAB::DistrKeyHash;
ushort node_group[MAX_PARTITIONS];
uint no_nodes= g_ndb_cluster_connection->no_db_nodes(), no_fragments, i;
DBUG_ENTER("ndb_set_fragmentation");
if (form->s->max_rows == (ha_rows) 0)
{
no_fragments= no_nodes;
}
else
{
/*
Ensure that we get enough fragments to handle all rows and ensure that
the table is fully distributed by keeping the number of fragments a
multiple of the number of nodes.
*/
uint fragments= get_no_fragments(form->s->max_rows);
if (adjusted_frag_count(fragments, no_nodes, no_fragments))
{
push_warning(current_thd,
MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
"Ndb might have problems storing the max amount of rows specified");
}
}
/*
Always start with node group 0 and continue with next node group from
there
*/
node_group[0]= 0;
for (i= 1; i < no_fragments; i++)
node_group[i]= UNDEF_NODEGROUP;
switch (opt_ndb_distribution_id)
{
case ND_KEYHASH:
ftype= NDBTAB::DistrKeyHash;
break;
case ND_LINHASH:
ftype= NDBTAB::DistrKeyLin;
break;
}
tab.setFragmentType(ftype);
tab.setNodeGroupIds(&node_group, no_fragments);
DBUG_VOID_RETURN;
}
bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info, bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes) uint table_changes)
{ {
@ -9252,3 +9272,41 @@ ndberror:
DBUG_RETURN(1); DBUG_RETURN(1);
} }
bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
{
Ndb *ndb;
NDBDICT *dict;
const NDBTAB *tab;
int err;
DBUG_ENTER("ha_ndbcluster::get_no_parts");
set_dbname(name);
set_tabname(name);
do
{
if (check_ndb_connection())
{
err= HA_ERR_NO_CONNECTION;
break;
}
ndb= get_ndb();
dict= ndb->getDictionary();
if (!(tab= dict->getTable(m_tabname)))
ERR_BREAK(dict->getNdbError(), err);
// Check if thread has stale local cache
if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
invalidate_dictionary_cache(FALSE);
if (!(tab= dict->getTable(m_tabname)))
ERR_BREAK(dict->getNdbError(), err);
}
*no_parts= tab->getFragmentCount();
DBUG_RETURN(FALSE);
} while (1);
end:
print_error(err, MYF(0));
DBUG_RETURN(TRUE);
}
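get_no_parts() is one instance of the stale-cache pattern used throughout this handler: fetch the table from the dictionary cache, and if its object status is Invalid, invalidate the cached copy once and refetch before giving up. A stand-alone model of the pattern follows; all names are invented, with a toy two-level cache in place of NDBDICT.
#include <map>
#include <string>
struct dict_object_model { bool invalid; unsigned fragment_count; };
struct dictionary_model
{
  std::map<std::string, dict_object_model> cache;    /* thread-local view */
  std::map<std::string, dict_object_model> backing;  /* cluster truth */
  const dict_object_model *get(const std::string &name)
  {
    if (!cache.count(name) && backing.count(name))
      cache[name]= backing[name];                    /* fill cache on miss */
    return cache.count(name) ? &cache[name] : 0;
  }
  void invalidate(const std::string &name) { cache.erase(name); }
};
static bool get_fragment_count(dictionary_model &dict,
                               const std::string &name, unsigned *out)
{
  const dict_object_model *obj= dict.get(name);
  if (obj && obj->invalid)       /* thread has a stale local copy */
  {
    dict.invalidate(name);
    obj= dict.get(name);         /* one refetch with a fresh copy */
  }
  if (!obj)
    return false;
  *out= obj->fragment_count;
  return true;
}
int main()
{
  dictionary_model d;
  dict_object_model fresh= { false, 4 }, stale= { true, 2 };
  d.backing["t1"]= fresh;
  d.cache["t1"]= stale;
  unsigned n= 0;
  return (get_fragment_count(d, "t1", &n) && n == 4) ? 0 : 1;
}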

View File

@ -561,22 +561,13 @@ class ha_ndbcluster: public handler
int extra_opt(enum ha_extra_function operation, ulong cache_size); int extra_opt(enum ha_extra_function operation, ulong cache_size);
int external_lock(THD *thd, int lock_type); int external_lock(THD *thd, int lock_type);
int start_stmt(THD *thd, thr_lock_type lock_type); int start_stmt(THD *thd, thr_lock_type lock_type);
void print_error(int error, myf errflag);
const char * table_type() const; const char * table_type() const;
const char ** bas_ext() const; const char ** bas_ext() const;
ulong table_flags(void) const; ulong table_flags(void) const;
ulong alter_table_flags(void) const
{
return (HA_ONLINE_ADD_INDEX | HA_ONLINE_DROP_INDEX |
HA_ONLINE_ADD_UNIQUE_INDEX | HA_ONLINE_DROP_UNIQUE_INDEX);
}
int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys); int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys);
int prepare_drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys); int prepare_drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys);
int final_drop_index(TABLE *table_arg); int final_drop_index(TABLE *table_arg);
ulong partition_flags(void) const
{
return (HA_CAN_PARTITION | HA_CAN_UPDATE_PARTITION_KEY |
HA_CAN_PARTITION_UNIQUE);
}
void set_part_info(partition_info *part_info); void set_part_info(partition_info *part_info);
ulong index_flags(uint idx, uint part, bool all_parts) const; ulong index_flags(uint idx, uint part, bool all_parts) const;
uint max_supported_record_length() const; uint max_supported_record_length() const;
@ -588,6 +579,9 @@ class ha_ndbcluster: public handler
int delete_table(const char *name); int delete_table(const char *name);
int create(const char *name, TABLE *form, HA_CREATE_INFO *info); int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
int get_default_no_partitions(ulonglong max_rows); int get_default_no_partitions(ulonglong max_rows);
bool get_no_parts(const char *name, uint *no_parts);
void set_auto_partitions(partition_info *part_info);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **store_lock(THD *thd,
THR_LOCK_DATA **to, THR_LOCK_DATA **to,
enum thr_lock_type lock_type); enum thr_lock_type lock_type);
@ -657,7 +651,7 @@ static void set_tabname(const char *pathname, char *tabname);
bool check_if_incompatible_data(HA_CREATE_INFO *info, bool check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes); uint table_changes);
static void invalidate_dictionary_cache(TABLE *table, Ndb *ndb, static void invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb,
const char *tabname, bool global); const char *tabname, bool global);
private: private:
@ -694,6 +688,8 @@ private:
uint set_up_partition_info(partition_info *part_info, uint set_up_partition_info(partition_info *part_info,
TABLE *table, TABLE *table,
void *tab); void *tab);
int set_range_data(void *tab, partition_info* part_info);
int set_list_data(void *tab, partition_info* part_info);
int complemented_pk_read(const byte *old_data, byte *new_data, int complemented_pk_read(const byte *old_data, byte *new_data,
uint32 old_part_id); uint32 old_part_id);
int pk_read(const byte *key, uint key_len, byte *buf, uint32 part_id); int pk_read(const byte *key, uint key_len, byte *buf, uint32 part_id);
@ -743,7 +739,7 @@ private:
char *update_table_comment(const char * comment); char *update_table_comment(const char * comment);
int write_ndb_file(); int write_ndb_file(const char *name);
int check_ndb_connection(THD* thd= current_thd); int check_ndb_connection(THD* thd= current_thd);

View File

@ -260,7 +260,7 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
break; break;
} }
if ((error= open_table_from_share(thd, table_share, "", 0, if ((error= open_table_from_share(thd, table_share, "", 0,
(uint) READ_ALL, 0, table))) (uint) READ_ALL, 0, table, FALSE)))
{ {
sql_print_error("Unable to open table for %s, error=%d(%d)", sql_print_error("Unable to open table for %s, error=%d(%d)",
share->key, error, my_errno); share->key, error, my_errno);
@ -1219,7 +1219,7 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
pOp->getReqNodeId() != g_ndb_cluster_connection->node_id()) pOp->getReqNodeId() != g_ndb_cluster_connection->node_id())
{ {
ndb->setDatabaseName(share->table->s->db.str); ndb->setDatabaseName(share->table->s->db.str);
ha_ndbcluster::invalidate_dictionary_cache(share->table, ha_ndbcluster::invalidate_dictionary_cache(share->table->s,
ndb, ndb,
share->table->s->table_name.str, share->table->s->table_name.str,
TRUE); TRUE);

File diff suppressed because it is too large

View File

@ -49,10 +49,15 @@ private:
partition_no_index_scan= 3 partition_no_index_scan= 3
}; };
/* Data for the partition handler */ /* Data for the partition handler */
int m_mode; // Open mode
uint m_open_test_lock; // Open test_if_locked
char *m_file_buffer; // Buffer with names char *m_file_buffer; // Buffer with names
char *m_name_buffer_ptr; // Pointer to first partition name char *m_name_buffer_ptr; // Pointer to first partition name
handlerton **m_engine_array; // Array of types of the handlers handlerton **m_engine_array; // Array of types of the handlers
handler **m_file; // Array of references to handler inst. handler **m_file; // Array of references to handler inst.
handler **m_new_file; // Array of references to new handlers
handler **m_reorged_file; // Reorganised partitions
handler **m_added_file; // Added parts kept for errors
partition_info *m_part_info; // local reference to partition partition_info *m_part_info; // local reference to partition
byte *m_start_key_ref; // Reference of start key in current byte *m_start_key_ref; // Reference of start key in current
// index scan info // index scan info
@ -60,7 +65,7 @@ private:
byte *m_ordered_rec_buffer; // Row and key buffer for ord. idx scan byte *m_ordered_rec_buffer; // Row and key buffer for ord. idx scan
KEY *m_curr_key_info; // Current index KEY *m_curr_key_info; // Current index
byte *m_rec0; // table->record[0] byte *m_rec0; // table->record[0]
QUEUE queue; // Prio queue used by sorted read QUEUE m_queue; // Prio queue used by sorted read
/* /*
Since the partition handler is a handler on top of other handlers, it Since the partition handler is a handler on top of other handlers, it
is necessary to keep information about what the underlying handler is necessary to keep information about what the underlying handler
@ -71,6 +76,7 @@ private:
u_long m_table_flags; u_long m_table_flags;
u_long m_low_byte_first; u_long m_low_byte_first;
uint m_reorged_parts; // Number of reorganised parts
uint m_tot_parts; // Total number of partitions; uint m_tot_parts; // Total number of partitions;
uint m_no_locks; // For engines like ha_blackhole, which needs no locks uint m_no_locks; // For engines like ha_blackhole, which needs no locks
uint m_last_part; // Last file that we update,write uint m_last_part; // Last file that we update,write
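Among the new members, the renamed m_queue is worth a note: an ordered index scan over a partitioned table is a k-way merge of the per-partition ordered scans, driven by a priority queue. A conceptual sketch follows, with plain ints standing in for row buffers compared on the current index key.
#include <queue>
#include <vector>
struct head_model { int key; size_t part; size_t pos; };
struct head_greater
{
  bool operator()(const head_model &a, const head_model &b) const
  { return a.key > b.key; }     /* min-heap on the index key */
};
static std::vector<int>
merge_partitions(const std::vector<std::vector<int> > &parts)
{
  std::priority_queue<head_model, std::vector<head_model>, head_greater> q;
  for (size_t p= 0; p < parts.size(); p++)
    if (!parts[p].empty())
      q.push(head_model{ parts[p][0], p, 0 });   /* prime with each head row */
  std::vector<int> out;
  while (!q.empty())
  {
    head_model h= q.top(); q.pop();
    out.push_back(h.key);                        /* emit globally-next row */
    if (h.pos + 1 < parts[h.part].size())        /* advance that partition */
      q.push(head_model{ parts[h.part][h.pos + 1], h.part, h.pos + 1 });
  }
  return out;
}
int main()
{
  std::vector<std::vector<int> > parts= { { 1, 4 }, { 2, 3 } };
  return merge_partitions(parts)[1] == 2 ? 0 : 1;
}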
@ -172,21 +178,38 @@ public:
*/ */
virtual int delete_table(const char *from); virtual int delete_table(const char *from);
virtual int rename_table(const char *from, const char *to); virtual int rename_table(const char *from, const char *to);
virtual int create(const char *name, TABLE * form, virtual int create(const char *name, TABLE *form,
HA_CREATE_INFO * create_info); HA_CREATE_INFO *create_info);
virtual int create_handler_files(const char *name); virtual int create_handler_files(const char *name);
virtual void update_create_info(HA_CREATE_INFO * create_info); virtual void update_create_info(HA_CREATE_INFO *create_info);
virtual char *update_table_comment(const char *comment); virtual char *update_table_comment(const char *comment);
virtual int change_partitions(HA_CREATE_INFO *create_info,
const char *path,
ulonglong *copied,
ulonglong *deleted,
const void *pack_frm_data,
uint pack_frm_len);
virtual int drop_partitions(const char *path); virtual int drop_partitions(const char *path);
virtual int rename_partitions(const char *path);
bool get_no_parts(const char *name, uint *no_parts)
{
DBUG_ENTER("ha_partition::get_no_parts");
*no_parts= m_tot_parts;
DBUG_RETURN(0);
}
private: private:
int copy_partitions(ulonglong *copied, ulonglong *deleted);
void cleanup_new_partition(uint part_count);
int prepare_new_partition(TABLE *table, HA_CREATE_INFO *create_info,
handler *file, const char *part_name);
/* /*
delete_table, rename_table and create use very similar logic which delete_table, rename_table and create use very similar logic which
is packed into this routine. is packed into this routine.
*/ */
uint del_ren_cre_table(const char *from, uint del_ren_cre_table(const char *from,
const char *to= NULL, const char *to= NULL,
TABLE * table_arg= NULL, TABLE *table_arg= NULL,
HA_CREATE_INFO * create_info= NULL); HA_CREATE_INFO *create_info= NULL);
/* /*
One method to create the table_name.par file containing the names of the One method to create the table_name.par file containing the names of the
underlying partitions, their engine and the number of partitions. underlying partitions, their engine and the number of partitions.
@@ -647,30 +670,8 @@ public:
    index scan module.
    (NDB)
  */
- virtual ulong alter_table_flags(void) const
- {
-   //return HA_ONLINE_ADD_EMPTY_PARTITION + HA_ONLINE_DROP_PARTITION;
-   return HA_ONLINE_DROP_PARTITION;
- }
  virtual ulong table_flags() const
  { return m_table_flags; }
- /*
-   HA_CAN_PARTITION:
-     Used by storage engines that can handle partitioning without this
-     partition handler
-     (Partition, NDB)
-   HA_CAN_UPDATE_PARTITION_KEY:
-     Set if the handler can update fields that are part of the partition
-     function.
-   HA_CAN_PARTITION_UNIQUE:
-     Set if the handler can handle unique indexes where the fields of the
-     unique key are not part of the fields of the partition function. Thus
-     a unique key can be set on all fields.
- */
- virtual ulong partition_flags() const
- { return HA_CAN_PARTITION; }
  /*
    This is a bitmap of flags that says how the storage engine
@@ -834,6 +835,8 @@ public:
    description of how the CREATE TABLE part to define FOREIGN KEY's is done.
    free_foreign_key_create_info is used to free the memory area that provided
    this description.
+   can_switch_engines checks if it is ok to switch to a new engine based on
+   the foreign key info in the table.
    -------------------------------------------------------------------------
    virtual char* get_foreign_key_create_info()
@@ -843,7 +846,7 @@ public:
                              List<FOREIGN_KEY_INFO> *f_key_list)
    virtual uint referenced_by_foreign_key()
  */
+ virtual bool can_switch_engines();
  /*
    -------------------------------------------------------------------------
    MODULE fulltext index
@@ -892,16 +895,35 @@ public:
    -------------------------------------------------------------------------
    MODULE admin MyISAM
    -------------------------------------------------------------------------
+   -------------------------------------------------------------------------
+   OPTIMIZE TABLE, CHECK TABLE, ANALYZE TABLE and REPAIR TABLE are
+   mapped to a routine that handles looping over a given set of
+   partitions and those routines send a flag indicating to execute on
+   all partitions.
+   -------------------------------------------------------------------------
+ */
+ virtual int optimize(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int analyze(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int check(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int repair(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int optimize_partitions(THD *thd);
+ virtual int analyze_partitions(THD *thd);
+ virtual int check_partitions(THD *thd);
+ virtual int repair_partitions(THD *thd);
+private:
+ int handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
+                           uint flags, bool all_parts);
+public:
+ /*
+   -------------------------------------------------------------------------
    Admin commands not supported currently (almost purely MyISAM routines)
    This means that the following methods are not implemented:
    -------------------------------------------------------------------------
-   virtual int check(THD* thd, HA_CHECK_OPT *check_opt);
    virtual int backup(THD* thd, HA_CHECK_OPT *check_opt);
    virtual int restore(THD* thd, HA_CHECK_OPT *check_opt);
-   virtual int repair(THD* thd, HA_CHECK_OPT *check_opt);
-   virtual int optimize(THD* thd, HA_CHECK_OPT *check_opt);
-   virtual int analyze(THD* thd, HA_CHECK_OPT *check_opt);
    virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt);
    virtual int preload_keys(THD *thd, HA_CHECK_OPT *check_opt);
    virtual bool check_and_repair(THD *thd);
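The admin mapping described in the comment above can be exercised with the
new per-partition admin syntax introduced by this worklog. A usage sketch
(table and partition names are hypothetical):

    CREATE TABLE t1 (a INT)
    PARTITION BY RANGE (a)
    (PARTITION p0 VALUES LESS THAN (10),
     PARTITION p1 VALUES LESS THAN MAXVALUE);

    ALTER TABLE t1 OPTIMIZE PARTITION p0;     -- loops over the named partitions only
    ALTER TABLE t1 ANALYZE PARTITION p0, p1;  -- all_parts flag is FALSE here
    OPTIMIZE TABLE t1;                        -- same routine, flagged to run on all partitions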
View File
@@ -63,7 +63,7 @@ const handlerton default_hton =
  NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  NULL, NULL, NULL, NULL, NULL,
  create_default,
- NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  HTON_NO_FLAGS
};
@@ -2160,7 +2160,8 @@ int ha_create_table(THD *thd, const char *path,
  init_tmp_table_share(&share, db, 0, table_name, path);
  if (open_table_def(thd, &share, 0) ||
-     open_table_from_share(thd, &share, "", 0, (uint) READ_ALL, 0, &table))
+     open_table_from_share(thd, &share, "", 0, (uint) READ_ALL, 0, &table,
+                           TRUE))
    goto err;
  if (update_create_info)
@@ -2237,7 +2238,7 @@ int ha_create_table_from_engine(THD* thd, const char *db, const char *name)
  {
    DBUG_RETURN(3);
  }
- if (open_table_from_share(thd, &share, "" ,0, 0, 0, &table))
+ if (open_table_from_share(thd, &share, "" ,0, 0, 0, &table, FALSE))
  {
    free_table_share(&share);
    DBUG_RETURN(3);
View File
@@ -99,6 +99,7 @@
#define HA_CAN_PARTITION       (1 << 0) /* Partition support */
#define HA_CAN_UPDATE_PARTITION_KEY (1 << 1)
#define HA_CAN_PARTITION_UNIQUE (1 << 2)
+#define HA_USE_AUTO_PARTITION (1 << 3)
/* bits in index_flags(index_number) for what you can do with index */
@@ -109,30 +110,58 @@
#define HA_ONLY_WHOLE_INDEX 16 /* Can't use part key searches */
#define HA_KEYREAD_ONLY     64 /* Support HA_EXTRA_KEYREAD */
-/* bits in alter_table_flags */
-#define HA_ONLINE_ADD_EMPTY_PARTITION 0x00000001
-#define HA_ONLINE_DROP_PARTITION 0x00000002
+/*
+  bits in alter_table_flags:
+*/
/*
  These bits are set if different kinds of indexes can be created
  off-line without re-create of the table (but with a table lock).
*/
-#define HA_ONLINE_ADD_INDEX_NO_WRITES 0x00000004 /*add index w/lock*/
-#define HA_ONLINE_DROP_INDEX_NO_WRITES 0x00000008 /*drop index w/lock*/
-#define HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES 0x00000010 /*add unique w/lock*/
-#define HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES 0x00000020 /*drop uniq. w/lock*/
-#define HA_ONLINE_ADD_PK_INDEX_NO_WRITES 0x00000040 /*add prim. w/lock*/
-#define HA_ONLINE_DROP_PK_INDEX_NO_WRITES 0x00000080 /*drop prim. w/lock*/
+#define HA_ONLINE_ADD_INDEX_NO_WRITES (1L << 0) /*add index w/lock*/
+#define HA_ONLINE_DROP_INDEX_NO_WRITES (1L << 1) /*drop index w/lock*/
+#define HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES (1L << 2) /*add unique w/lock*/
+#define HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES (1L << 3) /*drop uniq. w/lock*/
+#define HA_ONLINE_ADD_PK_INDEX_NO_WRITES (1L << 4) /*add prim. w/lock*/
+#define HA_ONLINE_DROP_PK_INDEX_NO_WRITES (1L << 5) /*drop prim. w/lock*/
/*
  These are set if different kinds of indexes can be created on-line
  (without a table lock). If a handler is capable of one or more of
  these, it should also set the corresponding *_NO_WRITES bit(s).
*/
-#define HA_ONLINE_ADD_INDEX 0x00000100 /*add index online*/
-#define HA_ONLINE_DROP_INDEX 0x00000200 /*drop index online*/
-#define HA_ONLINE_ADD_UNIQUE_INDEX 0x00000400 /*add unique online*/
-#define HA_ONLINE_DROP_UNIQUE_INDEX 0x00000800 /*drop uniq. online*/
-#define HA_ONLINE_ADD_PK_INDEX 0x00001000 /*add prim. online*/
-#define HA_ONLINE_DROP_PK_INDEX 0x00002000 /*drop prim. online*/
+#define HA_ONLINE_ADD_INDEX (1L << 6) /*add index online*/
+#define HA_ONLINE_DROP_INDEX (1L << 7) /*drop index online*/
+#define HA_ONLINE_ADD_UNIQUE_INDEX (1L << 8) /*add unique online*/
+#define HA_ONLINE_DROP_UNIQUE_INDEX (1L << 9) /*drop uniq. online*/
+#define HA_ONLINE_ADD_PK_INDEX (1L << 10)/*add prim. online*/
+#define HA_ONLINE_DROP_PK_INDEX (1L << 11)/*drop prim. online*/
+/*
+  HA_PARTITION_FUNCTION_SUPPORTED indicates that the partition function is
+  supported at all.
+  HA_FAST_CHANGE_PARTITION means that optimised variants of the changes
+  exist, but they are not necessarily performed online.
+  HA_ONLINE_DOUBLE_WRITE means that the handler supports writing to both
+  the new partition and to the old partitions when updating through the
+  old partitioning schema while performing a change of the partitioning.
+  This means that we can support updating of the table while performing
+  the copy phase of the change. For completely lock-free operation a
+  double write from new to old would also be required, which this flag
+  does not cover. (This flag was in fact removed even before it was
+  introduced the first time; the new idea is that handlers will handle
+  the lock level already in store_lock for ALTER TABLE partitions.)
+  HA_PARTITION_ONE_PHASE is a flag that can be set by handlers that take
+  care of changing the partitions online and in one phase. Thus all phases
+  needed to handle the change are implemented inside the storage engine.
+  The storage engine must also support auto-discovery since the frm file
+  is changed as part of the change and this change must be controlled by
+  the storage engine. A typical engine to support this is NDB (through
+  WL #2498).
+*/
+#define HA_PARTITION_FUNCTION_SUPPORTED (1L << 12)
+#define HA_FAST_CHANGE_PARTITION (1L << 13)
+#define HA_PARTITION_ONE_PHASE (1L << 14)
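As a minimal sketch of how a storage engine might advertise these
capabilities through the two new handlerton hooks declared further down
(the engine name and flag choice are purely illustrative, not taken from
this commit):

    static uint example_partition_flags()
    {
      /* Engine handles partitioning itself and allows partition-key updates */
      return HA_CAN_PARTITION | HA_CAN_UPDATE_PARTITION_KEY;
    }

    static uint example_alter_table_flags(uint flags)
    {
      /* Optimised (though not online) ADD/DROP/REORGANIZE variants exist */
      return HA_PARTITION_FUNCTION_SUPPORTED | HA_FAST_CHANGE_PARTITION;
    }

The two functions would then occupy the handlerton slots that default_hton
and binlog_hton fill with NULL in this commit.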
/*
  Index scan will not return records in rowid order. Not guaranteed to be
@@ -140,7 +169,6 @@
*/
#define HA_KEY_SCAN_NOT_ROR 128
/* operations for disable/enable indexes */
#define HA_KEY_SWITCH_NONUNIQ 0
#define HA_KEY_SWITCH_ALL 1
@@ -540,6 +568,8 @@ typedef struct
  int (*start_consistent_snapshot)(THD *thd);
  bool (*flush_logs)();
  bool (*show_status)(THD *thd, stat_print_fn *print, enum ha_stat_type stat);
+ uint (*partition_flags)();
+ uint (*alter_table_flags)(uint flags);
  int (*alter_tablespace)(THD *thd, st_alter_tablespace *ts_info);
  uint32 flags; /* global handler flags */
  /*
@@ -604,10 +634,12 @@ enum partition_state {
  PART_NORMAL= 0,
  PART_IS_DROPPED= 1,
  PART_TO_BE_DROPPED= 2,
- PART_DROPPING= 3,
- PART_IS_ADDED= 4,
- PART_ADDING= 5,
- PART_ADDED= 6
+ PART_TO_BE_ADDED= 3,
+ PART_TO_BE_REORGED= 4,
+ PART_REORGED_DROPPED= 5,
+ PART_CHANGED= 6,
+ PART_IS_CHANGED= 7,
+ PART_IS_ADDED= 8
};
typedef struct {
@@ -657,12 +689,12 @@ public:
typedef struct {
  longlong list_value;
- uint partition_id;
+ uint32 partition_id;
} LIST_PART_ENTRY;
class partition_info;
-typedef bool (*get_part_id_func)(partition_info *part_info,
+typedef int (*get_part_id_func)(partition_info *part_info,
                                 uint32 *part_id);
typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
@@ -732,6 +764,8 @@ public:
  char *part_func_string;
  char *subpart_func_string;
+ uchar *part_state;
  partition_element *curr_part_elem;
  partition_element *current_partition;
  /*
@@ -748,12 +782,12 @@ public:
  partition_type subpart_type;
  uint part_info_len;
+ uint part_state_len;
  uint part_func_len;
  uint subpart_func_len;
  uint no_parts;
  uint no_subparts;
- uint count_curr_parts;
  uint count_curr_subparts;
  uint part_error_code;
@@ -764,14 +798,24 @@ public:
  uint no_subpart_fields;
  uint no_full_part_fields;
+ /*
+   This variable is used to calculate the partition id when using
+   LINEAR KEY/HASH. This functionality is kept in the MySQL Server
+   but mainly of use to handlers supporting partitioning.
+ */
  uint16 linear_hash_mask;
  bool use_default_partitions;
+ bool use_default_no_partitions;
  bool use_default_subpartitions;
+ bool use_default_no_subpartitions;
+ bool default_partitions_setup;
  bool defined_max_value;
  bool list_of_part_fields;
  bool list_of_subpart_fields;
  bool linear_hash_ind;
+ bool fixed;
+ bool from_openfrm;
  partition_info()
  : get_partition_id(NULL), get_part_partition_id(NULL),
@@ -782,19 +826,27 @@ public:
    list_array(NULL),
    part_info_string(NULL),
    part_func_string(NULL), subpart_func_string(NULL),
+   part_state(NULL),
    curr_part_elem(NULL), current_partition(NULL),
    default_engine_type(NULL),
    part_result_type(INT_RESULT),
    part_type(NOT_A_PARTITION), subpart_type(NOT_A_PARTITION),
-   part_info_len(0), part_func_len(0), subpart_func_len(0),
+   part_info_len(0), part_state_len(0),
+   part_func_len(0), subpart_func_len(0),
    no_parts(0), no_subparts(0),
-   count_curr_parts(0), count_curr_subparts(0), part_error_code(0),
+   count_curr_subparts(0), part_error_code(0),
    no_list_values(0), no_part_fields(0), no_subpart_fields(0),
    no_full_part_fields(0), linear_hash_mask(0),
    use_default_partitions(TRUE),
-   use_default_subpartitions(TRUE), defined_max_value(FALSE),
+   use_default_no_partitions(TRUE),
+   use_default_subpartitions(TRUE),
+   use_default_no_subpartitions(TRUE),
+   default_partitions_setup(FALSE),
+   defined_max_value(FALSE),
    list_of_part_fields(FALSE), list_of_subpart_fields(FALSE),
-   linear_hash_ind(FALSE)
+   linear_hash_ind(FALSE),
+   fixed(FALSE),
+   from_openfrm(FALSE)
  {
    all_fields_in_PF.clear_all();
    all_fields_in_PPF.clear_all();
@@ -842,6 +894,8 @@ uint get_tot_partitions(partition_info *part_info)
  return part_info->no_parts *
         (is_sub_partitioned(part_info) ? part_info->no_subparts : 1);
}
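linear_hash_mask above, together with the partition counts that
get_tot_partitions() combines, drives the LINEAR KEY/HASH partition-id
calculation. A minimal sketch of the classic linear-hash fold (function
name is illustrative; the server's real implementation lives in the
partitioning code, not in this header):

    static uint32 example_linear_hash_part_id(ulonglong hash_value,
                                              uint16 mask, uint no_parts)
    {
      /* mask = 2^k - 1 with 2^k >= no_parts */
      uint32 part_id= (uint32) (hash_value & mask);
      if (part_id >= no_parts)
        part_id&= (mask >> 1);   /* fold back into the existing partitions */
      return part_id;
    }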
#endif
typedef struct st_ha_create_information
@@ -891,8 +945,8 @@ typedef struct st_ha_check_opt
#ifdef WITH_PARTITION_STORAGE_ENGINE
bool is_partition_in_list(char *part_name, List<char> list_part_names);
-bool is_partitions_in_table(partition_info *new_part_info,
+char *are_partitions_in_table(partition_info *new_part_info,
                             partition_info *old_part_info);
bool check_reorganise_list(partition_info *new_part_info,
                           partition_info *old_part_info,
                           List<char> list_part_names);
@@ -906,12 +960,13 @@ int get_parts_for_update(const byte *old_data, byte *new_data,
                         uint32 *old_part_id, uint32 *new_part_id);
int get_part_for_delete(const byte *buf, const byte *rec0,
                        partition_info *part_info, uint32 *part_id);
-bool check_partition_info(partition_info *part_info,handlerton *eng_type,
+bool check_partition_info(partition_info *part_info,handlerton **eng_type,
                          handler *file, ulonglong max_rows);
-bool fix_partition_func(THD *thd, const char *name, TABLE *table);
+bool fix_partition_func(THD *thd, const char *name, TABLE *table,
+                        bool create_table_ind);
char *generate_partition_syntax(partition_info *part_info,
                                uint *buf_length, bool use_sql_alloc,
-                               bool add_default_info);
+                               bool write_all);
bool partition_key_modified(TABLE *table, List<Item> &fields);
void get_partition_set(const TABLE *table, byte *buf, const uint index,
                       const key_range *key_spec,
@@ -921,7 +976,9 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf,
                               const key_range *key_spec,
                               part_id_range *part_spec);
bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
-                            uint part_info_len, TABLE *table,
+                            uint part_info_len,
+                            uchar *part_state, uint part_state_len,
+                            TABLE *table, bool is_create_table_ind,
                            handlerton *default_db_type);
void make_used_partitions_str(partition_info *part_info, String *parts_str);
uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
@@ -1480,11 +1537,16 @@ public:
  virtual const char *table_type() const =0;
  virtual const char **bas_ext() const =0;
  virtual ulong table_flags(void) const =0;
+ virtual ulong alter_table_flags(void) const { return 0; }
#ifdef WITH_PARTITION_STORAGE_ENGINE
+ virtual ulong partition_flags(void) const { return 0;}
  virtual int get_default_no_partitions(ulonglong max_rows) { return 1;}
- virtual void set_part_info(partition_info *part_info) { return; }
+ virtual void set_auto_partitions(partition_info *part_info) { return; }
+ virtual bool get_no_parts(const char *name,
+                           uint *no_parts)
+ {
+   *no_parts= 0;
+   return 0;
+ }
+ virtual void set_part_info(partition_info *part_info) {return;}
#endif
  virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
@@ -1530,19 +1592,26 @@ public:
  virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
  virtual int create_handler_files(const char *name) { return FALSE;}
- /*
-   SYNOPSIS
-     drop_partitions()
-     path                  Complete path of db and table name
-   RETURN VALUE
-     TRUE                  Failure
-     FALSE                 Success
-   DESCRIPTION
-     Drop a partition, during this operation no other activity is ongoing
-     in this server on the table.
- */
+ virtual int change_partitions(HA_CREATE_INFO *create_info,
+                               const char *path,
+                               ulonglong *copied,
+                               ulonglong *deleted,
+                               const void *pack_frm_data,
+                               uint pack_frm_len)
+ { return HA_ERR_WRONG_COMMAND; }
  virtual int drop_partitions(const char *path)
  { return HA_ERR_WRONG_COMMAND; }
+ virtual int rename_partitions(const char *path)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int optimize_partitions(THD *thd)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int analyze_partitions(THD *thd)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int check_partitions(THD *thd)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int repair_partitions(THD *thd)
+ { return HA_ERR_WRONG_COMMAND; }
  /* lock_count() can be more than one if the table is a MERGE */
  virtual uint lock_count(void) const { return 1; }
  virtual THR_LOCK_DATA **store_lock(THD *thd,
View File
@@ -422,6 +422,7 @@ static SYMBOL symbols[] = {
  { "READ_WRITE",       SYM(READ_WRITE_SYM)},
  { "READS",            SYM(READS_SYM)},
  { "REAL",             SYM(REAL)},
+ { "REBUILD",          SYM(REBUILD_SYM)},
  { "RECOVER",          SYM(RECOVER_SYM)},
  { "REDO_BUFFER_SIZE", SYM(REDO_BUFFER_SIZE_SYM)},
  { "REDOFILE",         SYM(REDOFILE_SYM)},
@@ -434,7 +435,7 @@ static SYMBOL symbols[] = {
  { "RELEASE",          SYM(RELEASE_SYM)},
  { "RELOAD",           SYM(RELOAD)},
  { "RENAME",           SYM(RENAME)},
- { "REORGANISE",       SYM(REORGANISE_SYM)},
+ { "REORGANIZE",       SYM(REORGANIZE_SYM)},
  { "REPAIR",           SYM(REPAIR)},
  { "REPEATABLE",       SYM(REPEATABLE_SYM)},
  { "REPLACE",          SYM(REPLACE)},
View File
@@ -351,9 +351,25 @@ void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table)
  }
}
+/* Downgrade all locks on a table to new WRITE level from WRITE_ONLY */
+void mysql_lock_downgrade_write(THD *thd, TABLE *table,
+                                thr_lock_type new_lock_type)
+{
+  MYSQL_LOCK *locked;
+  TABLE *write_lock_used;
+  if ((locked = get_lock_data(thd,&table,1,1,&write_lock_used)))
+  {
+    for (uint i=0; i < locked->lock_count; i++)
+      thr_downgrade_write_lock(locked->locks[i], new_lock_type);
+    my_free((gptr) locked,MYF(0));
+  }
+}
/* abort all other threads waiting to get lock in table */
-void mysql_lock_abort(THD *thd, TABLE *table)
+void mysql_lock_abort(THD *thd, TABLE *table, bool upgrade_lock)
{
  MYSQL_LOCK *locked;
  TABLE *write_lock_used;
@@ -362,7 +378,7 @@ void mysql_lock_abort(THD *thd, TABLE *table)
  if ((locked = get_lock_data(thd,&table,1,1,&write_lock_used)))
  {
    for (uint i=0; i < locked->lock_count; i++)
-     thr_abort_locks(locked->locks[i]->lock);
+     thr_abort_locks(locked->locks[i]->lock, upgrade_lock);
    my_free((gptr) locked,MYF(0));
  }
  DBUG_VOID_RETURN;
View File
@@ -88,6 +88,8 @@ handlerton binlog_hton = {
  NULL,                         /* Start Consistent Snapshot */
  NULL,                         /* Flush logs */
  NULL,                         /* Show status */
+ NULL,                         /* Partition flags */
+ NULL,                         /* Alter table flags */
  NULL,                         /* Alter Tablespace */
  HTON_NOT_USER_SELECTABLE | HTON_HIDDEN
};
View File
@@ -595,6 +595,11 @@ struct Query_cache_query_flags
#define query_cache_invalidate_by_MyISAM_filename_ref NULL
#endif /*HAVE_QUERY_CACHE*/
+uint build_table_path(char *buff, size_t bufflen, const char *db,
+                      const char *table, const char *ext);
+void write_bin_log(THD *thd, bool clear_error,
+                   char const *query, ulong query_length);
bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent);
bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create);
bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent);
@@ -1035,6 +1040,22 @@ void remove_db_from_cache(const char *db);
void flush_tables();
bool is_equal(const LEX_STRING *a, const LEX_STRING *b);
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+uint fast_alter_partition_table(THD *thd, TABLE *table,
+                                ALTER_INFO *alter_info,
+                                HA_CREATE_INFO *create_info,
+                                TABLE_LIST *table_list,
+                                List<create_field> *create_list,
+                                List<Key> *key_list, const char *db,
+                                const char *table_name,
+                                uint fast_alter_partition);
+uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
+                           HA_CREATE_INFO *create_info,
+                           handlerton *old_db_type,
+                           bool *partition_changed,
+                           uint *fast_alter_partition);
+#endif
/* bits for last argument to remove_table_from_cache() */
#define RTFC_NO_FLAG                0x0000
#define RTFC_OWNED_BY_THD_FLAG      0x0001
@@ -1043,6 +1064,40 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b);
bool remove_table_from_cache(THD *thd, const char *db, const char *table,
                             uint flags);
+typedef struct st_lock_param_type
+{
+  ulonglong copied;
+  ulonglong deleted;
+  THD *thd;
+  HA_CREATE_INFO *create_info;
+  List<create_field> *create_list;
+  List<create_field> new_create_list;
+  List<Key> *key_list;
+  List<Key> new_key_list;
+  TABLE *table;
+  KEY *key_info_buffer;
+  const char *db;
+  const char *table_name;
+  const void *pack_frm_data;
+  enum thr_lock_type old_lock_type;
+  uint key_count;
+  uint db_options;
+  uint pack_frm_len;
+} ALTER_PARTITION_PARAM_TYPE;
+void mem_alloc_error(size_t size);
+int packfrm(const void *data, uint len,
+            const void **pack_data, uint *pack_len);
+int unpackfrm(const void **unpack_data, uint *unpack_len,
+              const void *pack_data);
+#define WFRM_INITIAL_WRITE 1
+#define WFRM_CREATE_HANDLER_FILES 2
+#define WFRM_PACK_FRM 4
+bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags);
+bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt);
+void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt);
+void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table);
bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables, bool have_lock = FALSE);
void copy_field_from_tmp_record(Field *field,int offset);
bool fill_record(THD *thd, Field **field, List<Item> &values,
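The pack_frm_data/pack_frm_len pair that change_partitions() receives is
produced with the packfrm()/unpackfrm() pair declared above. A sketch under
the assumption that the caller follows the usual readfrm() pattern (error
handling condensed):

    const void *frm_data, *pack_data;
    uint frm_len, pack_len;
    if (readfrm(name, &frm_data, &frm_len) ||                /* read .frm image */
        packfrm(frm_data, frm_len, &pack_data, &pack_len))   /* compress it     */
      return TRUE;
    /* pack_data/pack_len are handed to the engine as pack_frm_data and
       pack_frm_len; the engine can restore the original image later with
       unpackfrm(). */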
@@ -1379,7 +1434,9 @@ void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock);
void mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock);
void mysql_unlock_some_tables(THD *thd, TABLE **table,uint count);
void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table);
-void mysql_lock_abort(THD *thd, TABLE *table);
+void mysql_lock_abort(THD *thd, TABLE *table, bool upgrade_lock);
+void mysql_lock_downgrade_write(THD *thd, TABLE *table,
+                                thr_lock_type new_lock_type);
bool mysql_lock_abort_for_thread(THD *thd, TABLE *table);
MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b);
TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle,
@@ -1431,9 +1488,7 @@ int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags);
void open_table_error(TABLE_SHARE *share, int error, int db_errno, int errarg);
int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
                          uint db_stat, uint prgflag, uint ha_open_flags,
-                         TABLE *outparam);
-int openfrm(THD *thd, const char *name,const char *alias,uint filestat,
-            uint prgflag, uint ha_open_flags, TABLE *outparam);
+                         TABLE *outparam, bool is_create_table);
int readfrm(const char *name, const void** data, uint* length);
int writefrm(const char* name, const void* data, uint len);
int closefrm(TABLE *table, bool free_share);
View File
@@ -5601,13 +5601,13 @@ ER_SP_RECURSION_LIMIT
        eng "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.64s"
        ger "Rekursionsgrenze %d (durch Variable max_sp_recursion_depth gegeben) wurde für Routine %.64s überschritten"
ER_SP_PROC_TABLE_CORRUPT
-       eng "Failed to load routine %s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)"
+       eng "Failed to load routine %-.64s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)"
ER_PARTITION_REQUIRES_VALUES_ERROR
-       eng "%s PARTITIONING requires definition of VALUES %s for each partition"
-       swe "%s PARTITIONering kräver definition av VALUES %s för varje partition"
+       eng "%-.64s PARTITIONING requires definition of VALUES %-.64s for each partition"
+       swe "%-.64s PARTITIONering kräver definition av VALUES %-.64s för varje partition"
ER_PARTITION_WRONG_VALUES_ERROR
-       eng "Only %s PARTITIONING can use VALUES %s in partition definition"
-       swe "Endast %s partitionering kan använda VALUES %s i definition av partitionen"
+       eng "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition"
+       swe "Endast %-.64s partitionering kan använda VALUES %-.64s i definition av partitionen"
ER_PARTITION_MAXVALUE_ERROR
        eng "MAXVALUE can only be used in last partition definition"
        swe "MAXVALUE kan bara användas i definitionen av den sista partitionen"
@@ -5636,11 +5636,11 @@ ER_INCONSISTENT_PARTITION_INFO_ERROR
        eng "The partition info in the frm file is not consistent with what can be written into the frm file"
        swe "Partitioneringsinformationen i frm-filen är inte konsistent med vad som kan skrivas i frm-filen"
ER_PARTITION_FUNC_NOT_ALLOWED_ERROR
-       eng "The %s function returns the wrong type"
-       swe "%s-funktionen returnerar felaktig typ"
+       eng "The %-.64s function returns the wrong type"
+       swe "%-.64s-funktionen returnerar felaktig typ"
ER_PARTITIONS_MUST_BE_DEFINED_ERROR
-       eng "For %s partitions each partition must be defined"
-       swe "För %s partitionering så måste varje partition definieras"
+       eng "For %-.64s partitions each partition must be defined"
+       swe "För %-.64s partitionering så måste varje partition definieras"
ER_RANGE_NOT_INCREASING_ERROR
        eng "VALUES LESS THAN value must be strictly increasing for each partition"
        swe "Värden i VALUES LESS THAN måste vara strikt växande för varje partition"
@@ -5657,8 +5657,8 @@ ER_MIX_HANDLER_ERROR
        eng "The mix of handlers in the partitions is not allowed in this version of MySQL"
        swe "Denna mix av lagringsmotorer är inte tillåten i denna version av MySQL"
ER_PARTITION_NOT_DEFINED_ERROR
-       eng "For the partitioned engine it is necessary to define all %s"
-       swe "För partitioneringsmotorn så är det nödvändigt att definiera alla %s"
+       eng "For the partitioned engine it is necessary to define all %-.64s"
+       swe "För partitioneringsmotorn så är det nödvändigt att definiera alla %-.64s"
ER_TOO_MANY_PARTITIONS_ERROR
        eng "Too many partitions were defined"
        swe "För många partitioner definierades"
@@ -5671,30 +5671,36 @@ ER_CANT_CREATE_HANDLER_FILE
ER_BLOB_FIELD_IN_PART_FUNC_ERROR
        eng "A BLOB field is not allowed in partition function"
        swe "Ett BLOB-fält är inte tillåtet i partitioneringsfunktioner"
+ER_CHAR_SET_IN_PART_FIELD_ERROR
+       eng "VARCHAR only allowed if binary collation for partition functions"
+       swe "VARCHAR endast tillåten med binär collation för partitioneringsfunktion"
ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF
-       eng "A %s need to include all fields in the partition function"
-       swe "En %s behöver inkludera alla fält i partitioneringsfunktionen för denna lagringsmotor"
+       eng "A %-.64s need to include all fields in the partition function"
+       swe "En %-.64s behöver inkludera alla fält i partitioneringsfunktionen för denna lagringsmotor"
ER_NO_PARTS_ERROR
-       eng "Number of %s = 0 is not an allowed value"
-       swe "Antal %s = 0 är inte ett tillåten värde"
+       eng "Number of %-.64s = 0 is not an allowed value"
+       swe "Antal %-.64s = 0 är inte ett tillåten värde"
ER_PARTITION_MGMT_ON_NONPARTITIONED
        eng "Partition management on a not partitioned table is not possible"
        swe "Partitioneringskommando på en opartitionerad tabell är inte möjligt"
+ER_FOREIGN_KEY_ON_PARTITIONED
+       eng "Foreign key condition is not yet supported in conjunction with partitioning"
+       swe "Foreign key villkor är inte ännu implementerad i kombination med partitionering"
ER_DROP_PARTITION_NON_EXISTENT
-       eng "Error in list of partitions to change"
-       swe "Fel i listan av partitioner att förändra"
+       eng "Error in list of partitions to %-.64s"
+       swe "Fel i listan av partitioner att %-.64s"
ER_DROP_LAST_PARTITION
        eng "Cannot remove all partitions, use DROP TABLE instead"
        swe "Det är inte tillåtet att ta bort alla partitioner, använd DROP TABLE istället"
ER_COALESCE_ONLY_ON_HASH_PARTITION
        eng "COALESCE PARTITION can only be used on HASH/KEY partitions"
        swe "COALESCE PARTITION kan bara användas på HASH/KEY partitioner"
+ER_REORG_HASH_ONLY_ON_SAME_NO
+       eng "REORGANISE PARTITION can only be used to reorganise partitions not to change their numbers"
+       swe "REORGANISE PARTITION kan bara användas för att omorganisera partitioner, inte för att ändra deras antal"
+ER_REORG_NO_PARAM_ERROR
+       eng "REORGANISE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs"
+       swe "REORGANISE PARTITION utan parametrar kan bara användas på auto-partitionerade tabeller som använder HASH partitionering"
ER_ONLY_ON_RANGE_LIST_PARTITION
-       eng "%s PARTITION can only be used on RANGE/LIST partitions"
-       swe "%s PARTITION kan bara användas på RANGE/LIST-partitioner"
+       eng "%-.64s PARTITION can only be used on RANGE/LIST partitions"
+       swe "%-.64s PARTITION kan bara användas på RANGE/LIST-partitioner"
ER_ADD_PARTITION_SUBPART_ERROR
        eng "Trying to Add partition(s) with wrong number of subpartitions"
        swe "ADD PARTITION med fel antal subpartitioner"
@@ -5708,19 +5714,25 @@ ER_REORG_PARTITION_NOT_EXIST
        eng "More partitions to reorganise than there are partitions"
        swe "Fler partitioner att reorganisera än det finns partitioner"
ER_SAME_NAME_PARTITION
-       eng "All partitions must have unique names in the table"
-       swe "Alla partitioner i tabellen måste ha unika namn"
+       eng "Duplicate partition name %-.64s"
+       swe "Duplicerat partitionsnamn %-.64s"
+ER_NO_BINLOG_ERROR
+       eng "It is not allowed to shut off binlog on this command"
+       swe "Det är inte tillåtet att stänga av binlog på detta kommando"
ER_CONSECUTIVE_REORG_PARTITIONS
        eng "When reorganising a set of partitions they must be in consecutive order"
        swe "När ett antal partitioner omorganiseras måste de vara i konsekutiv ordning"
ER_REORG_OUTSIDE_RANGE
-       eng "The new partitions cover a bigger range then the reorganised partitions do"
-       swe "De nya partitionerna täcker ett större intervall än de omorganiserade partitionerna"
+       eng "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range"
+       swe "Reorganisering av rangepartitioner kan inte ändra den totala intervallet utom för den sista partitionen där intervallet kan utökas"
-ER_DROP_PARTITION_FAILURE
-       eng "Drop partition not supported in this version for this handler"
-ER_DROP_PARTITION_WHEN_FK_DEFINED
-       eng "Cannot drop a partition when a foreign key constraint is defined on the table"
-       swe "Kan inte ta bort en partition när en främmande nyckel är definierad på tabellen"
+ER_PARTITION_FUNCTION_FAILURE
+       eng "Partition function not supported in this version for this handler"
+ER_PART_STATE_ERROR
+       eng "Partition state cannot be defined from CREATE/ALTER TABLE"
+       swe "Partition state kan inte definieras från CREATE/ALTER TABLE"
+ER_LIMITED_PART_RANGE
+       eng "The %-.64s handler only supports 32 bit integers in VALUES"
+       swe "%-.64s stödjer endast 32 bitar i integers i VALUES"
ER_PLUGIN_IS_NOT_LOADED
        eng "Plugin '%-.64s' is not loaded"
ER_WRONG_VALUE
View File
@@ -2218,7 +2218,7 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
{
  if (abort_locks)
  {
-   mysql_lock_abort(thd,table);                  // Close waiting threads
+   mysql_lock_abort(thd,table, TRUE);            // Close waiting threads
    mysql_lock_remove(thd, thd->locked_tables,table);
    table->locked_by_flush=1;                     // Will be reopened with locks
  }
@@ -2361,7 +2361,7 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name)
    if (!strcmp(table->s->table_name.str, table_name) &&
        !strcmp(table->s->db.str, db))
    {
-     mysql_lock_abort(thd,table);
+     mysql_lock_abort(thd,table, TRUE);
      break;
    }
  }
@@ -2473,7 +2473,7 @@ retry:
                               HA_TRY_READ_ONLY),
                              (READ_KEYINFO | COMPUTE_TYPES |
                               EXTRA_RECORD),
-                             thd->open_options, entry)))
+                             thd->open_options, entry, FALSE)))
  {
    if (error == 7)                               // Table def changed
    {
@@ -2537,7 +2537,7 @@ retry:
                               HA_TRY_READ_ONLY),
                              READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
                              ha_open_options | HA_OPEN_FOR_REPAIR,
-                             entry) || ! entry->file ||
+                             entry, FALSE) || ! entry->file ||
      (entry->file->is_crashed() && entry->file->check_and_repair(thd)))
  {
    /* Give right error message */
@@ -3366,7 +3366,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
                             HA_GET_INDEX),
                            READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
                            ha_open_options,
-                           tmp_table))
+                           tmp_table, FALSE))
  {
    /* No need to lock share->mutex as this is not needed for tmp tables */
    free_table_share(share);
@@ -6069,3 +6069,155 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b)
{
  return a->length == b->length && !strncmp(a->str, b->str, a->length);
}
+/*
+  SYNOPSIS
+    abort_and_upgrade_lock()
+    lpt                        Parameter passing struct
+    All parameters passed through the ALTER_PARTITION_PARAM_TYPE object
+  RETURN VALUES
+    TRUE                       Failure
+    FALSE                      Success
+  DESCRIPTION
+    Remember old lock level (for possible downgrade later on), abort all
+    waiting threads and ensure that all locks currently held are
+    completed such that we own the lock exclusively and no other
+    interaction is ongoing.
+    thd                        Thread object
+    table                      Table object
+    db                         Database name
+    table_name                 Table name
+    old_lock_level             Old lock level
+*/
+bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+  uint flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG;
+  int error= FALSE;
+  DBUG_ENTER("abort_and_upgrade_lock");
+  lpt->old_lock_type= lpt->table->reginfo.lock_type;
+  VOID(pthread_mutex_lock(&LOCK_open));
+  mysql_lock_abort(lpt->thd, lpt->table, TRUE);
+  VOID(remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name, flags));
+  if (lpt->thd->killed)
+  {
+    lpt->thd->no_warnings_for_error= 0;
+    error= TRUE;
+  }
+  VOID(pthread_mutex_unlock(&LOCK_open));
+  DBUG_RETURN(error);
+}
+/*
+  SYNOPSIS
+    close_open_tables_and_downgrade()
+  RETURN VALUES
+    NONE
+  DESCRIPTION
+    We need to ensure that any thread that has managed to open the table
+    but not yet encountered our lock on the table is also thrown out to
+    ensure that no threads see our frm changes before the final version
+    is in place. The intermediate versions are only meant for use after a
+    crash and later REPAIR TABLE.
+    We also downgrade locks after the upgrade to WRITE_ONLY.
+*/
+void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+  VOID(pthread_mutex_lock(&LOCK_open));
+  remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name,
+                          RTFC_WAIT_OTHER_THREAD_FLAG);
+  VOID(pthread_mutex_unlock(&LOCK_open));
+  mysql_lock_downgrade_write(lpt->thd, lpt->table, lpt->old_lock_type);
+}
+/*
+  SYNOPSIS
+    mysql_wait_completed_table()
+    lpt                        Parameter passing struct
+    my_table                   My table object
+    All parameters passed through the ALTER_PARTITION_PARAM object
+  RETURN VALUES
+    TRUE                       Failure
+    FALSE                      Success
+  DESCRIPTION
+    We have changed the frm file and now we want to wait for all users of
+    the old frm to complete before proceeding to ensure that no one
+    remains that uses the old frm definition.
+    Start by ensuring that all users of the table will be removed from cache
+    once they are done. Then abort all that have stumbled on locks and
+    haven't been started yet.
+    thd                        Thread object
+    table                      Table object
+    db                         Database name
+    table_name                 Table name
+*/
+void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table)
+{
+  char key[MAX_DBKEY_LENGTH];
+  uint key_length;
+  TABLE *table;
+  DBUG_ENTER("mysql_wait_completed_table");
+  key_length=(uint) (strmov(strmov(key,lpt->db)+1,lpt->table_name)-key)+1;
+  VOID(pthread_mutex_lock(&LOCK_open));
+  HASH_SEARCH_STATE state;
+  for (table= (TABLE*) hash_first(&open_cache,(byte*) key,key_length,
+                                  &state) ;
+       table;
+       table= (TABLE*) hash_next(&open_cache,(byte*) key,key_length,
+                                 &state))
+  {
+    THD *in_use= table->in_use;
+    table->s->version= 0L;
+    if (!in_use)
+    {
+      relink_unused(table);
+    }
+    else
+    {
+      /* Kill delayed insert threads */
+      if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
+          ! in_use->killed)
+      {
+        in_use->killed= THD::KILL_CONNECTION;
+        pthread_mutex_lock(&in_use->mysys_var->mutex);
+        if (in_use->mysys_var->current_cond)
+        {
+          pthread_mutex_lock(in_use->mysys_var->current_mutex);
+          pthread_cond_broadcast(in_use->mysys_var->current_cond);
+          pthread_mutex_unlock(in_use->mysys_var->current_mutex);
+        }
+        pthread_mutex_unlock(&in_use->mysys_var->mutex);
+      }
+      /*
+        Now we must abort all table locks used by this thread
+        as the thread may be waiting to get a lock for another table
+      */
+      for (TABLE *thd_table= in_use->open_tables;
+           thd_table ;
+           thd_table= thd_table->next)
+      {
+        if (thd_table->db_stat)              // If table is open
+          mysql_lock_abort_for_thread(lpt->thd, thd_table);
+      }
+    }
+  }
+  /*
+    We start by removing all unused objects from the cache and marking
+    those in use for removal after completion. Now we also need to abort
+    all that are locked and are not progressing due to being locked
+    by our lock. We don't upgrade our lock here.
+  */
+  mysql_lock_abort(lpt->thd, my_table, FALSE);
+  VOID(pthread_mutex_unlock(&LOCK_open));
+  DBUG_VOID_RETURN;
+}
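Taken together, the three functions above sketch the locking protocol for a
fast ALTER TABLE ... PARTITION. The intended call order is roughly the
following (a simplified sketch only; lpt is the ALTER_PARTITION_PARAM_TYPE
struct and error handling is omitted):

    if (abort_and_upgrade_lock(lpt))        /* exclusive use of the table   */
      goto err;
    /* ... write the new frm, let the engine run change_partitions() ... */
    close_open_tables_and_downgrade(lpt);   /* flush users of the old frm,
                                               return to old_lock_type      */
    mysql_wait_completed_table(lpt, table); /* wait out remaining users of
                                               the old definition if needed */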
View File
@@ -665,23 +665,31 @@ public:
};
typedef class st_select_lex SELECT_LEX;
-#define ALTER_ADD_COLUMN            1
-#define ALTER_DROP_COLUMN           2
-#define ALTER_CHANGE_COLUMN         4
-#define ALTER_ADD_INDEX             8
-#define ALTER_DROP_INDEX            16
-#define ALTER_RENAME                32
-#define ALTER_ORDER                 64
-#define ALTER_OPTIONS               128
-#define ALTER_CHANGE_COLUMN_DEFAULT 256
-#define ALTER_KEYS_ONOFF            512
-#define ALTER_CONVERT               1024
-#define ALTER_FORCE                 2048
-#define ALTER_RECREATE              4096
-#define ALTER_ADD_PARTITION         8192
-#define ALTER_DROP_PARTITION        16384
-#define ALTER_COALESCE_PARTITION    32768
-#define ALTER_REORGANISE_PARTITION  65536
+#define ALTER_ADD_COLUMN            (1L << 0)
+#define ALTER_DROP_COLUMN           (1L << 1)
+#define ALTER_CHANGE_COLUMN         (1L << 2)
+#define ALTER_ADD_INDEX             (1L << 3)
+#define ALTER_DROP_INDEX            (1L << 4)
+#define ALTER_RENAME                (1L << 5)
+#define ALTER_ORDER                 (1L << 6)
+#define ALTER_OPTIONS               (1L << 7)
+#define ALTER_CHANGE_COLUMN_DEFAULT (1L << 8)
+#define ALTER_KEYS_ONOFF            (1L << 9)
+#define ALTER_CONVERT               (1L << 10)
+#define ALTER_FORCE                 (1L << 11)
+#define ALTER_RECREATE              (1L << 12)
+#define ALTER_ADD_PARTITION         (1L << 13)
+#define ALTER_DROP_PARTITION        (1L << 14)
+#define ALTER_COALESCE_PARTITION    (1L << 15)
+#define ALTER_REORGANIZE_PARTITION  (1L << 16)
+#define ALTER_PARTITION             (1L << 17)
+#define ALTER_OPTIMIZE_PARTITION    (1L << 18)
+#define ALTER_TABLE_REORG           (1L << 19)
+#define ALTER_REBUILD_PARTITION     (1L << 20)
+#define ALTER_ALL_PARTITION         (1L << 21)
+#define ALTER_ANALYZE_PARTITION     (1L << 22)
+#define ALTER_CHECK_PARTITION       (1L << 23)
+#define ALTER_REPAIR_PARTITION      (1L << 24)
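Since the flags now form a proper bitmask in (1L << n) style, several of the
new partition admin operations can be tested with a single mask, e.g.
(illustrative only):

    if (lex->alter_info.flags & (ALTER_OPTIMIZE_PARTITION |
                                 ALTER_ANALYZE_PARTITION |
                                 ALTER_CHECK_PARTITION |
                                 ALTER_REPAIR_PARTITION |
                                 ALTER_REBUILD_PARTITION))
      ;  /* route to the partition admin code path */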
typedef struct st_alter_info
{
File diff suppressed because it is too large
View File
@@ -1243,8 +1243,8 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
    char *part_syntax;
    if (table->part_info &&
        ((part_syntax= generate_partition_syntax(table->part_info,
                                                 &part_syntax_len,
                                                 FALSE,FALSE))))
    {
      packet->append(part_syntax, part_syntax_len);
      my_free(part_syntax, MYF(0));
File diff suppressed because it is too large
View File
@@ -530,6 +530,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token  READ_SYM
%token  READ_WRITE_SYM
%token  REAL
+%token  REBUILD_SYM
%token  RECOVER_SYM
%token  REDO_BUFFER_SIZE_SYM
%token  REDOFILE_SYM
@@ -542,7 +543,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token  RELEASE_SYM
%token  RELOAD
%token  RENAME
-%token  REORGANISE_SYM
+%token  REORGANIZE_SYM
%token  REPAIR
%token  REPEATABLE_SYM
%token  REPEAT_SYM
@@ -3331,9 +3332,13 @@ partitioning:
          lex->part_info= new partition_info();
          if (!lex->part_info)
          {
-           my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_info));
+           mem_alloc_error(sizeof(partition_info));
            YYABORT;
          }
+         if (lex->sql_command == SQLCOM_ALTER_TABLE)
+         {
+           lex->alter_info.flags|= ALTER_PARTITION;
+         }
        }
        partition
        ;
@@ -3342,24 +3347,15 @@ partition_entry:
        PARTITION_SYM
        {
          LEX *lex= Lex;
-         if (lex->part_info)
+         if (!lex->part_info)
          {
-           /*
-             We enter here when opening the frm file to translate
-             partition info string into part_info data structure.
-           */
-           lex->part_info= new partition_info();
-           if (!lex->part_info)
-           {
-             my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_info));
-             YYABORT;
-           }
-         }
-         else
-         {
            yyerror(ER(ER_PARTITION_ENTRY_ERROR));
            YYABORT;
          }
+         /*
+           We enter here when opening the frm file to translate
+           partition info string into part_info data structure.
+         */
        }
        partition {}
        ;
@@ -3393,14 +3389,23 @@ opt_linear:
        ;
part_field_list:
+       /* empty */ {}
+     | part_field_item_list {}
+     ;
+part_field_item_list:
        part_field_item {}
-     | part_field_list ',' part_field_item {}
+     | part_field_item_list ',' part_field_item {}
        ;
part_field_item:
        ident
        {
-         Lex->part_info->part_field_list.push_back($1.str);
+         if (Lex->part_info->part_field_list.push_back($1.str))
+         {
+           mem_alloc_error(1);
+           YYABORT;
+         }
        }
        ;
@@ -3434,12 +3439,15 @@ opt_no_parts:
      | PARTITIONS_SYM ulong_num
        {
          uint no_parts= $2;
+         LEX *lex= Lex;
          if (no_parts == 0)
          {
            my_error(ER_NO_PARTS_ERROR, MYF(0), "partitions");
            YYABORT;
          }
-         Lex->part_info->no_parts= no_parts;
+         lex->part_info->no_parts= no_parts;
+         lex->part_info->use_default_no_partitions= FALSE;
        }
        ;
@@ -3465,7 +3473,13 @@ sub_part_field_list:
sub_part_field_item:
        ident
-       { Lex->part_info->subpart_field_list.push_back($1.str); }
+       {
+         if (Lex->part_info->subpart_field_list.push_back($1.str))
+         {
+           mem_alloc_error(1);
+           YYABORT;
+         }
+       }
        ;
part_func_expr:
@@ -3489,12 +3503,14 @@ opt_no_subparts:
      | SUBPARTITIONS_SYM ulong_num
        {
          uint no_parts= $2;
+         LEX *lex= Lex;
          if (no_parts == 0)
          {
            my_error(ER_NO_PARTS_ERROR, MYF(0), "subpartitions");
            YYABORT;
          }
-         Lex->part_info->no_subparts= no_parts;
+         lex->part_info->no_subparts= no_parts;
+         lex->part_info->use_default_no_subpartitions= FALSE;
        }
        ;
@@ -3505,21 +3521,21 @@ part_defs:
        {
          LEX *lex= Lex;
          partition_info *part_info= lex->part_info;
+         uint count_curr_parts= part_info->partitions.elements;
          if (part_info->no_parts != 0)
          {
            if (part_info->no_parts !=
-               part_info->count_curr_parts)
+               count_curr_parts)
            {
              yyerror(ER(ER_PARTITION_WRONG_NO_PART_ERROR));
              YYABORT;
            }
          }
-         else if (part_info->count_curr_parts > 0)
+         else if (count_curr_parts > 0)
          {
-           part_info->no_parts= part_info->count_curr_parts;
+           part_info->no_parts= count_curr_parts;
          }
          part_info->count_curr_subparts= 0;
-         part_info->count_curr_parts= 0;
        }
        ;
@@ -3534,17 +3550,79 @@ part_definition:
        {
          LEX *lex= Lex;
          partition_info *part_info= lex->part_info;
          partition_element *p_elem= new partition_element();
-         if (!p_elem)
+         uint part_id= part_info->partitions.elements +
+                       part_info->temp_partitions.elements;
+         enum partition_state part_state;
+         if (part_info->part_state)
+           part_state= (enum partition_state)part_info->part_state[part_id];
+         else
+           part_state= PART_NORMAL;
+         switch (part_state)
          {
-           my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
-           YYABORT;
+         case PART_TO_BE_DROPPED:
+           /*
+             This part is currently removed so we keep it in a
+             temporary list for REPAIR TABLE to be able to handle
+             failures during drop partition process.
+           */
+         case PART_TO_BE_ADDED:
+           /*
+             This part is currently being added so we keep it in a
+             temporary list for REPAIR TABLE to be able to handle
+             failures during add partition process.
+           */
+           if (!p_elem || part_info->temp_partitions.push_back(p_elem))
+           {
+             mem_alloc_error(sizeof(partition_element));
+             YYABORT;
+           }
+           break;
+         case PART_IS_ADDED:
+           /*
+             Part has been added and is now a normal partition
+           */
+         case PART_TO_BE_REORGED:
+           /*
+             This part is currently reorganised, it is still however
+             used so we keep it in the list of partitions. We do
+             however need the state to be able to handle REPAIR TABLE
+             after failures in the reorganisation process.
+           */
+         case PART_REORGED_DROPPED:
+           /*
+             This part is currently reorganised as part of a
+             COALESCE PARTITION and it will be dropped without a new
+             replacement partition after completing the reorganisation.
+           */
+         case PART_CHANGED:
+           /*
+             This part is currently split or merged as part of ADD
+             PARTITION for a hash partition or as part of COALESCE
+             PARTITION for a hash partitioned table.
+           */
+         case PART_IS_CHANGED:
+           /*
+             This part has been split or merged as part of ADD
+             PARTITION for a hash partition or as part of COALESCE
+             PARTITION for a hash partitioned table.
+           */
+         case PART_NORMAL:
+           if (!p_elem || part_info->partitions.push_back(p_elem))
+           {
+             mem_alloc_error(sizeof(partition_element));
+             YYABORT;
+           }
+           break;
+         default:
+           mem_alloc_error((part_id * 1000) + part_state);
+           YYABORT;
          }
+         p_elem->part_state= part_state;
          part_info->curr_part_elem= p_elem;
          part_info->current_partition= p_elem;
          part_info->use_default_partitions= FALSE;
-         part_info->partitions.push_back(p_elem);
-         part_info->count_curr_parts++;
+         part_info->use_default_no_partitions= FALSE;
+         p_elem->engine_type= NULL;
        }
        part_name {}
        opt_part_values {}
@@ -3554,7 +3632,12 @@ part_definition:
part_name:
        ident_or_text
-       { Lex->part_info->curr_part_elem->partition_name= $1.str; }
+       {
+         LEX *lex= Lex;
+         partition_info *part_info= lex->part_info;
+         partition_element *p_elem= part_info->curr_part_elem;
+         p_elem->partition_name= $1.str;
+       }
        ;
opt_part_values:
@@ -3643,13 +3726,13 @@ part_list_item:
        part_bit_expr
        {
          longlong *value_ptr;
-         if (!(value_ptr= (longlong*)sql_alloc(sizeof(longlong))))
+         if (!(value_ptr= (longlong*)sql_alloc(sizeof(longlong))) ||
+             ((*value_ptr= $1, FALSE) ||
+              Lex->part_info->curr_part_elem->list_val_list.push_back(value_ptr)))
          {
-           my_error(ER_OUTOFMEMORY, MYF(0), sizeof(longlong));
+           mem_alloc_error(sizeof(longlong));
            YYABORT;
          }
-         *value_ptr= $1;
-         Lex->part_info->curr_part_elem->list_val_list.push_back(value_ptr);
        }
        ;
@@ -3659,20 +3742,23 @@ part_bit_expr:
          Item *part_expr= $1;
          bool not_corr_func;
          LEX *lex= Lex;
+         THD *thd= YYTHD;
          longlong item_value;
          Name_resolution_context *context= &lex->current_select->context;
          TABLE_LIST *save_list= context->table_list;
+         const char *save_where= thd->where;
          context->table_list= 0;
-         part_expr->fix_fields(YYTHD, (Item**)0);
-         context->table_list= save_list;
-         not_corr_func= !part_expr->const_item() ||
-                        !lex->safe_to_cache_query;
-         if (not_corr_func)
+         thd->where= "partition function";
+         if (part_expr->fix_fields(YYTHD, (Item**)0) ||
+             ((context->table_list= save_list), FALSE) ||
+             (!part_expr->const_item()) ||
+             (!lex->safe_to_cache_query))
          {
            yyerror(ER(ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR));
            YYABORT;
          }
+         thd->where= save_where;
          if (part_expr->result_type() != INT_RESULT)
          {
            yyerror(ER(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR));
@ -3717,16 +3803,16 @@ sub_part_definition:
            LEX *lex= Lex;
            partition_info *part_info= lex->part_info;
            partition_element *p_elem= new partition_element();
-           if (!p_elem)
+           if (!p_elem ||
+               part_info->current_partition->subpartitions.push_back(p_elem))
            {
-             my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
+             mem_alloc_error(sizeof(partition_element));
              YYABORT;
            }
            part_info->curr_part_elem= p_elem;
-           part_info->current_partition->subpartitions.push_back(p_elem);
            part_info->use_default_subpartitions= FALSE;
+           part_info->use_default_no_subpartitions= FALSE;
            part_info->count_curr_subparts++;
-           p_elem->engine_type= NULL;
          }
          sub_name opt_part_options {}
          ;
@ -4794,7 +4880,7 @@ alter_commands:
        | DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; }
        | IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; }
        | alter_list
          opt_partitioning
        | partitioning
/*
  This part was added for release 5.1 by Mikael Ronström.
@ -4809,26 +4895,77 @@ alter_commands:
          {
            Lex->alter_info.flags|= ALTER_DROP_PARTITION;
          }
-       | COALESCE PARTITION_SYM ulong_num
+       | REBUILD_SYM PARTITION_SYM opt_no_write_to_binlog
+         all_or_alt_part_name_list
+         {
+           LEX *lex= Lex;
+           lex->alter_info.flags|= ALTER_REBUILD_PARTITION;
+           lex->no_write_to_binlog= $3;
+         }
+       | OPTIMIZE PARTITION_SYM opt_no_write_to_binlog
+         all_or_alt_part_name_list
+         {
+           LEX *lex= Lex;
+           lex->alter_info.flags|= ALTER_OPTIMIZE_PARTITION;
+           lex->no_write_to_binlog= $3;
+           lex->check_opt.init();
+         }
+         opt_mi_check_type
+       | ANALYZE_SYM PARTITION_SYM opt_no_write_to_binlog
+         all_or_alt_part_name_list
+         {
+           LEX *lex= Lex;
+           lex->alter_info.flags|= ALTER_ANALYZE_PARTITION;
+           lex->no_write_to_binlog= $3;
+           lex->check_opt.init();
+         }
+         opt_mi_check_type
+       | CHECK_SYM PARTITION_SYM all_or_alt_part_name_list
+         {
+           LEX *lex= Lex;
+           lex->alter_info.flags|= ALTER_CHECK_PARTITION;
+           lex->check_opt.init();
+         }
+         opt_mi_check_type
+       | REPAIR PARTITION_SYM opt_no_write_to_binlog
+         all_or_alt_part_name_list
+         {
+           LEX *lex= Lex;
+           lex->alter_info.flags|= ALTER_REPAIR_PARTITION;
+           lex->no_write_to_binlog= $3;
+           lex->check_opt.init();
+         }
+         opt_mi_repair_type
+       | COALESCE PARTITION_SYM opt_no_write_to_binlog ulong_num
          {
            LEX *lex= Lex;
            lex->alter_info.flags|= ALTER_COALESCE_PARTITION;
-           lex->alter_info.no_parts= $3;
+           lex->no_write_to_binlog= $3;
+           lex->alter_info.no_parts= $4;
          }
        | reorg_partition_rule
        ;
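Note that the grammar actions above do no work themselves; each one only ORs a bit into Lex->alter_info.flags (plus initializing check_opt for the maintenance commands), and the ALTER TABLE executor dispatches on those bits later. A hedged sketch of that dispatch pattern, with made-up flag values rather than the real sql_lex.h constants:

/* Sketch of bit-flag dispatch; the flag values are illustrative. */
#include <cstdio>

enum {
  ALTER_ADD_PARTITION=        1U << 0,
  ALTER_DROP_PARTITION=       1U << 1,
  ALTER_COALESCE_PARTITION=   1U << 2,
  ALTER_REORGANIZE_PARTITION= 1U << 3,
  ALTER_TABLE_REORG=          1U << 4,
  ALTER_REBUILD_PARTITION=    1U << 5,
  ALTER_OPTIMIZE_PARTITION=   1U << 6,
  ALTER_ANALYZE_PARTITION=    1U << 7,
  ALTER_CHECK_PARTITION=      1U << 8,
  ALTER_REPAIR_PARTITION=     1U << 9,
  ALTER_ALL_PARTITION=        1U << 10
};

static void dispatch(unsigned flags)
{
  // The maintenance variants share one code path; ALL means
  // "no partition name list was given".
  const unsigned admin= ALTER_REBUILD_PARTITION | ALTER_OPTIMIZE_PARTITION |
                        ALTER_ANALYZE_PARTITION | ALTER_CHECK_PARTITION |
                        ALTER_REPAIR_PARTITION;
  if (flags & admin)
    std::printf("admin op on %s partitions\n",
                (flags & ALTER_ALL_PARTITION) ? "all" : "named");
  else if (flags & ALTER_TABLE_REORG)
    std::printf("change number of hash partitions\n");
}

int main() { dispatch(ALTER_REBUILD_PARTITION | ALTER_ALL_PARTITION); return 0; }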
all_or_alt_part_name_list:
| ALL
{
Lex->alter_info.flags|= ALTER_ALL_PARTITION;
}
| alt_part_name_list
;
add_partition_rule:
-         ADD PARTITION_SYM
+         ADD PARTITION_SYM opt_no_write_to_binlog
          {
            LEX *lex= Lex;
            lex->part_info= new partition_info();
            if (!lex->part_info)
            {
-             my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_info));
+             mem_alloc_error(sizeof(partition_info));
              YYABORT;
            }
            lex->alter_info.flags|= ALTER_ADD_PARTITION;
+           lex->no_write_to_binlog= $3;
          }
          add_part_extra
          {}
@ -4838,7 +4975,7 @@ add_part_extra:
        | '(' part_def_list ')'
          {
            LEX *lex= Lex;
-           lex->part_info->no_parts= lex->part_info->count_curr_parts;
+           lex->part_info->no_parts= lex->part_info->partitions.elements;
          }
        | PARTITIONS_SYM ulong_num
          {
@ -4848,21 +4985,34 @@ add_part_extra:
        ;

reorg_partition_rule:
-         REORGANISE_SYM PARTITION_SYM
+         REORGANIZE_SYM PARTITION_SYM opt_no_write_to_binlog
          {
            LEX *lex= Lex;
            lex->part_info= new partition_info();
            if (!lex->part_info)
            {
-             my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_info));
+             mem_alloc_error(sizeof(partition_info));
              YYABORT;
            }
-           lex->alter_info.flags|= ALTER_REORGANISE_PARTITION;
+           lex->no_write_to_binlog= $3;
          }
-         alt_part_name_list INTO '(' part_def_list ')'
+         reorg_parts_rule
          ;

+reorg_parts_rule:
+         /* empty */
+         {
+           Lex->alter_info.flags|= ALTER_TABLE_REORG;
+         }
+         |
+         alt_part_name_list
+         {
+           Lex->alter_info.flags|= ALTER_REORGANIZE_PARTITION;
+         }
+         INTO '(' part_def_list ')'
          {
            LEX *lex= Lex;
-           lex->part_info->no_parts= lex->part_info->count_curr_parts;
+           lex->part_info->no_parts= lex->part_info->partitions.elements;
          }
        ;
@ -4874,7 +5024,11 @@ alt_part_name_list:
alt_part_name_item:
          ident
          {
-           Lex->alter_info.partition_names.push_back($1.str);
+           if (Lex->alter_info.partition_names.push_back($1.str))
+           {
+             mem_alloc_error(1);
+             YYABORT;
+           }
          }
          ;
@ -9262,6 +9416,7 @@ keyword_sp:
        | RAID_CHUNKSIZE {}
        | RAID_STRIPED_SYM {}
        | RAID_TYPE {}
+       | REBUILD_SYM {}
        | RECOVER_SYM {}
        | REDO_BUFFER_SIZE_SYM {}
        | REDOFILE_SYM {}
@ -9269,7 +9424,7 @@ keyword_sp:
        | RELAY_LOG_FILE_SYM {}
        | RELAY_LOG_POS_SYM {}
        | RELOAD {}
-       | REORGANISE_SYM {}
+       | REORGANIZE_SYM {}
        | REPEATABLE_SYM {}
        | REPLICATION {}
        | RESOURCES {}

View File

@ -388,6 +388,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
#ifdef WITH_PARTITION_STORAGE_ENGINE
  share->default_part_db_type=
    ha_checktype(thd, (enum legacy_db_type) (uint) *(head+61), 0, 0);
+ DBUG_PRINT("info", ("default_part_db_type = %u", head[61]));
#endif
  legacy_db_type= (enum legacy_db_type) (uint) *(head+3);
  share->db_type= ha_checktype(thd, legacy_db_type, 0, 0);
@ -525,7 +526,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
         ((uint2korr(head+14) == 0xffff ?
           uint4korr(head+47) : uint2korr(head+14))));
- if ((n_length= uint2korr(head+55)))
+ if ((n_length= uint4korr(head+55)))
  {
    /* Read extra data segment */
    char *buff, *next_chunk, *buff_end;
@ -599,6 +600,38 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
#endif
      next_chunk+= 5 + partition_info_len;
    }
if (share->mysql_version > 50105 && next_chunk + 5 < buff_end)
{
/*
Partition state was introduced to support partition management in version 5.1.5
*/
uint32 part_state_len= uint4korr(next_chunk);
#ifdef WITH_PARTITION_STORAGE_ENGINE
if ((share->part_state_len= part_state_len))
if (!(share->part_state=
(uchar*) memdup_root(&share->mem_root, next_chunk + 4,
part_state_len)))
{
my_free(buff, MYF(0));
goto err;
}
#else
if (part_state_len)
{
DBUG_PRINT("info", ("WITH_PARTITION_STORAGE_ENGINE is not defined"));
my_free(buff, MYF(0));
goto err;
}
#endif
next_chunk+= 4 + part_state_len;
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
else
{
share->part_state_len= 0;
share->part_state= NULL;
}
#endif
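The new partition-state chunk follows the existing .frm extra-data convention: a 4-byte little-endian length immediately followed by the payload, with next_chunk advanced past both. A small sketch of that walk, assuming only the length-prefix convention shown above:

/* Sketch of the length-prefixed chunk walk used above; assumes the
   .frm convention of 4-byte little-endian length prefixes. */
#include <cstdint>
#include <cstdio>

static uint32_t uint4korr_(const unsigned char *p)   // like MySQL's uint4korr
{
  return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
         ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main()
{
  // [len=3]["abc"] followed by an empty chunk [len=0]
  unsigned char buf[]= {3,0,0,0,'a','b','c', 0,0,0,0};
  const unsigned char *next_chunk= buf, *buff_end= buf + sizeof(buf);
  while (next_chunk + 4 <= buff_end)
  {
    uint32_t len= uint4korr_(next_chunk);
    std::printf("chunk of %u bytes\n", len);
    next_chunk+= 4 + len;
  }
  return 0;
}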
  keyinfo= share->key_info;
  for (i= 0; i < keys; i++, keyinfo++)
  {
@ -1223,7 +1256,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
                          uint db_stat, uint prgflag, uint ha_open_flags,
-                         TABLE *outparam)
+                         TABLE *outparam, bool is_create_table)
{
  int error;
  uint records, i;
@ -1379,13 +1412,17 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
  {
    if (mysql_unpack_partition(thd, share->partition_info,
                               share->partition_info_len,
-                              outparam, share->default_part_db_type))
+                              (uchar*)share->part_state,
+                              share->part_state_len,
+                              outparam, is_create_table,
+                              share->default_part_db_type))
      goto err;
    /*
      Fix the partition functions and ensure they are not constant
      functions
    */
-   if (fix_partition_func(thd, share->normalized_path.str, outparam))
+   if (fix_partition_func(thd, share->normalized_path.str, outparam,
+                          is_create_table))
      goto err;
  }
#endif
@ -1503,6 +1540,7 @@ int closefrm(register TABLE *table, bool free_share)
  if (table->part_info)
  {
    free_items(table->part_info->item_free_list);
+   table->part_info->item_free_list= 0;
    table->part_info= 0;
  }
#endif
@ -1985,7 +2023,7 @@ File create_frm(THD *thd, const char *name, const char *db,
  int4store(fileinfo+47, key_length);
  tmp= MYSQL_VERSION_ID;          // Store to avoid warning from int4store
  int4store(fileinfo+51, tmp);
- int2store(fileinfo+55, create_info->extra_size);
+ int4store(fileinfo+55, create_info->extra_size);
  bzero(fill,IO_SIZE);
  for (; length > IO_SIZE ; length-= IO_SIZE)
  {

View File

@ -201,6 +201,8 @@ typedef struct st_table_share
#ifdef WITH_PARTITION_STORAGE_ENGINE
  const uchar *partition_info;
  uint partition_info_len;
+ const uchar *part_state;
+ uint part_state_len;
  handlerton *default_part_db_type;
#endif
} TABLE_SHARE;

View File

@ -89,9 +89,6 @@ bool mysql_create_frm(THD *thd, const char *file_name,
  partition_info *part_info= thd->lex->part_info;
#endif
  DBUG_ENTER("mysql_create_frm");
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- thd->lex->part_info= NULL;
-#endif
  DBUG_ASSERT(*fn_rext((char*)file_name)); // Check .frm extension
  formnames.type_names=0;
@ -134,10 +131,13 @@ bool mysql_create_frm(THD *thd, const char *file_name,
  create_info->extra_size= (2 + str_db_type.length +
                            2 + create_info->connect_string.length);
  /* Partition */
- create_info->extra_size+= 5;
+ create_info->extra_size+= 9;
#ifdef WITH_PARTITION_STORAGE_ENGINE
  if (part_info)
+ {
    create_info->extra_size+= part_info->part_info_len;
+   create_info->extra_size+= part_info->part_state_len;
+ }
#endif

  for (i= 0; i < keys; i++)
@ -171,7 +171,10 @@ bool mysql_create_frm(THD *thd, const char *file_name,
#ifdef WITH_PARTITION_STORAGE_ENGINE
  if (part_info)
+ {
    fileinfo[61]= (uchar) ha_legacy_type(part_info->default_engine_type);
+   DBUG_PRINT("info", ("part_db_type = %d", fileinfo[61]));
+ }
#endif
  int2store(fileinfo+59,db_file->extra_rec_buf_length());
  if (my_pwrite(file,(byte*) fileinfo,64,0L,MYF_RW) ||
@ -206,12 +209,18 @@ bool mysql_create_frm(THD *thd, const char *file_name,
        my_write(file, (const byte*)part_info->part_info_string,
                 part_info->part_info_len + 1, MYF_RW))
      goto err;
+   DBUG_PRINT("info", ("Part state len = %d", part_info->part_state_len));
+   int4store(buff, part_info->part_state_len);
+   if (my_write(file, (const byte*)buff, 4, MYF_RW) ||
+       my_write(file, (const byte*)part_info->part_state,
+                part_info->part_state_len, MYF_RW))
+     goto err;
  }
  else
#endif
  {
-   bzero(buff, 5);
-   if (my_write(file, (byte*) buff, 5, MYF_RW))
+   bzero(buff, 9);
+   if (my_write(file, (byte*) buff, 9, MYF_RW))
      goto err;
  }
  for (i= 0; i < keys; i++)
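The extra_size header field at offset 55 of the .frm header is widened here from int2store to int4store, matching the uint4korr read added in open_binary_frm(); without the wider field, an extra segment beyond 64 KB would silently truncate. A sketch of the difference, with the store macros re-implemented here purely for illustration:

/* Sketch of the widened length field: the same offset-55 slot is
   written with 4 bytes instead of 2, matching the uint4korr read. */
#include <cstdint>
#include <cstdio>

static void int2store_(unsigned char *p, uint16_t v)
{ p[0]= v & 0xff; p[1]= v >> 8; }
static void int4store_(unsigned char *p, uint32_t v)
{ p[0]= v; p[1]= v >> 8; p[2]= v >> 16; p[3]= v >> 24; }

int main()
{
  unsigned char fileinfo[64]= {0};
  uint32_t extra_size= 70000;                       // no longer fits in 16 bits
  int2store_(fileinfo + 55, (uint16_t)extra_size);  // old: truncates to 4464
  std::printf("2-byte store reads back %u\n",
              fileinfo[55] | (fileinfo[56] << 8));
  int4store_(fileinfo + 55, extra_size);            // new: exact
  std::printf("4-byte store reads back %u\n",
              fileinfo[55] | (fileinfo[56] << 8) |
              (fileinfo[57] << 16) | ((uint32_t)fileinfo[58] << 24));
  return 0;
}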

View File

@ -88,6 +88,8 @@ handlerton tina_hton= {
  NULL, /* Start Consistent Snapshot */
  NULL, /* Flush logs */
  NULL, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
  NULL, /* Alter Tablespace */
  HTON_CAN_RECREATE
};

View File

@ -103,6 +103,8 @@ handlerton example_hton= {
  NULL, /* Start Consistent Snapshot */
  NULL, /* Flush logs */
  NULL, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
  NULL, /* Alter tablespace */
  HTON_CAN_RECREATE
};

View File

@ -64,6 +64,7 @@
#define MAX_NULL_BITS 4096
#define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES))
#define MAX_NDB_PARTITIONS 1024
+#define MAX_RANGE_DATA (131072+MAX_NDB_PARTITIONS) // 0.5 MByte of list data
#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1)
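A quick check of the "0.5 MByte" claim in the new MAX_RANGE_DATA comment, assuming the constant counts 32-bit words as the surrounding signal code does:

/* Quick check of the "0.5 MByte" comment, assuming the constant
   counts 32-bit words. */
#include <cstdio>
#define MAX_NDB_PARTITIONS 1024
#define MAX_RANGE_DATA (131072 + MAX_NDB_PARTITIONS)
int main()
{
  std::printf("%d words = %d bytes (~%.2f MB)\n",
              MAX_RANGE_DATA, MAX_RANGE_DATA * 4,
              MAX_RANGE_DATA * 4 / (1024.0 * 1024.0));
  return 0;   // 132096 words = 528384 bytes, ~0.50 MB
}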
/*

View File

@ -63,6 +63,10 @@ private:
/*
   n = Changed name
   f = Changed frm
+  d = Changed fragment data
+  r = Changed range or list array
+  t = Changed tablespace name array
+  s = Changed tablespace id array

           1111111111222222222233
 01234567890123456789012345678901
@ -70,6 +74,10 @@ private:
*/
#define NAME_SHIFT (0)
#define FRM_SHIFT (1)
+#define FRAG_DATA_SHIFT (2)
+#define RANGE_LIST_SHIFT (3)
+#define TS_NAME_SHIFT (4)
+#define TS_SHIFT (5)

/**
 * Getters and setters
@ -78,8 +86,28 @@ private:
  static void setNameFlag(UintR & changeMask, Uint32 nameFlg);
  static Uint8 getFrmFlag(const UintR & changeMask);
  static void setFrmFlag(UintR & changeMask, Uint32 frmFlg);
static Uint8 getFragDataFlag(const UintR & changeMask);
static void setFragDataFlag(UintR & changeMask, Uint32 fragFlg);
static Uint8 getRangeListFlag(const UintR & changeMask);
static void setRangeListFlag(UintR & changeMask, Uint32 rangeFlg);
static Uint8 getTsNameFlag(const UintR & changeMask);
static void setTsNameFlag(UintR & changeMask, Uint32 tsNameFlg);
static Uint8 getTsFlag(const UintR & changeMask);
static void setTsFlag(UintR & changeMask, Uint32 tsFlg);
};
inline
Uint8
AlterTableReq::getTsFlag(const UintR & changeMask){
return (Uint8)((changeMask >> TS_SHIFT) & 1);
}
inline
void
AlterTableReq::setTsFlag(UintR & changeMask, Uint32 tsFlg){
changeMask |= (tsFlg << TS_SHIFT);
}
inline
Uint8
AlterTableReq::getNameFlag(const UintR & changeMask){
@ -104,6 +132,42 @@ AlterTableReq::setFrmFlag(UintR & changeMask, Uint32 frmFlg){
  changeMask |= (frmFlg << FRM_SHIFT);
}
inline
Uint8
AlterTableReq::getFragDataFlag(const UintR & changeMask){
return (Uint8)((changeMask >> FRAG_DATA_SHIFT) & 1);
}
inline
void
AlterTableReq::setFragDataFlag(UintR & changeMask, Uint32 fragDataFlg){
changeMask |= (fragDataFlg << FRAG_DATA_SHIFT);
}
inline
Uint8
AlterTableReq::getRangeListFlag(const UintR & changeMask){
return (Uint8)((changeMask >> RANGE_LIST_SHIFT) & 1);
}
inline
void
AlterTableReq::setRangeListFlag(UintR & changeMask, Uint32 rangeFlg){
changeMask |= (rangeFlg << RANGE_LIST_SHIFT);
}
inline
Uint8
AlterTableReq::getTsNameFlag(const UintR & changeMask){
return (Uint8)((changeMask >> TS_NAME_SHIFT) & 1);
}
inline
void
AlterTableReq::setTsNameFlag(UintR & changeMask, Uint32 tsNameFlg){
changeMask |= (tsNameFlg << TS_NAME_SHIFT);
}
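Each new flag follows the existing change-mask idiom: the setter ORs flag << SHIFT into the mask, and the getter shifts back and masks with 1. A standalone sketch of the pattern (the typedefs are stand-ins for ndb_types.h):

/* Standalone sketch of the change-mask pattern above. */
#include <cstdio>
typedef unsigned int UintR;
typedef unsigned int Uint32;
typedef unsigned char Uint8;

#define FRAG_DATA_SHIFT (2)
#define RANGE_LIST_SHIFT (3)

static void setRangeListFlag(UintR &changeMask, Uint32 flg)
{ changeMask |= (flg << RANGE_LIST_SHIFT); }
static Uint8 getRangeListFlag(const UintR &changeMask)
{ return (Uint8)((changeMask >> RANGE_LIST_SHIFT) & 1); }

int main()
{
  UintR mask= 0;
  setRangeListFlag(mask, 1);
  std::printf("range=%u frag=%u\n",
              getRangeListFlag(mask), (mask >> FRAG_DATA_SHIFT) & 1);
  return 0;   // prints range=1 frag=0
}

As in the original, a setter called with 0 does not clear a previously set bit; callers are expected to start from a zeroed mask.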
class AlterTableRef {
/**

View File

@ -32,6 +32,7 @@ class DiAddTabReq {
public:
  STATIC_CONST( SignalLength = 9 );
  SECTION( FRAGMENTATION = 0 );
+ SECTION( TS_RANGE = 0 );
private:
  Uint32 connectPtr;

View File

@ -122,6 +122,15 @@ public:
    FragmentData = 130,  // CREATE_FRAGMENTATION reply
    TablespaceId = 131,
    TablespaceVersion = 132,
TablespaceDataLen = 133,
TablespaceData = 134,
RangeListDataLen = 135,
RangeListData = 136,
ReplicaDataLen = 137,
ReplicaData = 138,
MaxRowsLow = 139,
MaxRowsHigh = 140,
DefaultNoPartFlag = 141,
    RowGCIFlag = 150,
    RowChecksumFlag = 151,
@ -298,11 +307,25 @@ public:
    Uint32 CustomTriggerId;
    Uint32 TablespaceId;
    Uint32 TablespaceVersion;
Uint32 MaxRowsLow;
Uint32 MaxRowsHigh;
Uint32 DefaultNoPartFlag;
/*
TODO RONM:
We need to replace FRM, Fragment Data, Tablespace Data and in
very particular RangeListData with dynamic arrays
*/
    Uint32 FrmLen;
    char FrmData[MAX_FRM_DATA_SIZE];
    Uint32 FragmentCount;
+   Uint32 ReplicaDataLen;
+   Uint16 ReplicaData[MAX_FRAGMENT_DATA_BYTES];
    Uint32 FragmentDataLen;
-   Uint16 FragmentData[(MAX_FRAGMENT_DATA_BYTES+1)/2];
+   Uint16 FragmentData[3*MAX_NDB_PARTITIONS];
+   Uint32 TablespaceDataLen;
+   Uint32 TablespaceData[2*MAX_NDB_PARTITIONS];
+   Uint32 RangeListDataLen;
+   char RangeListData[4*2*MAX_NDB_PARTITIONS*2];
    Uint32 RowGCIFlag;
    Uint32 RowChecksumFlag;
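DictTabInfo properties are 32-bit values, so the new 64-bit max-rows hint is carried as a MaxRowsLow/MaxRowsHigh pair. A sketch of the split and reassembly such pairs imply (the low-word-first layout is an assumption here):

/* Sketch of the Low/High split used for MaxRows; the 32-bit
   property protocol cannot carry a 64-bit value directly. */
#include <cstdint>
#include <cstdio>

int main()
{
  uint64_t maxRows= 6000000000ULL;             // > 2^32 rows
  uint32_t MaxRowsLow=  (uint32_t)(maxRows & 0xffffffffULL);
  uint32_t MaxRowsHigh= (uint32_t)(maxRows >> 32);
  uint64_t back= ((uint64_t)MaxRowsHigh << 32) | MaxRowsLow;
  std::printf("round-trip %s\n", back == maxRows ? "ok" : "broken");
  return 0;
}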

View File

@ -49,6 +49,7 @@ private:
  Uint32 nodeId;
  Uint32 totalFragments;
  Uint32 startGci;
+ Uint32 tablespaceId;
  Uint32 logPartId;
};

View File

@ -669,10 +669,22 @@ public:
  Uint32 getFrmLength() const;

  /**
-  * Get Node Group and Tablespace id's for fragments in table
+  * Get Fragment Data (id, state and node group)
   */
- const void *getNodeGroupIds() const;
- Uint32 getNodeGroupIdsLength() const;
+ const void *getFragmentData() const;
+ Uint32 getFragmentDataLen() const;
/**
* Get Range or List Array (value, partition)
*/
const void *getRangeListData() const;
Uint32 getRangeListDataLen() const;
/**
* Get Tablespace Data (id, version)
*/
const void *getTablespaceData() const;
Uint32 getTablespaceDataLen() const;
  /** @} *******************************************************************/
@ -721,6 +733,16 @@ public:
   */
  void setLogging(bool);
/**
* Set fragment count
*/
void setFragmentCount(Uint32);
/**
* Get fragment count
*/
Uint32 getFragmentCount() const;
  /**
   * Set fragmentation type
   */
@ -772,6 +794,19 @@ public:
   */
  virtual int getObjectVersion() const;
/**
* Set/Get Maximum number of rows in table (only used to calculate
* number of partitions).
*/
void setMaxRows(Uint64 maxRows);
Uint64 getMaxRows();
/**
* Set/Get indicator if default number of partitions is used in table.
*/
void setDefaultNoPartitionsFlag(Uint32 indicator);
Uint32 getDefaultNoPartitionsFlag();
  /**
   * Get object id
   */
@ -783,9 +818,34 @@ public:
  void setFrm(const void* data, Uint32 len);

  /**
-  * Set node group for fragments
+  * Set array of fragment information containing
+  * Fragment Identity
+  * Node group identity
+  * Fragment State
   */
- void setNodeGroupIds(const void *data, Uint32 len);
+ void setFragmentData(const void* data, Uint32 len);
/**
* Set/Get tablespace names per fragment
*/
void setTablespaceNames(const void* data, Uint32 len);
const void *getTablespaceNames();
Uint32 getTablespaceNamesLen();
/**
* Set tablespace information per fragment
* Contains a tablespace id and a tablespace version
*/
void setTablespaceData(const void* data, Uint32 len);
/**
* Set array of information mapping range values and list values
* to fragments. This is essentially a sorted map consisting of
* pairs of value, fragment identity. For range partitions there is
* one pair per fragment. For list partitions it could be any number
* of pairs, at least as many as there are fragments.
*/
void setRangeListData(const void* data, Uint32 len);
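The setRangeListData() comment above describes a sorted map of (value, fragment) pairs: one pair per fragment for RANGE, one or more per fragment for LIST. A hedged sketch of how such an array could be built and probed; the concrete wire layout used by the kernel is an assumption here:

/* Sketch of the (value, fragment) map described above; the layout
   (pairs of 32-bit words, sorted by value) is an assumption. */
#include <cstdint>
#include <cstdio>

struct RangePair { int32_t value; uint32_t fragment; };

// RANGE partitioning: one pair per fragment, values are upper
// bounds, so the first bound greater than the key selects it.
static uint32_t lookup(const RangePair *map, unsigned n, int32_t key)
{
  for (unsigned i= 0; i < n; i++)
    if (key < map[i].value)
      return map[i].fragment;
  return map[n-1].fragment;           // fall into the last range
}

int main()
{
  const RangePair map[]= { {10, 0}, {20, 1}, {30, 2} };
  std::printf("key 15 -> fragment %u\n", lookup(map, 3, 15));  // 1
  return 0;
}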
  /**
   * Set table object type

View File

@ -44,11 +44,21 @@ DictTabInfo::TableMapping[] = {
  DTIMAP(Table, CustomTriggerId, CustomTriggerId),
  DTIMAP2(Table, FrmLen, FrmLen, 0, MAX_FRM_DATA_SIZE),
  DTIMAPB(Table, FrmData, FrmData, 0, MAX_FRM_DATA_SIZE, FrmLen),
- DTIMAP(Table, FragmentCount, FragmentCount),
- DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, MAX_FRAGMENT_DATA_BYTES),
- DTIMAPB(Table, FragmentData, FragmentData, 0, MAX_FRAGMENT_DATA_BYTES, FragmentDataLen),
+ DTIMAP2(Table, FragmentCount, FragmentCount, 0, MAX_NDB_PARTITIONS),
+ DTIMAP2(Table, ReplicaDataLen, ReplicaDataLen, 0, 2*MAX_FRAGMENT_DATA_BYTES),
+ DTIMAPB(Table, ReplicaData, ReplicaData, 0, 2*MAX_FRAGMENT_DATA_BYTES, ReplicaDataLen),
DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, 6*MAX_NDB_PARTITIONS),
DTIMAPB(Table, FragmentData, FragmentData, 0, 6*MAX_NDB_PARTITIONS, FragmentDataLen),
DTIMAP2(Table, TablespaceDataLen, TablespaceDataLen, 0, 8*MAX_NDB_PARTITIONS),
DTIMAPB(Table, TablespaceData, TablespaceData, 0, 8*MAX_NDB_PARTITIONS, TablespaceDataLen),
DTIMAP2(Table, RangeListDataLen, RangeListDataLen, 0, 8*MAX_NDB_PARTITIONS),
DTIMAPB(Table, RangeListData, RangeListData, 0, 8*MAX_NDB_PARTITIONS, RangeListDataLen),
  DTIMAP(Table, TablespaceId, TablespaceId),
  DTIMAP(Table, TablespaceVersion, TablespaceVersion),
DTIMAP(Table, MaxRowsLow, MaxRowsLow),
DTIMAP(Table, MaxRowsHigh, MaxRowsHigh),
DTIMAP(Table, DefaultNoPartFlag, DefaultNoPartFlag),
DTIMAP(Table, TablespaceVersion, TablespaceVersion),
  DTIMAP(Table, RowGCIFlag, RowGCIFlag),
  DTIMAP(Table, RowChecksumFlag, RowChecksumFlag),
  DTIBREAK(AttributeName)
@ -124,12 +134,21 @@ DictTabInfo::Table::init(){
  DeleteTriggerId = RNIL;
  CustomTriggerId = RNIL;
  FrmLen = 0;
- memset(FrmData, 0, sizeof(FrmData));
- FragmentCount = 0;
  FragmentDataLen = 0;
+ ReplicaDataLen = 0;
+ RangeListDataLen = 0;
+ TablespaceDataLen = 0;
+ memset(FrmData, 0, sizeof(FrmData));
  memset(FragmentData, 0, sizeof(FragmentData));
+ memset(ReplicaData, 0, sizeof(ReplicaData));
+ memset(RangeListData, 0, sizeof(RangeListData));
+ memset(TablespaceData, 0, sizeof(TablespaceData));
+ FragmentCount = 0;
  TablespaceId = RNIL;
  TablespaceVersion = ~0;
+ MaxRowsLow = 0;
+ MaxRowsHigh = 0;
+ DefaultNoPartFlag = 1;
  RowGCIFlag = ~0;
  RowChecksumFlag = ~0;

View File

@ -408,6 +408,9 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w,
  union {
    char tableName[MAX_TAB_NAME_SIZE];
    char frmData[MAX_FRM_DATA_SIZE];
+   char rangeData[16*MAX_NDB_PARTITIONS];
+   char ngData[2*MAX_NDB_PARTITIONS];
+   char tsData[2*2*MAX_NDB_PARTITIONS];
    char defaultValue[MAX_ATTR_DEFAULT_VALUE_SIZE];
    char attributeName[MAX_ATTR_NAME_SIZE];
  };
@ -434,13 +437,15 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w,
  w.add(DictTabInfo::TableKValue, tablePtr.p->kValue);
  w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType);
  w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType);
+ w.add(DictTabInfo::MaxRowsLow, tablePtr.p->maxRowsLow);
+ w.add(DictTabInfo::MaxRowsHigh, tablePtr.p->maxRowsHigh);
+ w.add(DictTabInfo::DefaultNoPartFlag, tablePtr.p->defaultNoPartFlag);
- if(!signal)
- {
-   w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
- }
- else
+ w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
+
+ if(signal)
  {
+   /* This branch is executed for GET_TABINFOREQ */
    Uint32 * theData = signal->getDataPtrSend();
    CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
    req->senderRef = 0;
@ -450,18 +455,16 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w,
    req->primaryTableId = tablePtr.i;
    EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal,
                   CreateFragmentationReq::SignalLength);
-   if(signal->theData[0] == 0)
-   {
-     Uint16 *data = (Uint16*)&signal->theData[25];
-     Uint32 count = 2 + data[0] * data[1];
-     w.add(DictTabInfo::FragmentDataLen, 2*count);
-     w.add(DictTabInfo::FragmentData, data, 2*count);
-     ndbrequire(count > 0);
-   }
-   else
-   {
-     ndbrequire(false);
-   }
+   ndbrequire(signal->theData[0] == 0);
+   Uint16 *data = (Uint16*)&signal->theData[25];
+   Uint32 count = 2 + data[0] * data[1];
+   w.add(DictTabInfo::ReplicaDataLen, 2*count);
+   w.add(DictTabInfo::ReplicaData, data, 2*count);
  }
+ else
+ {
+   /* This part is executed for CREATE_TABLEREQ and ALTER_TABLEREQ */
+   ;
+ }
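The size computation above relies on the CREATE_FRAGMENTATION reply putting two halfwords of metadata in front of the per-fragment node lists, hence count = 2 + data[0] * data[1] halfwords, doubled to get bytes for the property writer. A sketch; which halfword holds the replica count versus the fragment count is an assumption here:

/* Sketch of the size computation above. */
#include <cstdint>
#include <cstdio>

int main()
{
  // 2 replicas, 4 fragments, then one node id per (fragment, replica)
  uint16_t data[2 + 2*4]= {2, 4, 1,2, 2,1, 1,2, 2,1};
  uint32_t count= 2 + data[0] * data[1];                     // halfwords used
  std::printf("%u halfwords = %u bytes\n", count, 2*count);  // 10 / 20
  return 0;
}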
  if (tablePtr.p->primaryTableId != RNIL){
@ -480,10 +483,27 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w,
    ConstRope frm(c_rope_pool, tablePtr.p->frmData);
    frm.copy(frmData);
    w.add(DictTabInfo::FrmLen, frm.size());
    w.add(DictTabInfo::FrmData, frmData, frm.size());
{
jam();
ConstRope ts(c_rope_pool, tablePtr.p->tsData);
ts.copy(tsData);
w.add(DictTabInfo::TablespaceDataLen, ts.size());
w.add(DictTabInfo::TablespaceData, tsData, ts.size());
ConstRope ng(c_rope_pool, tablePtr.p->ngData);
ng.copy(ngData);
w.add(DictTabInfo::FragmentDataLen, ng.size());
w.add(DictTabInfo::FragmentData, ngData, ng.size());
ConstRope range(c_rope_pool, tablePtr.p->rangeData);
range.copy(rangeData);
w.add(DictTabInfo::RangeListDataLen, range.size());
w.add(DictTabInfo::RangeListData, rangeData, range.size());
}
  if(tablePtr.p->m_tablespace_id != RNIL)
  {
    w.add(DictTabInfo::TablespaceId, tablePtr.p->m_tablespace_id);
@ -1797,8 +1817,6 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
  tablePtr.p->gciTableCreated = 0;
  tablePtr.p->noOfAttributes = ZNIL;
  tablePtr.p->noOfNullAttr = 0;
- tablePtr.p->ngLen = 0;
- memset(tablePtr.p->ngData, 0, sizeof(tablePtr.p->ngData));
  tablePtr.p->fragmentCount = 0;
/* /*
tablePtr.p->lh3PageIndexBits = 0; tablePtr.p->lh3PageIndexBits = 0;
@ -1811,6 +1829,9 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
  tablePtr.p->minLoadFactor = 70;
  tablePtr.p->noOfPrimkey = 1;
  tablePtr.p->tupKeyLength = 1;
tablePtr.p->maxRowsLow = 0;
tablePtr.p->maxRowsHigh = 0;
tablePtr.p->defaultNoPartFlag = true;
  tablePtr.p->m_bits = 0;
  tablePtr.p->tableType = DictTabInfo::UserTable;
  tablePtr.p->primaryTableId = RNIL;
@ -3608,15 +3629,15 @@ Dbdict::execCREATE_TABLE_REQ(Signal* signal){
  Uint32 key = c_opRecordSequence + 1;
  Uint32 *theData = signal->getDataPtrSend(), i;
- Uint16 *node_group= (Uint16*)&signal->theData[25];
+ Uint16 *frag_data= (Uint16*)&signal->theData[25];
  CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
  req->senderRef = reference();
  req->senderData = key;
  req->primaryTableId = parseRecord.tablePtr.p->primaryTableId;
- req->noOfFragments = parseRecord.tablePtr.p->ngLen >> 1;
+ req->noOfFragments = parseRecord.tablePtr.p->fragmentCount;
  req->fragmentationType = parseRecord.tablePtr.p->fragmentType;
- for (i = 0; i < req->noOfFragments; i++)
-   node_group[i] = parseRecord.tablePtr.p->ngData[i];
+ MEMCOPY_NO_WORDS(frag_data, c_fragData, c_fragDataLen);
  if (parseRecord.tablePtr.p->isOrderedIndex()) {
    jam();
    // ordered index has same fragmentation as the table
@ -4520,6 +4541,9 @@ int Dbdict::handleAlterTab(AlterTabReq * req,
    ndbrequire(org.assign(tmp, src.size()));
  }
+ /*
+   TODO RONM: Some new code needed for FragmentData and RangeOrListData
+ */
  if (supportedAlteration)
  {
    // Set new schema version
@ -4727,11 +4751,12 @@ Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
  packTableIntoPages(w, tabPtr);

  SegmentedSectionPtr spDataPtr;
+ Ptr<SectionSegment> tmpTsPtr;
  w.getPtr(spDataPtr);

  signal->setSection(spDataPtr, CreateTabReq::DICT_TAB_INFO);
  signal->setSection(fragDataPtr, CreateTabReq::FRAGMENTATION);

  NodeReceiverGroup rg(DBDICT, c_aliveNodes);
  SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter);
  createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ;
@ -5109,6 +5134,9 @@ Dbdict::createTab_dih(Signal* signal,
  req->schemaVersion = tabPtr.p->tableVersion;
  req->primaryTableId = tabPtr.p->primaryTableId;
+ /*
+   Need to fetch fragDataPtr from the table object instead
+ */
  if(!fragDataPtr.isNull()){
    signal->setSection(fragDataPtr, DiAddTabReq::FRAGMENTATION);
  }
@ -5203,6 +5231,7 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
  Uint32 fragCount = req->totalFragments;
  Uint32 requestInfo = req->requestInfo;
  Uint32 startGci = req->startGci;
+ Uint32 tablespace_id= req->tablespaceId;
  Uint32 logPart = req->logPartId;
  ndbrequire(node == getOwnNodeId());
@ -5258,6 +5287,7 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
  req->tableType = tabPtr.p->tableType;
  req->primaryTableId = tabPtr.p->primaryTableId;
  req->tablespace_id= tabPtr.p->m_tablespace_id;
+ //req->tablespace_id= tablespace_id;
  req->logPartId = logPart;
  sendSignal(DBLQH_REF, GSN_LQHFRAGREQ, signal,
             LqhFragReq::SignalLength, JBB);
@ -5740,8 +5770,8 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
  it.first();

  SimpleProperties::UnpackStatus status;
- DictTabInfo::Table tableDesc; tableDesc.init();
- status = SimpleProperties::unpack(it, &tableDesc,
+ c_tableDesc.init();
+ status = SimpleProperties::unpack(it, &c_tableDesc,
                                    DictTabInfo::TableMapping,
                                    DictTabInfo::TableMappingSize,
                                    true, true);
@ -5767,12 +5797,12 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
  // Verify that table name is an allowed table name.
  // TODO
  /* ---------------------------------------------------------------- */
- const Uint32 tableNameLength = strlen(tableDesc.TableName) + 1;
- const Uint32 name_hash = Rope::hash(tableDesc.TableName, tableNameLength);
+ const Uint32 tableNameLength = strlen(c_tableDesc.TableName) + 1;
+ const Uint32 name_hash = Rope::hash(c_tableDesc.TableName, tableNameLength);

  if(checkExist){
    jam();
-   tabRequire(get_object(tableDesc.TableName, tableNameLength) == 0,
+   tabRequire(get_object(c_tableDesc.TableName, tableNameLength) == 0,
               CreateTableRef::TableAlreadyExist);
  }
@ -5783,7 +5813,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
  }
  case DictTabInfo::AlterTableFromAPI:{
    jam();
-   tablePtr.i = getFreeTableRecord(tableDesc.PrimaryTableId);
+   tablePtr.i = getFreeTableRecord(c_tableDesc.PrimaryTableId);
    /* ---------------------------------------------------------------- */
    // Check if no free tables existed.
    /* ---------------------------------------------------------------- */
@ -5799,7 +5829,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
    /* ---------------------------------------------------------------- */
    // Get table id and check that table doesn't already exist
    /* ---------------------------------------------------------------- */
-   tablePtr.i = tableDesc.TableId;
+   tablePtr.i = c_tableDesc.TableId;
    if (parseP->requestType == DictTabInfo::ReadTableFromDiskSR) {
      ndbrequire(tablePtr.i == c_restartRecord.activeTable);
@ -5821,7 +5851,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
    /* ---------------------------------------------------------------- */
    // Set table version
    /* ---------------------------------------------------------------- */
-   Uint32 tableVersion = tableDesc.TableVersion;
+   Uint32 tableVersion = c_tableDesc.TableVersion;
    tablePtr.p->tableVersion = tableVersion;
    break;
@ -5834,7 +5864,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
  {
    Rope name(c_rope_pool, tablePtr.p->tableName);
-   ndbrequire(name.assign(tableDesc.TableName, tableNameLength, name_hash));
+   ndbrequire(name.assign(c_tableDesc.TableName, tableNameLength, name_hash));
  }

  Ptr<DictObject> obj_ptr;
@ -5842,7 +5872,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
    jam();
    ndbrequire(c_obj_hash.seize(obj_ptr));
    obj_ptr.p->m_id = tablePtr.i;
-   obj_ptr.p->m_type = tableDesc.TableType;
+   obj_ptr.p->m_type = c_tableDesc.TableType;
    obj_ptr.p->m_name = tablePtr.p->tableName;
    obj_ptr.p->m_ref_count = 0;
    c_obj_hash.add(obj_ptr);
@ -5850,42 +5880,54 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
#ifdef VM_TRACE
    ndbout_c("Dbdict: name=%s,id=%u,obj_ptr_i=%d",
-            tableDesc.TableName, tablePtr.i, tablePtr.p->m_obj_ptr_i);
+            c_tableDesc.TableName, tablePtr.i, tablePtr.p->m_obj_ptr_i);
#endif
  }

- tablePtr.p->noOfAttributes = tableDesc.NoOfAttributes;
+ tablePtr.p->noOfAttributes = c_tableDesc.NoOfAttributes;
  tablePtr.p->m_bits |=
-   (tableDesc.TableLoggedFlag ? TableRecord::TR_Logged : 0);
+   (c_tableDesc.TableLoggedFlag ? TableRecord::TR_Logged : 0);
  tablePtr.p->m_bits |=
-   (tableDesc.RowChecksumFlag ? TableRecord::TR_RowChecksum : 0);
+   (c_tableDesc.RowChecksumFlag ? TableRecord::TR_RowChecksum : 0);
  tablePtr.p->m_bits |=
-   (tableDesc.RowGCIFlag ? TableRecord::TR_RowGCI : 0);
+   (c_tableDesc.RowGCIFlag ? TableRecord::TR_RowGCI : 0);
- tablePtr.p->minLoadFactor = tableDesc.MinLoadFactor;
- tablePtr.p->maxLoadFactor = tableDesc.MaxLoadFactor;
- tablePtr.p->fragmentType = (DictTabInfo::FragmentType)tableDesc.FragmentType;
- tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType;
- tablePtr.p->kValue = tableDesc.TableKValue;
- tablePtr.p->fragmentCount = tableDesc.FragmentCount;
- tablePtr.p->m_tablespace_id = tableDesc.TablespaceId;
+ tablePtr.p->minLoadFactor = c_tableDesc.MinLoadFactor;
+ tablePtr.p->maxLoadFactor = c_tableDesc.MaxLoadFactor;
+ tablePtr.p->fragmentType = (DictTabInfo::FragmentType)c_tableDesc.FragmentType;
+ tablePtr.p->tableType = (DictTabInfo::TableType)c_tableDesc.TableType;
+ tablePtr.p->kValue = c_tableDesc.TableKValue;
+ tablePtr.p->fragmentCount = c_tableDesc.FragmentCount;
+ tablePtr.p->m_tablespace_id = c_tableDesc.TablespaceId;
+ tablePtr.p->maxRowsLow = c_tableDesc.MaxRowsLow;
+ tablePtr.p->maxRowsHigh = c_tableDesc.MaxRowsHigh;
+ tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag;

  {
    Rope frm(c_rope_pool, tablePtr.p->frmData);
-   ndbrequire(frm.assign(tableDesc.FrmData, tableDesc.FrmLen));
+   ndbrequire(frm.assign(c_tableDesc.FrmData, c_tableDesc.FrmLen));
+   Rope range(c_rope_pool, tablePtr.p->rangeData);
+   ndbrequire(range.assign(c_tableDesc.RangeListData,
+                           c_tableDesc.RangeListDataLen));
+   Rope fd(c_rope_pool, tablePtr.p->ngData);
+   ndbrequire(fd.assign((const char*)c_tableDesc.FragmentData,
+                        c_tableDesc.FragmentDataLen));
+   Rope ts(c_rope_pool, tablePtr.p->tsData);
+   ndbrequire(ts.assign((const char*)c_tableDesc.TablespaceData,
+                        c_tableDesc.TablespaceDataLen));
  }
- tablePtr.p->ngLen = tableDesc.FragmentDataLen;
- memcpy(tablePtr.p->ngData, tableDesc.FragmentData,
-        tableDesc.FragmentDataLen);
+ c_fragDataLen = c_tableDesc.FragmentDataLen;
+ memcpy(c_fragData, c_tableDesc.FragmentData,
+        c_tableDesc.FragmentDataLen);

- if(tableDesc.PrimaryTableId != RNIL) {
-   tablePtr.p->primaryTableId = tableDesc.PrimaryTableId;
-   tablePtr.p->indexState = (TableRecord::IndexState)tableDesc.IndexState;
-   tablePtr.p->insertTriggerId = tableDesc.InsertTriggerId;
-   tablePtr.p->updateTriggerId = tableDesc.UpdateTriggerId;
-   tablePtr.p->deleteTriggerId = tableDesc.DeleteTriggerId;
-   tablePtr.p->customTriggerId = tableDesc.CustomTriggerId;
+ if(c_tableDesc.PrimaryTableId != RNIL) {
+   tablePtr.p->primaryTableId = c_tableDesc.PrimaryTableId;
+   tablePtr.p->indexState = (TableRecord::IndexState)c_tableDesc.IndexState;
+   tablePtr.p->insertTriggerId = c_tableDesc.InsertTriggerId;
+   tablePtr.p->updateTriggerId = c_tableDesc.UpdateTriggerId;
+   tablePtr.p->deleteTriggerId = c_tableDesc.DeleteTriggerId;
+   tablePtr.p->customTriggerId = c_tableDesc.CustomTriggerId;
  } else {
    tablePtr.p->primaryTableId = RNIL;
    tablePtr.p->indexState = TableRecord::IS_UNDEFINED;
@ -5897,7 +5939,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
  tablePtr.p->buildTriggerId = RNIL;
  tablePtr.p->indexLocal = 0;

- handleTabInfo(it, parseP, tableDesc);
+ handleTabInfo(it, parseP, c_tableDesc);

  if(parseP->errorCode != 0)
  {
@ -7460,10 +7502,9 @@ Dbdict::execCREATE_INDX_REQ(Signal* signal)
  // save name and index table properties
  signal->getSection(ssPtr, CreateIndxReq::INDEX_NAME_SECTION);
  SimplePropertiesSectionReader r1(ssPtr, getSectionSegmentPool());
- DictTabInfo::Table tableDesc;
- tableDesc.init();
+ c_tableDesc.init();
  SimpleProperties::UnpackStatus status = SimpleProperties::unpack(
-     r1, &tableDesc,
+     r1, &c_tableDesc,
      DictTabInfo::TableMapping, DictTabInfo::TableMappingSize,
      true, true);
  if (status != SimpleProperties::Eof) {
@ -7473,8 +7514,8 @@ Dbdict::execCREATE_INDX_REQ(Signal* signal)
    createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
    return;
  }
- memcpy(opPtr.p->m_indexName, tableDesc.TableName, MAX_TAB_NAME_SIZE);
- opPtr.p->m_storedIndex = tableDesc.TableLoggedFlag;
+ memcpy(opPtr.p->m_indexName, c_tableDesc.TableName, MAX_TAB_NAME_SIZE);
+ opPtr.p->m_storedIndex = c_tableDesc.TableLoggedFlag;
  releaseSections(signal);
  // master expects to hear from all
  if (opPtr.p->m_isMaster)
@ -13097,7 +13138,7 @@ Dbdict::getTableKeyList(TableRecordPtr tablePtr,
      list.id[list.sz++] = attrPtr.p->attributeId;
    }
  }
- ndbrequire(list.sz == tablePtr.p->noOfPrimkey + 1);
+ ndbrequire(list.sz == (uint)(tablePtr.p->noOfPrimkey + 1));
  ndbrequire(list.sz <= MAX_ATTRIBUTES_IN_INDEX + 1);
}

View File

@ -203,6 +203,8 @@ public:
   */
  struct TableRecord {
    TableRecord(){}
Uint32 maxRowsLow;
Uint32 maxRowsHigh;
    /* Table id (array index in DICT and other blocks) */
    Uint32 tableId;
    Uint32 m_obj_ptr_i;
@ -268,6 +270,11 @@ public:
     */
    Uint8 maxLoadFactor;
/*
Flag to indicate default number of partitions
*/
bool defaultNoPartFlag;
    /*
     * Used when shrinking to decide when to merge buckets. Hysteresis
     * is thus possible. Should be smaller but not much smaller than
@ -353,10 +360,9 @@ public:
    /** frm data for this table */
    RopeHandle frmData;

-   /** Node Group and Tablespace id for this table */
-   /** TODO Could preferably be made dynamic size */
-   Uint32 ngLen;
-   Uint16 ngData[MAX_NDB_PARTITIONS];
+   RopeHandle tsData;
+   RopeHandle ngData;
+   RopeHandle rangeData;

    Uint32 fragmentCount;
    Uint32 m_tablespace_id;
@ -365,6 +371,15 @@ public:
  typedef Ptr<TableRecord> TableRecordPtr;
  ArrayPool<TableRecord> c_tableRecordPool;
/** Node Group and Tablespace id+version + range or list data.
* This is only stored temporarily in DBDICT during an ongoing
* change.
* TODO RONM: Look into improvements of this
*/
Uint32 c_fragDataLen;
Uint16 c_fragData[MAX_NDB_PARTITIONS];
Uint32 c_tsIdData[2*MAX_NDB_PARTITIONS];
  /**
   * Triggers. This is volatile data not saved on disk. Setting a
   * trigger online creates the trigger in TC (if index) and LQH-TUP.
@ -504,6 +519,8 @@ public:
  CArray<SchemaPageRecord> c_schemaPageRecordArray;
DictTabInfo::Table c_tableDesc;
  /**
   * A page for create index table signal.
   */

View File

@ -5785,6 +5785,9 @@ void Dblqh::execCOMPLETE(Signal* signal)
    errorReport(signal, 1);
    return;
  }//if
if (ERROR_INSERTED(5042)) {
ndbrequire(false);
}
  if (ERROR_INSERTED(5013)) {
    CLEAR_ERROR_INSERT_VALUE;
    sendSignalWithDelay(cownref, GSN_COMPLETE, signal, 2000, 3);
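The new ERROR_INSERTED(5042) hook deliberately kills the node while a COMPLETE is in flight, to exercise recovery of interrupted partition changes. A self-contained sketch of the error-insert pattern; the macros are re-implemented here as stand-ins for the kernel's versions:

/* Sketch of the error-insert pattern; the macro names mirror the
   kernel's, the implementations here are stand-ins. */
#include <cstdio>
#include <cstdlib>

static int cerrorInsert= 0;             // set by a test harness
#define ERROR_INSERTED(x) (cerrorInsert == (x))
#define ndbrequire(cond) \
  do { if (!(cond)) { std::puts("forced crash"); std::abort(); } } while (0)

static void execCOMPLETE()
{
  if (ERROR_INSERTED(5042))
    ndbrequire(false);      // deliberate failure mid-commit, as above
  std::puts("COMPLETE processed");
}

int main() { execCOMPLETE(); return 0; }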

View File

@ -534,6 +534,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
  CreateFilegroupImplReq rep;
  if(regTabPtr.p->m_no_of_disk_attributes)
  {
+   ljam();
    Tablespace_client tsman(0, c_tsman, 0, 0,
                            regFragPtr.p->m_tablespace_id);
    ndbrequire(tsman.get_tablespace_info(&rep) == 0);
@ -545,11 +546,14 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
  if (regTabPtr.p->m_no_of_disk_attributes)
  {
+   ljam();
    if(!(getNodeState().getSystemRestartInProgress() &&
         getNodeState().startLevel == NodeState::SL_STARTING &&
         getNodeState().starting.startPhase <= 4))
    {
      Callback cb;
+     ljam();
      cb.m_callbackData= fragOperPtr.i;
      cb.m_callbackFunction =
        safe_cast(&Dbtup::undo_createtable_callback);
@ -562,6 +566,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
      int res= lgman.get_log_buffer(signal, sz, &cb);
      switch(res){
      case 0:
+       ljam();
        signal->theData[0] = 1;
        return;
      case -1:

View File

@ -76,11 +76,16 @@ NdbBlob::getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnIm
    BLOB tables use the same fragmentation as the original table
    but may change the fragment type if it is UserDefined since it
    must be hash based so that the kernel can handle it on its own.
+   It also uses the same tablespaces and it never uses any range or
+   list arrays.
  */
  bt.m_primaryTableId = t->m_id;
+ bt.m_fd.clear();
+ bt.m_ts.clear();
+ bt.m_range.clear();
+ bt.setFragmentCount(t->getFragmentCount());
  bt.m_tablespace_id = t->m_tablespace_id;
  bt.m_tablespace_version = t->m_tablespace_version;
- bt.m_ng.clear();
  switch (t->getFragmentType())
  {
  case NdbDictionary::Object::FragAllSmall:

View File

@ -413,6 +413,30 @@ NdbDictionary::Table::getNoOfPrimaryKeys() const {
  return m_impl.m_noOfKeys;
}
void
NdbDictionary::Table::setMaxRows(Uint64 maxRows)
{
m_impl.m_max_rows = maxRows;
}
Uint64
NdbDictionary::Table::getMaxRows()
{
return m_impl.m_max_rows;
}
void
NdbDictionary::Table::setDefaultNoPartitionsFlag(Uint32 flag)
{
  m_impl.m_default_no_part_flag = flag;
}
Uint32
NdbDictionary::Table::getDefaultNoPartitionsFlag()
{
return m_impl.m_default_no_part_flag;
}
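These accessors let an API client size a table's partitioning from an expected row count instead of enumerating node groups: set a max-rows hint and clear the default-partitions flag. A usage sketch against a stand-in class with the same signatures; it assumes nothing beyond what this diff shows:

/* Usage sketch for the accessors above; the class is a stand-in
   for NdbDictionary::Table, not the real implementation. */
#include <cstdio>
typedef unsigned long long Uint64;
typedef unsigned int Uint32;

struct Table {
  Uint64 m_max_rows; Uint32 m_default_no_part_flag;
  void setMaxRows(Uint64 r) { m_max_rows= r; }
  Uint64 getMaxRows() { return m_max_rows; }
  void setDefaultNoPartitionsFlag(Uint32 f) { m_default_no_part_flag= f; }
  Uint32 getDefaultNoPartitionsFlag() { return m_default_no_part_flag; }
};

int main()
{
  Table t;
  t.setMaxRows(500000000ULL);       // hint: expect ~5e8 rows
  t.setDefaultNoPartitionsFlag(0);  // let max rows drive partition count
  std::printf("maxRows=%llu default=%u\n",
              t.getMaxRows(), t.getDefaultNoPartitionsFlag());
  return 0;
}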
const char*
NdbDictionary::Table::getPrimaryKey(int no) const {
  int count = 0;
@ -435,25 +459,87 @@ NdbDictionary::Table::getFrmLength() const {
  return m_impl.getFrmLength();
}
void
NdbDictionary::Table::setTablespaceNames(const void *data, Uint32 len)
{
m_impl.setTablespaceNames(data, len);
}
const void*
NdbDictionary::Table::getTablespaceNames()
{
return m_impl.getTablespaceNames();
}
Uint32
NdbDictionary::Table::getTablespaceNamesLen()
{
return m_impl.getTablespaceNamesLen();
}
void
NdbDictionary::Table::setFragmentCount(Uint32 count)
{
m_impl.setFragmentCount(count);
}
Uint32
NdbDictionary::Table::getFragmentCount() const
{
return m_impl.getFragmentCount();
}
void
NdbDictionary::Table::setFrm(const void* data, Uint32 len){
  m_impl.setFrm(data, len);
}

const void*
-NdbDictionary::Table::getNodeGroupIds() const {
- return m_impl.m_ng.get_data();
+NdbDictionary::Table::getFragmentData() const {
+ return m_impl.getFragmentData();
}

Uint32
-NdbDictionary::Table::getNodeGroupIdsLength() const {
- return m_impl.m_ng.length();
+NdbDictionary::Table::getFragmentDataLen() const {
+ return m_impl.getFragmentDataLen();
}

void
-NdbDictionary::Table::setNodeGroupIds(const void* data, Uint32 noWords)
+NdbDictionary::Table::setFragmentData(const void* data, Uint32 len)
{
- m_impl.m_ng.assign(data, 2*noWords);
+ m_impl.setFragmentData(data, len);
}
const void*
NdbDictionary::Table::getTablespaceData() const {
return m_impl.getTablespaceData();
}
Uint32
NdbDictionary::Table::getTablespaceDataLen() const {
return m_impl.getTablespaceDataLen();
}
void
NdbDictionary::Table::setTablespaceData(const void* data, Uint32 len)
{
m_impl.setTablespaceData(data, len);
}
const void*
NdbDictionary::Table::getRangeListData() const {
return m_impl.getRangeListData();
}
Uint32
NdbDictionary::Table::getRangeListDataLen() const {
return m_impl.getRangeListDataLen();
}
void
NdbDictionary::Table::setRangeListData(const void* data, Uint32 len)
{
m_impl.setRangeListData(data, len);
}

NdbDictionary::Object::Status
@ -1523,7 +1609,7 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
out << " AT=MEDIUM_VAR"; out << " AT=MEDIUM_VAR";
break; break;
default: default:
out << " AT=" << col.getArrayType() << "?"; out << " AT=" << (int)col.getArrayType() << "?";
break; break;
} }
@ -1535,7 +1621,7 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
out << " ST=DISK"; out << " ST=DISK";
break; break;
default: default:
out << " ST=" << col.getStorageType() << "?"; out << " ST=" << (int)col.getStorageType() << "?";
break; break;
} }

View File

@ -42,6 +42,7 @@
#include <AttributeHeader.hpp>
#include <my_sys.h>
#include <NdbEnv.h>
#include <NdbMem.h>
#define DEBUG_PRINT 0
#define INCOMPATIBLE_VERSION -2
@ -370,28 +371,47 @@ void
NdbTableImpl::init(){
  m_changeMask= 0;
  m_id= RNIL;
+ m_version = ~0;
+ m_status = NdbDictionary::Object::Invalid;
+ m_type = NdbDictionary::Object::TypeUndefined;
  m_primaryTableId= RNIL;
+ m_internalName.clear();
+ m_externalName.clear();
+ m_newExternalName.clear();
+ m_mysqlName.clear();
  m_frm.clear();
  m_newFrm.clear();
- m_fragmentType= NdbDictionary::Object::DistrKeyHash;
+ m_ts_name.clear();
+ m_new_ts_name.clear();
+ m_ts.clear();
+ m_new_ts.clear();
+ m_fd.clear();
+ m_new_fd.clear();
+ m_range.clear();
+ m_new_range.clear();
+ m_fragmentType= NdbDictionary::Object::FragAllSmall;
  m_hashValueMask= 0;
  m_hashpointerValue= 0;
+ m_primaryTable.clear();
+ m_max_rows = 0;
+ m_default_no_part_flag = 1;
  m_logging= true;
+ m_row_gci = true;
+ m_row_checksum = true;
  m_kvalue= 6;
  m_minLoadFactor= 78;
  m_maxLoadFactor= 80;
  m_keyLenInWords= 0;
  m_fragmentCount= 0;
+ m_dictionary= NULL;
  m_index= NULL;
  m_indexType= NdbDictionary::Object::TypeUndefined;
  m_noOfKeys= 0;
  m_noOfDistributionKeys= 0;
  m_noOfBlobs= 0;
  m_replicaCount= 0;
+ m_tablespace_name.clear();
  m_tablespace_id = ~0;
- m_row_gci = true;
- m_row_checksum = true;
+ m_tablespace_version = ~0;
}
bool
@ -401,63 +421,185 @@ NdbTableImpl::equal(const NdbTableImpl& obj) const
  if ((m_internalName.c_str() == NULL) ||
      (strcmp(m_internalName.c_str(), "") == 0) ||
      (obj.m_internalName.c_str() == NULL) ||
      (strcmp(obj.m_internalName.c_str(), "") == 0))
  {
    // Shallow equal
    if(strcmp(getName(), obj.getName()) != 0)
    {
      DBUG_PRINT("info",("name %s != %s",getName(),obj.getName()));
      DBUG_RETURN(false);
    }
  }
  else
  {
    // Deep equal
    if(strcmp(m_internalName.c_str(), obj.m_internalName.c_str()) != 0)
    {
      DBUG_PRINT("info",("m_internalName %s != %s",
                         m_internalName.c_str(),obj.m_internalName.c_str()));
      DBUG_RETURN(false);
    }
  }
- if(m_fragmentType != obj.m_fragmentType){
-   DBUG_PRINT("info",("m_fragmentType %d != %d",m_fragmentType,obj.m_fragmentType));
+ if (m_frm.length() != obj.m_frm.length() ||
+     (memcmp(m_frm.get_data(), obj.m_frm.get_data(), m_frm.length())))
+ {
+   DBUG_PRINT("info",("m_frm not equal"));
    DBUG_RETURN(false);
  }
- if(m_columns.size() != obj.m_columns.size()){
-   DBUG_PRINT("info",("m_columns.size %d != %d",m_columns.size(),obj.m_columns.size()));
+ if (m_fd.length() != obj.m_fd.length() ||
+     (memcmp(m_fd.get_data(), obj.m_fd.get_data(), m_fd.length())))
{
DBUG_PRINT("info",("m_fd not equal"));
DBUG_RETURN(false);
}
if (m_ts.length() != obj.m_ts.length() ||
(memcmp(m_ts.get_data(), obj.m_ts.get_data(), m_ts.length())))
{
DBUG_PRINT("info",("m_ts not equal"));
DBUG_RETURN(false);
}
if (m_range.length() != obj.m_range.length() ||
(memcmp(m_range.get_data(), obj.m_range.get_data(), m_range.length())))
{
DBUG_PRINT("info",("m_range not equal"));
DBUG_RETURN(false);
}
if(m_fragmentType != obj.m_fragmentType)
{
DBUG_PRINT("info",("m_fragmentType %d != %d",m_fragmentType,
obj.m_fragmentType));
DBUG_RETURN(false);
}
if(m_columns.size() != obj.m_columns.size())
{
DBUG_PRINT("info",("m_columns.size %d != %d",m_columns.size(),
obj.m_columns.size()));
      DBUG_RETURN(false);
    }
-   for(unsigned i = 0; i<obj.m_columns.size(); i++){
-     if(!m_columns[i]->equal(* obj.m_columns[i])){
+   for(unsigned i = 0; i<obj.m_columns.size(); i++)
+   {
+     if(!m_columns[i]->equal(* obj.m_columns[i]))
+     {
        DBUG_PRINT("info",("m_columns [%d] != [%d]",i,i));
        DBUG_RETURN(false);
      }
    }
-   if(m_logging != obj.m_logging){
+   if(m_max_rows != obj.m_max_rows)
{
DBUG_PRINT("info",("m_max_rows %d != %d",(int32)m_max_rows,
(int32)obj.m_max_rows));
DBUG_RETURN(false);
}
if(m_default_no_part_flag != obj.m_default_no_part_flag)
{
DBUG_PRINT("info",("m_default_no_part_flag %d != %d",m_default_no_part_flag,
obj.m_default_no_part_flag));
DBUG_RETURN(false);
}
if(m_logging != obj.m_logging)
{
DBUG_PRINT("info",("m_logging %d != %d",m_logging,obj.m_logging)); DBUG_PRINT("info",("m_logging %d != %d",m_logging,obj.m_logging));
DBUG_RETURN(false); DBUG_RETURN(false);
} }
if(m_kvalue != obj.m_kvalue){ if(m_row_gci != obj.m_row_gci)
{
DBUG_PRINT("info",("m_row_gci %d != %d",m_row_gci,obj.m_row_gci));
DBUG_RETURN(false);
}
if(m_row_checksum != obj.m_row_checksum)
{
DBUG_PRINT("info",("m_row_checksum %d != %d",m_row_checksum,
obj.m_row_checksum));
DBUG_RETURN(false);
}
if(m_kvalue != obj.m_kvalue)
{
DBUG_PRINT("info",("m_kvalue %d != %d",m_kvalue,obj.m_kvalue)); DBUG_PRINT("info",("m_kvalue %d != %d",m_kvalue,obj.m_kvalue));
DBUG_RETURN(false); DBUG_RETURN(false);
} }
if(m_minLoadFactor != obj.m_minLoadFactor){ if(m_minLoadFactor != obj.m_minLoadFactor)
DBUG_PRINT("info",("m_minLoadFactor %d != %d",m_minLoadFactor,obj.m_minLoadFactor)); {
DBUG_PRINT("info",("m_minLoadFactor %d != %d",m_minLoadFactor,
obj.m_minLoadFactor));
DBUG_RETURN(false); DBUG_RETURN(false);
} }
if(m_maxLoadFactor != obj.m_maxLoadFactor){ if(m_maxLoadFactor != obj.m_maxLoadFactor)
DBUG_PRINT("info",("m_maxLoadFactor %d != %d",m_maxLoadFactor,obj.m_maxLoadFactor)); {
DBUG_PRINT("info",("m_maxLoadFactor %d != %d",m_maxLoadFactor,
obj.m_maxLoadFactor));
DBUG_RETURN(false); DBUG_RETURN(false);
} }
if(m_tablespace_id != obj.m_tablespace_id)
{
DBUG_PRINT("info",("m_tablespace_id %d != %d",m_tablespace_id,
obj.m_tablespace_id));
DBUG_RETURN(false);
}
if(m_tablespace_version != obj.m_tablespace_version)
{
DBUG_PRINT("info",("m_tablespace_version %d != %d",m_tablespace_version,
obj.m_tablespace_version));
DBUG_RETURN(false);
}
if(m_id != obj.m_id)
{
DBUG_PRINT("info",("m_id %d != %d",m_id,obj.m_id));
DBUG_RETURN(false);
}
if(m_version != obj.m_version)
{
DBUG_PRINT("info",("m_version %d != %d",m_version,obj.m_version));
DBUG_RETURN(false);
}
if(m_type != obj.m_type)
{
DBUG_PRINT("info",("m_type %d != %d",m_type,obj.m_type));
DBUG_RETURN(false);
}
if (m_type == NdbDictionary::Object::UniqueHashIndex ||
m_type == NdbDictionary::Object::OrderedIndex)
{
if(m_primaryTableId != obj.m_primaryTableId)
{
DBUG_PRINT("info",("m_primaryTableId %d != %d",m_primaryTableId,
obj.m_primaryTableId));
DBUG_RETURN(false);
}
if (m_indexType != obj.m_indexType)
{
DBUG_PRINT("info",("m_indexType %d != %d",m_indexType,obj.m_indexType));
DBUG_RETURN(false);
}
if(strcmp(m_primaryTable.c_str(), obj.m_primaryTable.c_str()) != 0)
{
DBUG_PRINT("info",("m_primaryTable %s != %s",
m_primaryTable.c_str(),obj.m_primaryTable.c_str()));
DBUG_RETURN(false);
}
}
DBUG_RETURN(true); DBUG_RETURN(true);
} }
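The four new buffer checks above (m_frm, m_fd, m_ts, m_range) all follow the same shape: unequal lengths fail fast, otherwise the bytes are compared with memcmp. A sketch of that pattern as a standalone helper; buffers_equal is hypothetical, not part of the NDB API:

  #include <cstring>

  static bool buffers_equal(const void *a, unsigned a_len,
                            const void *b, unsigned b_len)
  {
    if (a_len != b_len)
      return false;                   // different lengths can never match
    return memcmp(a, b, a_len) == 0;  // same length: compare the bytes
  }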
void
NdbTableImpl::assign(const NdbTableImpl& org)
{
  /* m_changeMask intentionally not copied */
  m_primaryTableId = org.m_primaryTableId;
  m_internalName.assign(org.m_internalName);
  updateMysqlName();
  // If the name has been explicitly set, use that name
@ -467,10 +609,21 @@ NdbTableImpl::assign(const NdbTableImpl& org)
  else
    m_externalName.assign(org.m_externalName);
  m_frm.assign(org.m_frm.get_data(), org.m_frm.length());
  m_ts_name.assign(org.m_ts_name.get_data(), org.m_ts_name.length());
  m_new_ts_name.assign(org.m_new_ts_name.get_data(),
                       org.m_new_ts_name.length());
  m_ts.assign(org.m_ts.get_data(), org.m_ts.length());
  m_new_ts.assign(org.m_new_ts.get_data(), org.m_new_ts.length());
  m_fd.assign(org.m_fd.get_data(), org.m_fd.length());
  m_new_fd.assign(org.m_new_fd.get_data(), org.m_new_fd.length());
  m_range.assign(org.m_range.get_data(), org.m_range.length());
  m_new_range.assign(org.m_new_range.get_data(), org.m_new_range.length());
  m_fragmentType = org.m_fragmentType;
  /*
    m_columnHashMask, m_columnHash, m_hashValueMask, m_hashpointerValue
    is state calculated by computeAggregates and buildColumnHash
  */
  for(unsigned i = 0; i<org.m_columns.size(); i++){
    NdbColumnImpl * col = new NdbColumnImpl();
    const NdbColumnImpl * iorg = org.m_columns[i];
@ -478,19 +631,30 @@ NdbTableImpl::assign(const NdbTableImpl& org)
    m_columns.push_back(col);
  }
  m_fragments = org.m_fragments;
  m_max_rows = org.m_max_rows;
  m_default_no_part_flag = org.m_default_no_part_flag;
  m_logging = org.m_logging;
  m_row_gci = org.m_row_gci;
  m_row_checksum = org.m_row_checksum;
  m_kvalue = org.m_kvalue;
  m_minLoadFactor = org.m_minLoadFactor;
  m_maxLoadFactor = org.m_maxLoadFactor;
  m_keyLenInWords = org.m_keyLenInWords;
  m_fragmentCount = org.m_fragmentCount;
  if (m_index != 0)
    delete m_index;
  m_index = org.m_index;
  m_primaryTable = org.m_primaryTable;
  m_indexType = org.m_indexType;
  m_noOfKeys = org.m_noOfKeys;
  m_noOfDistributionKeys = org.m_noOfDistributionKeys;
  m_noOfBlobs = org.m_noOfBlobs;
  m_replicaCount = org.m_replicaCount;
  m_id = org.m_id;
  m_version = org.m_version;
@ -575,6 +739,39 @@ NdbTableImpl::computeAggregates()
  }
}

const void*
NdbTableImpl::getTablespaceNames() const
{
  if (m_new_ts_name.empty())
    return m_ts_name.get_data();
  else
    return m_new_ts_name.get_data();
}

Uint32
NdbTableImpl::getTablespaceNamesLen() const
{
  if (m_new_ts_name.empty())
    return m_ts_name.length();
  else
    return m_new_ts_name.length();
}

void NdbTableImpl::setTablespaceNames(const void *data, Uint32 len)
{
  m_new_ts_name.assign(data, len);
}

void NdbTableImpl::setFragmentCount(Uint32 count)
{
  m_fragmentCount= count;
}

Uint32 NdbTableImpl::getFragmentCount() const
{
  return m_fragmentCount;
}

void NdbTableImpl::setFrm(const void* data, Uint32 len)
{
  m_newFrm.assign(data, len);
@ -598,6 +795,75 @@ NdbTableImpl::getFrmLength() const
  return m_newFrm.length();
}

void NdbTableImpl::setFragmentData(const void* data, Uint32 len)
{
  m_new_fd.assign(data, len);
}

const void *
NdbTableImpl::getFragmentData() const
{
  if (m_new_fd.empty())
    return m_fd.get_data();
  else
    return m_new_fd.get_data();
}

Uint32
NdbTableImpl::getFragmentDataLen() const
{
  if (m_new_fd.empty())
    return m_fd.length();
  else
    return m_new_fd.length();
}

void NdbTableImpl::setTablespaceData(const void* data, Uint32 len)
{
  m_new_ts.assign(data, len);
}

const void *
NdbTableImpl::getTablespaceData() const
{
  if (m_new_ts.empty())
    return m_ts.get_data();
  else
    return m_new_ts.get_data();
}

Uint32
NdbTableImpl::getTablespaceDataLen() const
{
  if (m_new_ts.empty())
    return m_ts.length();
  else
    return m_new_ts.length();
}

void NdbTableImpl::setRangeListData(const void* data, Uint32 len)
{
  m_new_range.assign(data, len);
}

const void *
NdbTableImpl::getRangeListData() const
{
  if (m_new_range.empty())
    return m_range.get_data();
  else
    return m_new_range.get_data();
}

Uint32
NdbTableImpl::getRangeListDataLen() const
{
  if (m_new_range.empty())
    return m_range.length();
  else
    return m_new_range.length();
}
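Each of these accessor triples implements the same pending-change protocol: the setter writes only the m_new_* buffer, and the getter serves the pending value if one exists, otherwise the committed one; createOrAlterTable later folds m_new_* into m_* and clears it. A minimal model of the protocol, assuming nothing beyond standard C++ (PendingBuffer is illustrative; the real code uses UtilBuffer pairs such as m_fd/m_new_fd):

  #include <vector>

  struct PendingBuffer {
    std::vector<unsigned char> committed;   // plays the role of m_fd
    std::vector<unsigned char> pending;     // plays the role of m_new_fd

    void set(const void *data, unsigned len)   // like setFragmentData()
    {
      const unsigned char *p = static_cast<const unsigned char*>(data);
      pending.assign(p, p + len);              // only the pending side is written
    }
    const void *get() const                    // like getFragmentData()
    {
      return pending.empty() ? committed.data() : pending.data();
    }
    unsigned length() const                    // like getFragmentDataLen()
    {
      return pending.empty() ? (unsigned)committed.size()
                             : (unsigned)pending.size();
    }
  };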
void
NdbTableImpl::updateMysqlName()
{
@ -1512,59 +1778,82 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
                                 const Uint32 * data, Uint32 len,
                                 bool fullyQualifiedNames)
{
  SimplePropertiesLinearReader it(data, len);
  DictTabInfo::Table *tableDesc;
  SimpleProperties::UnpackStatus s;
  DBUG_ENTER("NdbDictInterface::parseTableInfo");
  tableDesc = (DictTabInfo::Table*)NdbMem_Allocate(sizeof(DictTabInfo::Table));
  if (!tableDesc)
  {
    DBUG_RETURN(4000);
  }
  tableDesc->init();
  s = SimpleProperties::unpack(it, tableDesc,
                               DictTabInfo::TableMapping,
                               DictTabInfo::TableMappingSize,
                               true, true);
  if(s != SimpleProperties::Break){
    NdbMem_Free((void*)tableDesc);
    DBUG_RETURN(703);
  }
  const char * internalName = tableDesc->TableName;
  const char * externalName = Ndb::externalizeTableName(internalName, fullyQualifiedNames);
  NdbTableImpl * impl = new NdbTableImpl();
  impl->m_id = tableDesc->TableId;
  impl->m_version = tableDesc->TableVersion;
  impl->m_status = NdbDictionary::Object::Retrieved;
  impl->m_internalName.assign(internalName);
  impl->updateMysqlName();
  impl->m_externalName.assign(externalName);
  impl->m_frm.assign(tableDesc->FrmData, tableDesc->FrmLen);
  impl->m_fd.assign(tableDesc->FragmentData, tableDesc->FragmentDataLen);
  impl->m_range.assign(tableDesc->RangeListData, tableDesc->RangeListDataLen);
  impl->m_fragmentCount = tableDesc->FragmentCount;
  /*
    We specifically don't get tablespace data and range/list arrays here
    since those are known by the MySQL Server through analysing the
    frm file.
    Fragment Data contains the real node group mapping and the fragment
    identities used for each fragment. At the moment we have no need for
    this.
    Frm file is needed for autodiscovery.
  */
  impl->m_fragmentType = (NdbDictionary::Object::FragmentType)
    getApiConstant(tableDesc->FragmentType,
                   fragmentTypeMapping,
                   (Uint32)NdbDictionary::Object::FragUndefined);
  Uint64 max_rows = ((Uint64)tableDesc->MaxRowsHigh) << 32;
  max_rows += tableDesc->MaxRowsLow;
  impl->m_max_rows = max_rows;
  impl->m_default_no_part_flag = tableDesc->DefaultNoPartFlag;
  impl->m_logging = tableDesc->TableLoggedFlag;
  impl->m_row_gci = tableDesc->RowGCIFlag;
  impl->m_row_checksum = tableDesc->RowChecksumFlag;
  impl->m_kvalue = tableDesc->TableKValue;
  impl->m_minLoadFactor = tableDesc->MinLoadFactor;
  impl->m_maxLoadFactor = tableDesc->MaxLoadFactor;

  impl->m_indexType = (NdbDictionary::Object::Type)
    getApiConstant(tableDesc->TableType,
                   indexTypeMapping,
                   NdbDictionary::Object::TypeUndefined);

  if(impl->m_indexType == NdbDictionary::Object::TypeUndefined){
  } else {
    const char * externalPrimary =
      Ndb::externalizeTableName(tableDesc->PrimaryTable, fullyQualifiedNames);
    impl->m_primaryTable.assign(externalPrimary);
  }

  Uint32 i;
  for(i = 0; i < tableDesc->NoOfAttributes; i++) {
    DictTabInfo::Attribute attrDesc; attrDesc.init();
    s = SimpleProperties::unpack(it,
                                 &attrDesc,
@ -1573,6 +1862,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
                                 true, true);
    if(s != SimpleProperties::Break){
      delete impl;
      NdbMem_Free((void*)tableDesc);
      DBUG_RETURN(703);
    }
@ -1583,6 +1873,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
    // check type and compute attribute size and array size
    if (! attrDesc.translateExtType()) {
      delete impl;
      NdbMem_Free((void*)tableDesc);
      DBUG_RETURN(703);
    }
    col->m_type = (NdbDictionary::Column::Type)attrDesc.AttributeExtType;
@ -1594,12 +1885,14 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
    // charset is defined exactly for char types
    if (col->getCharType() != (cs_number != 0)) {
      delete impl;
      NdbMem_Free((void*)tableDesc);
      DBUG_RETURN(703);
    }
    if (col->getCharType()) {
      col->m_cs = get_charset(cs_number, MYF(0));
      if (col->m_cs == NULL) {
        delete impl;
        NdbMem_Free((void*)tableDesc);
        DBUG_RETURN(743);
      }
    }
@ -1627,17 +1920,17 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
  impl->computeAggregates();

  if(tableDesc->ReplicaDataLen > 0)
  {
    Uint16 replicaCount = tableDesc->ReplicaData[0];
    Uint16 fragCount = tableDesc->ReplicaData[1];

    impl->m_replicaCount = replicaCount;
    impl->m_fragmentCount = fragCount;
    DBUG_PRINT("info", ("replicaCount=%x , fragCount=%x",replicaCount,fragCount));
    for(i = 0; i < (Uint32) (fragCount*replicaCount); i++)
    {
      impl->m_fragments.push_back(tableDesc->ReplicaData[i+2]);
    }

    Uint32 topBit = (1 << 31);
@ -1649,17 +1942,18 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
  }
  else
  {
    impl->m_fragmentCount = tableDesc->FragmentCount;
    impl->m_replicaCount = 0;
    impl->m_hashValueMask = 0;
    impl->m_hashpointerValue = 0;
  }
  impl->m_tablespace_id = tableDesc->TablespaceId;
  impl->m_tablespace_version = tableDesc->TablespaceVersion;

  * ret = impl;

  NdbMem_Free((void*)tableDesc);
  DBUG_ASSERT(impl->m_fragmentCount > 0);
  DBUG_RETURN(0);
}
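Moving DictTabInfo::Table from the stack to the heap (it now embeds large fixed-size arrays such as FragmentData, TablespaceData and RangeListData) makes every early return in parseTableInfo responsible for releasing the descriptor; error code 4000 is the allocation-failure case. A sketch of that control-flow shape, with plain malloc/free standing in for NdbMem_Allocate/NdbMem_Free (BigDescriptor and parse_descriptor are hypothetical):

  #include <cstdlib>

  struct BigDescriptor { unsigned data[4096]; };  // stand-in for DictTabInfo::Table

  static int parse_descriptor(bool unpack_ok)
  {
    BigDescriptor *desc =
        static_cast<BigDescriptor*>(malloc(sizeof(BigDescriptor)));
    if (!desc)
      return 4000;              // allocation failed

    if (!unpack_ok) {
      free(desc);               // every early exit frees the descriptor
      return 703;
    }

    free(desc);                 // ... and so does the success path
    return 0;
  }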
@ -1800,8 +2094,9 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
                                     NdbTableImpl & impl,
                                     bool alter)
{
  unsigned i;
  char *ts_names[MAX_NDB_PARTITIONS];
  DBUG_ENTER("NdbDictInterface::createOrAlterTable");

  impl.computeAggregates();
@ -1827,7 +2122,8 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
    impl.m_newExternalName.clear();
  }
  // Definition change (frm)
  if (!impl.m_newFrm.empty())
  {
    if (alter)
    {
      AlterTableReq::setFrmFlag(impl.m_changeMask, true);
@ -1835,6 +2131,55 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
    impl.m_frm.assign(impl.m_newFrm.get_data(), impl.m_newFrm.length());
    impl.m_newFrm.clear();
  }
  // Change FragmentData (fragment identity, state, tablespace id)
  if (!impl.m_new_fd.empty())
  {
    if (alter)
    {
      AlterTableReq::setFragDataFlag(impl.m_changeMask, true);
    }
    impl.m_fd.assign(impl.m_new_fd.get_data(), impl.m_new_fd.length());
    impl.m_new_fd.clear();
  }
  // Change Tablespace Name Data
  if (!impl.m_new_ts_name.empty())
  {
    if (alter)
    {
      AlterTableReq::setTsNameFlag(impl.m_changeMask, true);
    }
    impl.m_ts_name.assign(impl.m_new_ts_name.get_data(),
                          impl.m_new_ts_name.length());
    impl.m_new_ts_name.clear();
  }
  // Change Range/List Data
  if (!impl.m_new_range.empty())
  {
    if (alter)
    {
      AlterTableReq::setRangeListFlag(impl.m_changeMask, true);
    }
    impl.m_range.assign(impl.m_new_range.get_data(),
                        impl.m_new_range.length());
    impl.m_new_range.clear();
  }
  // Change Tablespace Data
  if (!impl.m_new_ts.empty())
  {
    if (alter)
    {
      AlterTableReq::setTsFlag(impl.m_changeMask, true);
    }
    impl.m_ts.assign(impl.m_new_ts.get_data(),
                     impl.m_new_ts.length());
    impl.m_new_ts.clear();
  }
  /*
    TODO RONM: Here I need to insert checks for fragment array and
    range or list array
  */

  //validate();
  //aggregate();
@ -1843,10 +2188,17 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
                           ndb.internalize_table_name(impl.m_externalName.c_str()));
  impl.m_internalName.assign(internalName);
  impl.updateMysqlName();
  DictTabInfo::Table *tmpTab;

  tmpTab = (DictTabInfo::Table*)NdbMem_Allocate(sizeof(DictTabInfo::Table));
  if (!tmpTab)
  {
    m_error.code = 4000;
    DBUG_RETURN(-1);
  }
  tmpTab->init();
  BaseString::snprintf(tmpTab->TableName,
                       sizeof(tmpTab->TableName),
                       internalName.c_str());

  bool haveAutoIncrement = false;
@ -1859,6 +2211,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
    if (col->m_autoIncrement) {
      if (haveAutoIncrement) {
        m_error.code= 4335;
        NdbMem_Free((void*)tmpTab);
        DBUG_RETURN(-1);
      }
      haveAutoIncrement = true;
@ -1877,35 +2230,88 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
  // Check max length of frm data
  if (impl.m_frm.length() > MAX_FRM_DATA_SIZE){
    m_error.code= 1229;
    NdbMem_Free((void*)tmpTab);
    DBUG_RETURN(-1);
  }
  /*
    TODO RONM: This needs to change to dynamic arrays instead
    Frm Data, FragmentData, TablespaceData, RangeListData, TsNameData
  */
  tmpTab->FrmLen = impl.m_frm.length();
  memcpy(tmpTab->FrmData, impl.m_frm.get_data(), impl.m_frm.length());

  tmpTab->FragmentDataLen = impl.m_fd.length();
  memcpy(tmpTab->FragmentData, impl.m_fd.get_data(), impl.m_fd.length());

  tmpTab->TablespaceDataLen = impl.m_ts.length();
  memcpy(tmpTab->TablespaceData, impl.m_ts.get_data(), impl.m_ts.length());

  tmpTab->RangeListDataLen = impl.m_range.length();
  memcpy(tmpTab->RangeListData, impl.m_range.get_data(),
         impl.m_range.length());

  memcpy(ts_names, impl.m_ts_name.get_data(),
         impl.m_ts_name.length());

  tmpTab->FragmentCount= impl.m_fragmentCount;
  tmpTab->TableLoggedFlag = impl.m_logging;
  tmpTab->RowGCIFlag = impl.m_row_gci;
  tmpTab->RowChecksumFlag = impl.m_row_checksum;
  tmpTab->TableKValue = impl.m_kvalue;
  tmpTab->MinLoadFactor = impl.m_minLoadFactor;
  tmpTab->MaxLoadFactor = impl.m_maxLoadFactor;
  tmpTab->TableType = DictTabInfo::UserTable;
  tmpTab->PrimaryTableId = impl.m_primaryTableId;
  tmpTab->NoOfAttributes = sz;
  tmpTab->MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32);
  tmpTab->MaxRowsLow = (Uint32)(impl.m_max_rows & 0xFFFFFFFF);
  tmpTab->DefaultNoPartFlag = impl.m_default_no_part_flag;

  if (impl.m_ts_name.length())
  {
    char **ts_name_ptr= (char**)ts_names;
    i= 0;
    do
    {
      NdbTablespaceImpl tmp;
      if (*ts_name_ptr)
      {
        if(get_filegroup(tmp, NdbDictionary::Object::Tablespace,
                         (const char*)*ts_name_ptr) == 0)
        {
          tmpTab->TablespaceData[2*i] = tmp.m_id;
          tmpTab->TablespaceData[2*i + 1] = tmp.m_version;
        }
        else
        {
          NdbMem_Free((void*)tmpTab);
          DBUG_RETURN(-1);
        }
      }
      else
      {
        /*
          No tablespace used, set tablespace id to NULL
        */
        tmpTab->TablespaceData[2*i] = RNIL;
        tmpTab->TablespaceData[2*i + 1] = 0;
      }
      ts_name_ptr++;
    } while (++i < tmpTab->FragmentCount);
    tmpTab->TablespaceDataLen= 4*i;
  }

  tmpTab->FragmentType = getKernelConstant(impl.m_fragmentType,
                                           fragmentTypeMapping,
                                           DictTabInfo::AllNodesSmallTable);
  tmpTab->TableVersion = rand();

  const char *tablespace_name= impl.m_tablespace_name.c_str();
loop:
  if(impl.m_tablespace_id != ~(Uint32)0)
  {
    tmpTab->TablespaceId = impl.m_tablespace_id;
    tmpTab->TablespaceVersion = impl.m_tablespace_version;
  }
  else if(strlen(tablespace_name))
  {
@ -1913,13 +2319,14 @@ loop:
    if(get_filegroup(tmp, NdbDictionary::Object::Tablespace,
                     tablespace_name) == 0)
    {
      tmpTab->TablespaceId = tmp.m_id;
      tmpTab->TablespaceVersion = tmp.m_version;
    }
    else
    {
      // error set by get filegroup
      NdbMem_Free((void*)tmpTab);
      DBUG_RETURN(-1);
    }
  }
  else
@ -1937,13 +2344,14 @@ loop:
  UtilBufferWriter w(m_buffer);
  SimpleProperties::UnpackStatus s;
  s = SimpleProperties::pack(w,
                             tmpTab,
                             DictTabInfo::TableMapping,
                             DictTabInfo::TableMappingSize, true);

  if(s != SimpleProperties::Eof){
    abort();
  }
  NdbMem_Free((void*)tmpTab);

  DBUG_PRINT("info",("impl.m_noOfDistributionKeys: %d impl.m_noOfKeys: %d distKeys: %d",
                     impl.m_noOfDistributionKeys, impl.m_noOfKeys, distKeys));
@ -2053,7 +2461,7 @@ loop:
  if(m_error.code == AlterTableRef::InvalidTableVersion) {
    // Clear caches and try again
    DBUG_RETURN(INCOMPATIBLE_VERSION);
  }
} else {
  tSignal.theVerId_signalNumber = GSN_CREATE_TABLE_REQ;
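createOrAlterTable splits the 64-bit m_max_rows into the two 32-bit words MaxRowsHigh/MaxRowsLow, and parseTableInfo reassembles them; the round trip is lossless. A self-contained check of that encoding:

  #include <cstdint>
  #include <cassert>

  int main()
  {
    uint64_t max_rows = 0x123456789ABCDEF0ULL;

    uint32_t high = (uint32_t)(max_rows >> 32);         // MaxRowsHigh
    uint32_t low  = (uint32_t)(max_rows & 0xFFFFFFFF);  // MaxRowsLow

    uint64_t reassembled = ((uint64_t)high << 32) + low;
    assert(reassembled == max_rows);
    return 0;
  }

The per-fragment TablespaceData array built in the loop above follows a similarly fixed layout: slots 2*i and 2*i+1 hold fragment i's tablespace id and version, with RNIL in the id slot marking a fragment that uses no tablespace.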

View File

@ -120,9 +120,24 @@ public:
  void init();
  void setName(const char * name);
  const char * getName() const;
  void setFragmentCount(Uint32 count);
  Uint32 getFragmentCount() const;
  void setFrm(const void* data, Uint32 len);
  const void * getFrmData() const;
  Uint32 getFrmLength() const;
  void setFragmentData(const void* data, Uint32 len);
  const void * getFragmentData() const;
  Uint32 getFragmentDataLen() const;
  void setTablespaceNames(const void* data, Uint32 len);
  Uint32 getTablespaceNamesLen() const;
  const void * getTablespaceNames() const;
  void setTablespaceData(const void* data, Uint32 len);
  const void * getTablespaceData() const;
  Uint32 getTablespaceDataLen() const;
  void setRangeListData(const void* data, Uint32 len);
  const void * getRangeListData() const;
  Uint32 getRangeListDataLen() const;

  const char * getMysqlName() const;
  void updateMysqlName();
@ -133,8 +148,15 @@ public:
  BaseString m_mysqlName;
  BaseString m_newExternalName; // Used for alter table
  UtilBuffer m_frm;
  UtilBuffer m_newFrm;      // Used for alter table
  UtilBuffer m_ts_name;     //Tablespace Names
  UtilBuffer m_new_ts_name; //Tablespace Names
  UtilBuffer m_ts;          //TablespaceData
  UtilBuffer m_new_ts;      //TablespaceData
  UtilBuffer m_fd;          //FragmentData
  UtilBuffer m_new_fd;      //FragmentData
  UtilBuffer m_range;       //Range Or List Array
  UtilBuffer m_new_range;   //Range Or List Array
  NdbDictionary::Object::FragmentType m_fragmentType;

  /**
@ -153,6 +175,8 @@ public:
  Uint32 m_hashpointerValue;
  Vector<Uint16> m_fragments;

  Uint64 m_max_rows;
  Uint32 m_default_no_part_flag;
  bool m_logging;
  bool m_row_gci;
  bool m_row_checksum;
@ -162,7 +186,6 @@ public:
  Uint16 m_keyLenInWords;
  Uint16 m_fragmentCount;

  NdbIndexImpl * m_index;
  NdbColumnImpl * getColumn(unsigned attrId);
  NdbColumnImpl * getColumn(const char * name);
View File

@ -334,7 +334,7 @@ int runCreateShadowTable(NDBT_Context* ctx, NDBT_Step* step)
table_shadow.setName(buf); table_shadow.setName(buf);
// TODO should be removed // TODO should be removed
// This should work wo/ next line // This should work wo/ next line
table_shadow.setNodeGroupIds(0, 0); //table_shadow.setNodeGroupIds(0, 0);
GETNDB(step)->getDictionary()->createTable(table_shadow); GETNDB(step)->getDictionary()->createTable(table_shadow);
if (GETNDB(step)->getDictionary()->getTable(buf)) if (GETNDB(step)->getDictionary()->getTable(buf))
return NDBT_OK; return NDBT_OK;

View File

@ -336,7 +336,7 @@ RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len)
    return false;

  debug << "parseTableInfo " << tableImpl->getName() << " done" << endl;
  tableImpl->m_fd.clear();
  tableImpl->m_fragmentType = NdbDictionary::Object::FragAllSmall;
  TableS * table = new TableS(m_fileHeader.NdbVersion, tableImpl);
  if(table == NULL) {