commit af17f788b5
Merge bk-internal.mysql.com:/data0/bk/mysql-5.1
into bk-internal.mysql.com:/data0/bk/mysql-5.1-kt

mysql-test/r/partition_range.result: Auto merged
sql/sql_show.cc: Auto merged
@@ -278,6 +278,7 @@ bkpush.log
 bkpush.log*
 build.log
 build_tags.sh
+client/#mysql.cc#
 client/*.ds?
 client/*.vcproj
 client/completion_hash.cpp
@@ -1181,7 +1182,9 @@ sql/pack.c
 sql/safe_to_cache_query.txt
 sql/share/*.sys
 sql/share/charsets/gmon.out
+sql/share/fixerrmsg.pl
 sql/share/gmon.out
+sql/share/iso639-2.txt
 sql/share/mysql
 sql/share/norwegian-ny/errmsg.sys
 sql/share/norwegian/errmsg.sys
@@ -144,6 +144,10 @@ SOURCE=.\readline.cpp
 # End Source File
 # Begin Source File
 
+SOURCE=..\mysys\my_conio.c
+# End Source File
+# Begin Source File
+
 SOURCE=.\sql_string.cpp
 # End Source File
 # End Target
@@ -130,6 +130,10 @@ SOURCE=.\readline.cpp
 # End Source File
 # Begin Source File
 
+SOURCE=..\mysys\my_conio.c
+# End Source File
+# Begin Source File
+
 SOURCE=.\sql_string.cpp
 # End Source File
 # End Target
@@ -852,6 +852,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
   case OPT_NOPAGER:
     printf("WARNING: option deprecated; use --disable-pager instead.\n");
     opt_nopager= 1;
+    break;
   case OPT_MYSQL_PROTOCOL:
   {
     if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
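Reviewer note on the hunk above: the added break; is what keeps the deprecated --no-pager branch from falling through into the OPT_MYSQL_PROTOCOL handler. A minimal, self-contained sketch of that fall-through hazard (hypothetical option codes, not the real client sources):

    #include <cstdio>

    enum Opt { OPT_NOPAGER = 1, OPT_MYSQL_PROTOCOL = 2 };

    // Hypothetical illustration only. Without the break, the OPT_NOPAGER case
    // would continue into the next case and try to parse a protocol name that
    // was never given.
    static void handle_option(int optid, const char *argument)
    {
      switch (optid) {
      case OPT_NOPAGER:
        std::printf("WARNING: option deprecated; use --disable-pager instead.\n");
        break;                 // the added statement: stop here
      case OPT_MYSQL_PROTOCOL:
        std::printf("parsing protocol: %s\n", argument ? argument : "(none)");
        break;
      }
    }

    int main()
    {
      handle_option(OPT_NOPAGER, nullptr);   // prints only the warning
      return 0;
    }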
@@ -1,4 +1,14 @@
-yaSSL Release notes, version 1.3.0 (04/26/06)
+yaSSL Release notes, version 1.3.5 (06/01/06)
 
 
+This release of yaSSL contains bug fixes, portability enhancements,
+better libcurl support, and improved non-blocking I/O.
+
+See normal build instructions below under 1.0.6.
+See libcurl build instructions below under 1.3.0.
+
+
+********************yaSSL Release notes, version 1.3.0 (04/26/06)
+
+
 This release of yaSSL contains minor bug fixes, portability enhancements,
@@ -17,8 +27,8 @@ See normal build instructions below under 1.0.6.
 make
 make openssl-links
 
-(then go to your libcurl home and tell libcurl about yaSSL)
-./configure --with-ssl=/yaSSL-HomeDir
+(then go to your libcurl home and tell libcurl about yaSSL build dir)
+./configure --with-ssl=/yaSSL-BuildDir LDFLAGS=-lm
 make
 
 
extra/yassl/include/openssl/engine.h (new file, 5 lines)
@@ -0,0 +1,5 @@
+/* engine.h for libcurl */
+
+#undef HAVE_OPENSSL_ENGINE_H
+
+
extra/yassl/include/openssl/pkcs12.h (new file, 5 lines)
@@ -0,0 +1,5 @@
+/* pkcs12.h for libcurl */
+
+
+#undef HAVE_OPENSSL_PKCS12_H
+
@@ -458,6 +458,11 @@ void ProcessOldClientHello(input_buffer& input, SSL& ssl)
 
     uint16 sz = ((b0 & 0x7f) << 8) | b1;
 
+    if (sz > input.get_remaining()) {
+        ssl.SetError(bad_input);
+        return;
+    }
+
     // hashHandShake manually
     const opaque* buffer = input.get_buffer() + input.get_current();
     ssl.useHashes().use_MD5().update(buffer, sz);
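Reviewer note: the new check in the hunk above refuses an SSLv2 hello whose declared size is larger than what is actually buffered, instead of hashing past the end of the input. A minimal sketch of the same validate-before-consume pattern, using simplified stand-in types rather than yaSSL's input_buffer API:

    #include <cstdint>
    #include <cstddef>

    // Simplified stand-in for an input buffer: 'remaining' bytes are available.
    struct Input {
        const uint8_t* data;
        size_t         remaining;
    };

    // Returns false (and consumes nothing) when the length field taken from the
    // wire is larger than the bytes actually buffered, the condition the patch
    // now treats as bad_input instead of reading past the end.
    bool consume_record(Input& in, uint8_t b0, uint8_t b1)
    {
        uint16_t sz = static_cast<uint16_t>(((b0 & 0x7f) << 8) | b1);
        if (sz > in.remaining)
            return false;        // reject: declared size exceeds buffered data
        in.data      += sz;      // safe to consume exactly sz bytes
        in.remaining -= sz;
        return true;
    }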
@@ -681,25 +686,38 @@ DoProcessReply(SSL& ssl, mySTL::auto_ptr<input_buffer> buffered)
     // old style sslv2 client hello?
     if (ssl.getSecurity().get_parms().entity_ == server_end &&
         ssl.getStates().getServer() == clientNull)
-        if (buffer.peek() != handshake)
+        if (buffer.peek() != handshake) {
             ProcessOldClientHello(buffer, ssl);
+            if (ssl.GetError()) {
+                buffered.reset(0);
+                return buffered;
+            }
+        }
 
     while(!buffer.eof()) {
         // each record
         RecordLayerHeader hdr;
+        bool needHdr = false;
+
+        if (static_cast<uint>(RECORD_HEADER) > buffer.get_remaining())
+            needHdr = true;
+        else {
             buffer >> hdr;
             ssl.verifyState(hdr);
+        }
 
         // make sure we have enough input in buffer to process this record
-        if (hdr.length_ > buffer.get_remaining()) {
-            uint sz = buffer.get_remaining() + RECORD_HEADER;
+        if (needHdr || hdr.length_ > buffer.get_remaining()) {
+            // put header in front for next time processing
+            uint extra = needHdr ? 0 : RECORD_HEADER;
+            uint sz = buffer.get_remaining() + extra;
             buffered.reset(NEW_YS input_buffer(sz, buffer.get_buffer() +
-                buffer.get_current() - RECORD_HEADER, sz));
+                buffer.get_current() - extra, sz));
             break;
         }
 
         while (buffer.get_current() < hdr.length_ + RECORD_HEADER + offset) {
-            // each message in record
+            // each message in record, can be more than 1 if not encrypted
             if (ssl.getSecurity().get_parms().pending_ == false) // cipher on
                 decrypt_message(ssl, buffer, hdr.length_);
             mySTL::auto_ptr<Message> msg(mf.CreateObject(hdr.type_), ysDelete);
@@ -717,7 +735,7 @@ DoProcessReply(SSL& ssl, mySTL::auto_ptr<input_buffer> buffered)
         }
         offset += hdr.length_ + RECORD_HEADER;
     }
-    return buffered; // done, don't call again
+    return buffered;
 }
 
 
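Reviewer note: together, the two DoProcessReply hunks above make the function stash an incomplete record, including the case where fewer than RECORD_HEADER bytes have arrived, and hand it back for the next call; this is the improved non-blocking I/O mentioned in the release notes. A rough sketch of that carry-over decision with placeholder types (not yaSSL's buffer classes):

    #include <cstddef>
    #include <vector>

    constexpr size_t RECORD_HEADER = 5;   // TLS record header size

    // 'pos' is the current read offset: just past the record header when the
    // header could be parsed, or at the start of the partial header otherwise.
    // Returns the bytes that must be kept for the next call, mirroring the
    // needHdr/extra logic in the patch.
    std::vector<unsigned char> carry_over(const std::vector<unsigned char>& buf,
                                          size_t pos, bool needHdr, size_t body_len)
    {
        size_t remaining = buf.size() - pos;

        if (!needHdr && body_len <= remaining)
            return {};                               // whole record is buffered

        size_t extra = needHdr ? 0 : RECORD_HEADER;  // re-include the parsed header
        return std::vector<unsigned char>(buf.begin() + (pos - extra), buf.end());
    }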
@@ -28,6 +28,9 @@
 #ifndef yaSSL_NEW_HPP
 #define yaSSL_NEW_HPP
 
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
 
 #ifdef __sun
 
@@ -400,6 +400,7 @@ inline double ulonglong2double(ulonglong value)
 
 #define FN_LIBCHAR '\\'
 #define FN_ROOTDIR "\\"
+#define FN_DEVCHAR ':'
 #define FN_NETWORK_DRIVES /* Uses \\ to indicate network drives */
 #define FN_NO_CASE_SENCE /* Files are not case-sensitive */
 #define OS_FILE_LIMIT 2048
@@ -681,7 +681,6 @@ typedef SOCKET_SIZE_TYPE size_socket;
 #define FN_HOMELIB '~' /* ~/ is used as abbrev for home dir */
 #define FN_CURLIB '.' /* ./ is used as abbrev for current dir */
 #define FN_PARENTDIR ".." /* Parent directory; Must be a string */
-#define FN_DEVCHAR ':'
 
 #ifndef FN_LIBCHAR
 #define FN_LIBCHAR '/'
@@ -99,6 +99,7 @@ enum enum_server_command
 #define GET_FIXED_FIELDS_FLAG (1 << 18) /* Used to get fields in item tree */
 #define FIELD_IN_PART_FUNC_FLAG (1 << 19)/* Field part of partition func */
 #define FIELD_IN_ADD_INDEX (1<< 20) /* Intern: Field used in ADD INDEX */
+#define FIELD_IS_RENAMED (1<< 21) /* Intern: Field is being renamed */
 
 #define REFRESH_GRANT 1 /* Refresh grant tables */
 #define REFRESH_LOG 2 /* Start on new log file */
mysql-test/r/create_not_windows.result (new file, 14 lines)
@@ -0,0 +1,14 @@
+drop table if exists `about:text`;
+create table `about:text` (
+_id int not null auto_increment,
+`about:text` varchar(255) not null default '',
+primary key (_id)
+);
+show create table `about:text`;
+Table Create Table
+about:text CREATE TABLE `about:text` (
+`_id` int(11) NOT NULL AUTO_INCREMENT,
+`about:text` varchar(255) NOT NULL DEFAULT '',
+PRIMARY KEY (`_id`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table `about:text`;
@@ -7,20 +7,20 @@ period_add("9602",-12) period_diff(199505,"9404")
 199502 13
 select now()-now(),weekday(curdate())-weekday(now()),unix_timestamp()-unix_timestamp(now());
 now()-now() weekday(curdate())-weekday(now()) unix_timestamp()-unix_timestamp(now())
-0 0 0
+0.000000 0 0
 select from_unixtime(unix_timestamp("1994-03-02 10:11:12")),from_unixtime(unix_timestamp("1994-03-02 10:11:12"),"%Y-%m-%d %h:%i:%s"),from_unixtime(unix_timestamp("1994-03-02 10:11:12"))+0;
 from_unixtime(unix_timestamp("1994-03-02 10:11:12")) from_unixtime(unix_timestamp("1994-03-02 10:11:12"),"%Y-%m-%d %h:%i:%s") from_unixtime(unix_timestamp("1994-03-02 10:11:12"))+0
-1994-03-02 10:11:12 1994-03-02 10:11:12 19940302101112
+1994-03-02 10:11:12 1994-03-02 10:11:12 19940302101112.000000
 select sec_to_time(9001),sec_to_time(9001)+0,time_to_sec("15:12:22"),
 sec_to_time(time_to_sec("0:30:47")/6.21);
 sec_to_time(9001) sec_to_time(9001)+0 time_to_sec("15:12:22") sec_to_time(time_to_sec("0:30:47")/6.21)
-02:30:01 23001 54742 00:04:57
+02:30:01 23001.000000 54742 00:04:57
 select sec_to_time(time_to_sec('-838:59:59'));
 sec_to_time(time_to_sec('-838:59:59'))
 -838:59:59
 select now()-curdate()*1000000-curtime();
 now()-curdate()*1000000-curtime()
-0
+0.000000
 select strcmp(current_timestamp(),concat(current_date()," ",current_time()));
 strcmp(current_timestamp(),concat(current_date()," ",current_time()))
 0
@@ -751,6 +751,10 @@ select monthname(str_to_date(null, '%m')), monthname(str_to_date(null, '%m')),
 monthname(str_to_date(1, '%m')), monthname(str_to_date(0, '%m'));
 monthname(str_to_date(null, '%m')) monthname(str_to_date(null, '%m')) monthname(str_to_date(1, '%m')) monthname(str_to_date(0, '%m'))
 NULL NULL January NULL
+select now() - now() + 0, curtime() - curtime() + 0,
+sec_to_time(1) + 0, from_unixtime(1) + 0;
+now() - now() + 0 curtime() - curtime() + 0 sec_to_time(1) + 0 from_unixtime(1) + 0
+0.000000 0.000000 1.000000 19700101030001.000000
 End of 4.1 tests
 explain extended select timestampdiff(SQL_TSI_WEEK, '2001-02-01', '2001-05-01') as a1,
 timestampdiff(SQL_TSI_FRAC_SECOND, '2001-02-01 12:59:59.120000', '2001-05-01 12:58:58.119999') as a2;
@@ -912,58 +912,62 @@ grant select (f1) on mysqltest.t1 to user1@localhost;
 grant select on mysqltest.t2 to user2@localhost;
 grant select on mysqltest.* to user3@localhost;
 grant select on *.* to user4@localhost;
-select * from information_schema.column_privileges;
+select * from information_schema.column_privileges order by grantee;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE
 'user1'@'localhost' NULL mysqltest t1 f1 SELECT NO
-select * from information_schema.table_privileges;
+select * from information_schema.table_privileges order by grantee;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE
-select * from information_schema.schema_privileges;
+select * from information_schema.schema_privileges order by grantee;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA PRIVILEGE_TYPE IS_GRANTABLE
-select * from information_schema.user_privileges;
+select * from information_schema.user_privileges order by grantee;
 GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE
 'user1'@'localhost' NULL USAGE NO
 show grants;
 Grants for user1@localhost
 GRANT USAGE ON *.* TO 'user1'@'localhost'
 GRANT SELECT (f1) ON `mysqltest`.`t1` TO 'user1'@'localhost'
-select * from information_schema.column_privileges;
+select * from information_schema.column_privileges order by grantee;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE
-select * from information_schema.table_privileges;
+select * from information_schema.table_privileges order by grantee;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE
 'user2'@'localhost' NULL mysqltest t2 SELECT NO
-select * from information_schema.schema_privileges;
+select * from information_schema.schema_privileges order by grantee;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA PRIVILEGE_TYPE IS_GRANTABLE
-select * from information_schema.user_privileges;
+select * from information_schema.user_privileges order by grantee;
 GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE
 'user2'@'localhost' NULL USAGE NO
 show grants;
 Grants for user2@localhost
 GRANT USAGE ON *.* TO 'user2'@'localhost'
 GRANT SELECT ON `mysqltest`.`t2` TO 'user2'@'localhost'
-select * from information_schema.column_privileges;
+select * from information_schema.column_privileges order by grantee;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE
-select * from information_schema.table_privileges;
+select * from information_schema.table_privileges order by grantee;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE
-select * from information_schema.schema_privileges;
+select * from information_schema.schema_privileges order by grantee;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA PRIVILEGE_TYPE IS_GRANTABLE
 'user3'@'localhost' NULL mysqltest SELECT NO
-select * from information_schema.user_privileges;
+select * from information_schema.user_privileges order by grantee;
 GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE
 'user3'@'localhost' NULL USAGE NO
 show grants;
 Grants for user3@localhost
 GRANT USAGE ON *.* TO 'user3'@'localhost'
 GRANT SELECT ON `mysqltest`.* TO 'user3'@'localhost'
-select * from information_schema.column_privileges where grantee like '%user%';
+select * from information_schema.column_privileges where grantee like '%user%'
+order by grantee;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE
 'user1'@'localhost' NULL mysqltest t1 f1 SELECT NO
-select * from information_schema.table_privileges where grantee like '%user%';
+select * from information_schema.table_privileges where grantee like '%user%'
+order by grantee;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE
 'user2'@'localhost' NULL mysqltest t2 SELECT NO
-select * from information_schema.schema_privileges where grantee like '%user%';
+select * from information_schema.schema_privileges where grantee like '%user%'
+order by grantee;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA PRIVILEGE_TYPE IS_GRANTABLE
 'user3'@'localhost' NULL mysqltest SELECT NO
-select * from information_schema.user_privileges where grantee like '%user%';
+select * from information_schema.user_privileges where grantee like '%user%'
+order by grantee;
 GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE
 'user1'@'localhost' NULL USAGE NO
 'user2'@'localhost' NULL USAGE NO
@@ -1154,14 +1158,6 @@ routine_name
 
 delete from proc where name='';
 use test;
-select * from information_schema.engines WHERE ENGINE="MyISAM";
-ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
-MyISAM ENABLED Default engine as of MySQL 3.23 with great performance NO NO NO
-grant select on *.* to user3148@localhost;
-select user,db from information_schema.processlist;
-user db
-user3148 test
-drop user user3148@localhost;
 grant select on test.* to mysqltest_1@localhost;
 create table t1 (id int);
 create view v1 as select * from t1;
@@ -1175,3 +1171,20 @@ NULL test v2 select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER
 drop view v1, v2;
 drop table t1;
 drop user mysqltest_1@localhost;
+set @a:= '.';
+create table t1(f1 char(5));
+create table t2(f1 char(5));
+select concat(@a, table_name), @a, table_name
+from information_schema.tables where table_schema = 'test';
+concat(@a, table_name) @a table_name
+.t1 . t1
+.t2 . t2
+drop table t1,t2;
+select * from information_schema.engines WHERE ENGINE="MyISAM";
+ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
+MyISAM ENABLED Default engine as of MySQL 3.23 with great performance NO NO NO
+grant select on *.* to user3148@localhost;
+select user,db from information_schema.processlist;
+user db
+user3148 test
+drop user user3148@localhost;
@@ -50,14 +50,6 @@ Field Type Null Key Default Extra
 a int(11) YES NULL
 unlock tables;
 drop table t1;
-CREATE DATABASE mysqltest_1;
-FLUSH TABLES WITH READ LOCK;
-DROP DATABASE mysqltest_1;
-DROP DATABASE mysqltest_1;
-ERROR HY000: Can't execute the query because you have a conflicting read lock
-UNLOCK TABLES;
-DROP DATABASE mysqltest_1;
-ERROR HY000: Can't drop database 'mysqltest_1'; database doesn't exist
 use mysql;
 LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE;
 FLUSH TABLES;
@@ -74,3 +66,11 @@ Select_priv
 N
 use test;
 use test;
+CREATE DATABASE mysqltest_1;
+FLUSH TABLES WITH READ LOCK;
+DROP DATABASE mysqltest_1;
+DROP DATABASE mysqltest_1;
+ERROR HY000: Can't execute the query because you have a conflicting read lock
+UNLOCK TABLES;
+DROP DATABASE mysqltest_1;
+ERROR HY000: Can't drop database 'mysqltest_1'; database doesn't exist
@@ -165,3 +165,12 @@ create table t1Aa (col1 int);
 select t1Aa.col1 from t1aA,t2Aa where t1Aa.col1 = t2aA.col1;
 col1
 drop table t2aA, t1Aa;
+create database mysqltest_LC2;
+use mysqltest_LC2;
+create table myUC (i int);
+select TABLE_SCHEMA,TABLE_NAME FROM information_schema.TABLES
+where TABLE_SCHEMA ='mysqltest_LC2';
+TABLE_SCHEMA TABLE_NAME
+mysqltest_LC2 myUC
+use test;
+drop database mysqltest_LC2;
@@ -320,8 +320,13 @@ LOAD DATA INFILE 'tmp.dat' INTO TABLE ndb_show_tables;
 set @t1_id = (select id from ndb_show_tables where name like '%t1%');
 truncate ndb_show_tables;
 alter table t1 change tiny new_tiny tinyint(4) DEFAULT '0' NOT NULL;
+LOAD DATA INFILE 'tmp.dat' INTO TABLE ndb_show_tables;
+select 'no_copy' from ndb_show_tables where id = @t1_id and name like '%t1%';
+no_copy
+set @t1_id = (select id from ndb_show_tables where name like '%t1%');
+truncate ndb_show_tables;
 create index i1 on t1(medium);
-alter table t1 add index i2(long_int);
+alter table t1 add index i2(new_tiny);
 drop index i1 on t1;
 LOAD DATA INFILE 'tmp.dat' INTO TABLE ndb_show_tables;
 select 'no_copy' from ndb_show_tables where id = @t1_id and name like '%t1%';
mysql-test/r/ndb_loaddatalocal.result (new file, 46 lines)
@@ -0,0 +1,46 @@
+DROP TABLE IF EXISTS t1;
+create table t1(a int) engine=myisam;
+select * into outfile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1;
+drop table t1;
+create table t1(a int) engine=ndb;
+load data local infile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1;
+select count(*) from t1;
+count(*)
+10000
+drop table t1;
+create table t1(a int) engine=myisam;
+insert into t1 values (1), (2), (2), (3);
+select * into outfile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1;
+drop table t1;
+create table t1(a int primary key) engine=ndb;
+load data local infile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1;
+select * from t1 order by a;
+a
+1
+2
+3
+drop table t1;
+create table t1(a int) engine=myisam;
+insert into t1 values (1), (1), (2), (3);
+select * into outfile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1;
+drop table t1;
+create table t1(a int primary key) engine=ndb;
+load data local infile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1;
+select * from t1 order by a;
+a
+1
+2
+3
+drop table t1;
+create table t1(a int) engine=myisam;
+insert into t1 values (1), (2), (3), (3);
+select * into outfile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1;
+drop table t1;
+create table t1(a int primary key) engine=ndb;
+load data local infile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1;
+select * from t1 order by a;
+a
+1
+2
+3
+drop table t1;
mysql-test/r/ndb_restore_partition.result (new file, 469 lines)
@@ -0,0 +1,469 @@
+use test;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+CREATE TABLE `t1_c` (
+`capgoaledatta` smallint(5) unsigned NOT NULL auto_increment,
+`goaledatta` char(2) NOT NULL default '',
+`maturegarbagefa` varchar(32) NOT NULL default '',
+PRIMARY KEY (`capgoaledatta`,`goaledatta`,`maturegarbagefa`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO `t1_c` VALUES (2,'3','q3plus.qt'),(4,'4','q3plus.qt'),(1,'3','q3.net'),(3,'4','q3.net'),(3,'20','threetrees.qt');
+CREATE TABLE `t2_c` (
+`capgotod` smallint(5) unsigned NOT NULL auto_increment,
+`gotod` smallint(5) unsigned NOT NULL default '0',
+`goaledatta` char(2) default NULL,
+`maturegarbagefa` varchar(32) default NULL,
+`descrpooppo` varchar(64) default NULL,
+`svcutonsa` varchar(64) NOT NULL default '',
+PRIMARY KEY (`capgotod`),
+KEY `i_quadaddsvr` (`gotod`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO `t2_c` VALUES (5,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST');
+CREATE TABLE `t3_c` (
+`CapGoaledatta` smallint(5) unsigned NOT NULL default '0',
+`capgotod` smallint(5) unsigned NOT NULL default '0',
+PRIMARY KEY (`capgotod`,`CapGoaledatta`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO `t3_c` VALUES (5,3),(2,4),(5,4),(1,3);
+CREATE TABLE `t4_c` (
+`capfa` bigint(20) unsigned NOT NULL auto_increment,
+`realm` varchar(32) NOT NULL default '',
+`authpwchap` varchar(32) default NULL,
+`fa` varchar(32) NOT NULL default '',
+`payyingatta` tinyint(4) NOT NULL default '0',
+`status` char(1) default NULL,
+PRIMARY KEY (`fa`,`realm`),
+KEY `capfa` (`capfa`),
+KEY `i_quadentity` (`fa`,`realm`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO `t4_c` VALUES (18,'john.smith','q3.net','dessjohn.smith',0,NULL),(21,'quad_katt_with_brandtad','q3.net','acne',0,NULL),(22,'quad_katt_carattoaa','q3.net','acne',0,NULL),(26,'436462612809','sqasdt.q3.net','N/A',0,'6'),(19,'john','smith.qt','dessjohn',0,NULL),(33,'436643196120','sqasdt.q3.net','N/A',1,'6'),(28,'436642900019','sqasdt.q3.net','N/A',0,'6'),(30,'436462900209','sqasdt.q3.net','N/A',0,'6'),(16,'436640006666','sqasdt.q3.net','',0,NULL),(19,'dette','el-redun.com','dessdette',0,NULL),(12,'quad_kattPP','q3.net','acne',2,NULL),(14,'436640008888','sqasdt.q3.net','',0,NULL),(29,'463624900028','sqasdt.q3.net','N/A',0,'6'),(15,'436640099099','sqasdt.q3.net','',0,NULL),(13,'pap','q3plus.qt','acne',1,NULL),(19,'436642612091','sqasdt.q3.net','N/A',0,'6'),(12,'quad_katt','q3.net','acne',0,NULL),(11,'quad_kattVK','q3.net','acne',1,NULL),(32,'463641969502','sqasdt.q3.net','N/A',1,'6'),(20,'joe','q3.net','joedesswd',0,NULL),(29,'436642900034','sqasdt.q3.net','N/A',0,'6'),(25,'contind','armerde.qt','acne',1,NULL);
+CREATE TABLE `t5_c` (
+`capfa` bigint(20) unsigned NOT NULL default '0',
+`gotod` smallint(5) unsigned NOT NULL default '0',
+`orderutonsa` varchar(64) NOT NULL default '',
+PRIMARY KEY (`capfa`,`gotod`,`orderutonsa`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO `t5_c` VALUES (21,2,''),(21,1,''),(22,4,'');
+CREATE TABLE `t6_c` (
+`capfa_parent` bigint(20) unsigned NOT NULL default '0',
+`capfa_child` bigint(20) unsigned NOT NULL default '0',
+`relatta` smallint(5) unsigned NOT NULL default '0',
+PRIMARY KEY (`capfa_child`,`capfa_parent`,`relatta`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO `t6_c` VALUES (15,16,0),(19,20,0),(18326932092909551615,30,0),(26,29,0),(18326932092909551615,29,0),(19,18,0),(26,28,0),(12,14,0);
+CREATE TABLE `t7_c` (
+`dardpo` char(15) NOT NULL default '',
+`dardtestard` tinyint(3) unsigned NOT NULL default '0',
+`FastFA` char(5) NOT NULL default '',
+`FastCode` char(6) NOT NULL default '',
+`Fastca` char(1) NOT NULL default '',
+`Fastmag` char(1) NOT NULL default '',
+`Beareratta` char(2) NOT NULL default '',
+PRIMARY KEY (`dardpo`,`dardtestard`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO `t7_c` VALUES ('2.6.2.4',24,'CECHP','54545','0','0','5'),('2.2.5.4',26,'CANFA','33223','1','1','4'),('4.3.2.4',28,'ITALD','54222','1','0','5'),('129..0.0.eins',28,'G','99999','1','1','5'),('1.1.1.1',24,'AUTPT','32323','0','1','3');
+CREATE TABLE `t8_c` (
+`kattjame` varchar(32) NOT NULL default '',
+`realm` varchar(32) NOT NULL default '',
+`realm_entered` varchar(32) NOT NULL default '',
+`maturegarbagefa` varchar(32) NOT NULL default '',
+`hunderaaarbagefa_parent` varchar(32) NOT NULL default '',
+`kattjame_entered` varchar(32) NOT NULL default '',
+`hunderaaarbagefa` varchar(32) NOT NULL default '',
+`gest` varchar(16) default NULL,
+`hassetino` varchar(16) NOT NULL default '',
+`aaaproxysessfa` varchar(255) default NULL,
+`autologonallowed` char(1) default NULL,
+`squardporoot` varchar(15) NOT NULL default '',
+`naspo` varchar(15) default NULL,
+`beareratta` char(2) default NULL,
+`fastCode` varchar(6) default NULL,
+`fastFA` varchar(5) default NULL,
+`fastca` char(1) default NULL,
+`fastmag` char(1) default NULL,
+`lastupdate` datetime default NULL,
+`hassetistart` datetime NOT NULL default '0000-00-00 00:00:00',
+`accthassetitime` int(10) unsigned default NULL,
+`acctoutputoctets` bigint(20) unsigned default NULL,
+`acctinputoctets` bigint(20) unsigned default NULL,
+PRIMARY KEY (`kattjame`,`hunderaaarbagefa`,`hassetistart`,`hassetino`),
+KEY `squardporoot` (`squardporoot`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO `t8_c` VALUES ('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643196120','436643196929','8956234534568968','5524595699','uxasmt21.net.acne.qt/481889229462692422','','1.1.1.1','2.2.4.6','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565),('4545435545','john','q3.net','q3.net','acne.li','436643196120','436643196929','45345234568968','995696699','uxasmt21.net.acne.qt/481889229462692423','','1.1.1.1','2.2.9.8','2','86989','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8821923,169,3565),('versteckter_q3net_katt','joe','q3.net','elredun.com','q3.net','436643196120','436643196939','91341234568968','695595699','uxasmt21.net.acne.qt/481889229462692421','','1.1.1.1','2.5.2.5','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',1923123,9569,6565);
+CREATE TABLE `t9_c` (
+`kattjame` varchar(32) NOT NULL default '',
+`kattjame_entered` varchar(32) NOT NULL default '',
+`realm` varchar(32) NOT NULL default '',
+`realm_entered` varchar(32) NOT NULL default '',
+`maturegarbagefa` varchar(32) NOT NULL default '',
+`hunderaaarbagefa` varchar(32) NOT NULL default '',
+`hunderaaarbagefa_parent` varchar(32) NOT NULL default '',
+`gest` varchar(16) default NULL,
+`hassetino` varchar(16) NOT NULL default '',
+`squardporoot` varchar(15) NOT NULL default '',
+`naspo` varchar(15) default NULL,
+`beareratta` char(2) default NULL,
+`fastCode` varchar(6) default NULL,
+`fastFA` varchar(5) default NULL,
+`fastca` char(1) default NULL,
+`fastmag` char(1) default NULL,
+`lastupdate` datetime default NULL,
+`hassetistart` datetime NOT NULL default '0000-00-00 00:00:00',
+`accthassetitime` int(10) unsigned default NULL,
+`actcoutpuocttets` bigint(20) unsigned default NULL,
+`actinputocctets` bigint(20) unsigned default NULL,
+`terminateraste` tinyint(3) unsigned default NULL,
+PRIMARY KEY (`kattjame`,`hunderaaarbagefa`,`hassetistart`,`hassetino`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO `t9_c` VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net','436643316120','436643316939','91341234568968','695595699','1.1.1.1','2.2.6.2','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',3123123,9569,6565,1),('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643316120','436643316939','8956234534568968','5254595969','1.1.1.1','8.6.2.2','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565,2),('4545435545','john','q3.net','q3.net','acne.li','436643316120','436643316939','45345234568968','995696699','1.1.1.1','2.9.9.2','2','86998','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8823123,169,3565,3);
+create table t1 engine=myisam as select * from t1_c;
+create table t2 engine=myisam as select * from t2_c;
+create table t3 engine=myisam as select * from t3_c;
+create table t4 engine=myisam as select * from t4_c;
+create table t5 engine=myisam as select * from t5_c;
+create table t6 engine=myisam as select * from t6_c;
+create table t7 engine=myisam as select * from t7_c;
+create table t8 engine=myisam as select * from t8_c;
+create table t9 engine=myisam as select * from t9_c;
+CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
+DELETE FROM test.backup_info;
+LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
+SELECT @the_backup_id:=backup_id FROM test.backup_info;
+@the_backup_id:=backup_id
+<the_backup_id>
+DROP TABLE test.backup_info;
+drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+select count(*) from t1;
+count(*)
+5
+select count(*) from t1_c;
+count(*)
+5
+select count(*)
+from (select * from t1 union
+select * from t1_c) a;
+count(*)
+5
+select count(*) from t2;
+count(*)
+6
+select count(*) from t2_c;
+count(*)
+6
+select count(*)
+from (select * from t2 union
+select * from t2_c) a;
+count(*)
+6
+select count(*) from t3;
+count(*)
+4
+select count(*) from t3_c;
+count(*)
+4
+select count(*)
+from (select * from t3 union
+select * from t3_c) a;
+count(*)
+4
+select count(*) from t4;
+count(*)
+22
+select count(*) from t4_c;
+count(*)
+22
+select count(*)
+from (select * from t4 union
+select * from t4_c) a;
+count(*)
+22
+select count(*) from t5;
+count(*)
+3
+select count(*) from t5_c;
+count(*)
+3
+select count(*)
+from (select * from t5 union
+select * from t5_c) a;
+count(*)
+3
+select count(*) from t6;
+count(*)
+8
+select count(*) from t6_c;
+count(*)
+8
+select count(*)
+from (select * from t6 union
+select * from t6_c) a;
+count(*)
+8
+select count(*) from t7;
+count(*)
+5
+select count(*) from t7_c;
+count(*)
+5
+select count(*)
+from (select * from t7 union
+select * from t7_c) a;
+count(*)
+5
+select count(*) from t8;
+count(*)
+3
+select count(*) from t8_c;
+count(*)
+3
+select count(*)
+from (select * from t8 union
+select * from t8_c) a;
+count(*)
+3
+select count(*) from t9;
+count(*)
+3
+select count(*) from t9_c;
+count(*)
+3
+select count(*)
+from (select * from t9 union
+select * from t9_c) a;
+count(*)
+3
+ALTER TABLE t1_c
+PARTITION BY RANGE (`capgoaledatta`)
+(PARTITION p0 VALUES LESS THAN MAXVALUE);
+ALTER TABLE t2_c
+PARTITION BY LIST(`capgotod`)
+(PARTITION p0 VALUES IN (0,1,2,3,4,5,6));
+ALTER TABLE t3_c
+PARTITION BY HASH (`CapGoaledatta`);
+ALTER TABLE t5_c
+PARTITION BY HASH (`capfa`)
+PARTITIONS 4;
+ALTER TABLE t6_c
+PARTITION BY LINEAR HASH (`relatta`)
+PARTITIONS 4;
+ALTER TABLE t7_c
+PARTITION BY LINEAR KEY (`dardtestard`);
+CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
+DELETE FROM test.backup_info;
+LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
+SELECT @the_backup_id:=backup_id FROM test.backup_info;
+@the_backup_id:=backup_id
+<the_backup_id>
+DROP TABLE test.backup_info;
+drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+select count(*) from t1;
+count(*)
+5
+select count(*) from t1_c;
+count(*)
+5
+select count(*)
+from (select * from t1 union
+select * from t1_c) a;
+count(*)
+5
+select count(*) from t2;
+count(*)
+6
+select count(*) from t2_c;
+count(*)
+6
+select count(*)
+from (select * from t2 union
+select * from t2_c) a;
+count(*)
+6
+select count(*) from t3;
+count(*)
+4
+select count(*) from t3_c;
+count(*)
+4
+select count(*)
+from (select * from t3 union
+select * from t3_c) a;
+count(*)
+4
+select count(*) from t4;
+count(*)
+22
+select count(*) from t4_c;
+count(*)
+22
+select count(*)
+from (select * from t4 union
+select * from t4_c) a;
+count(*)
+22
+select count(*) from t5;
+count(*)
+3
+select count(*) from t5_c;
+count(*)
+3
+select count(*)
+from (select * from t5 union
+select * from t5_c) a;
+count(*)
+3
+select count(*) from t6;
+count(*)
+8
+select count(*) from t6_c;
+count(*)
+8
+select count(*)
+from (select * from t6 union
+select * from t6_c) a;
+count(*)
+8
+select count(*) from t7;
+count(*)
+5
+select count(*) from t7_c;
+count(*)
+5
+select count(*)
+from (select * from t7 union
+select * from t7_c) a;
+count(*)
+5
+select count(*) from t8;
+count(*)
+3
+select count(*) from t8_c;
+count(*)
+3
+select count(*)
+from (select * from t8 union
+select * from t8_c) a;
+count(*)
+3
+select count(*) from t9;
+count(*)
+3
+select count(*) from t9_c;
+count(*)
+3
+select count(*)
+from (select * from t9 union
+select * from t9_c) a;
+count(*)
+3
+drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+select count(*) from t1;
+count(*)
+5
+select count(*) from t1_c;
+count(*)
+5
+select count(*)
+from (select * from t1 union
+select * from t1_c) a;
+count(*)
+5
+select count(*) from t2;
+count(*)
+6
+select count(*) from t2_c;
+count(*)
+6
+select count(*)
+from (select * from t2 union
+select * from t2_c) a;
+count(*)
+6
+select count(*) from t3;
+count(*)
+4
+select count(*) from t3_c;
+count(*)
+4
+select count(*)
+from (select * from t3 union
+select * from t3_c) a;
+count(*)
+4
+select count(*) from t4;
+count(*)
+22
+select count(*) from t4_c;
+count(*)
+22
+select count(*)
+from (select * from t4 union
+select * from t4_c) a;
+count(*)
+22
+select count(*) from t5;
+count(*)
+3
+select count(*) from t5_c;
+count(*)
+3
+select count(*)
+from (select * from t5 union
+select * from t5_c) a;
+count(*)
+3
+select count(*) from t6;
+count(*)
+8
+select count(*) from t6_c;
+count(*)
+8
+select count(*)
+from (select * from t6 union
+select * from t6_c) a;
+count(*)
+8
+select count(*) from t7;
+count(*)
+5
+select count(*) from t7_c;
+count(*)
+5
+select count(*)
+from (select * from t7 union
+select * from t7_c) a;
+count(*)
+5
+select count(*) from t8;
+count(*)
+3
+select count(*) from t8_c;
+count(*)
+3
+select count(*)
+from (select * from t8 union
+select * from t8_c) a;
+count(*)
+3
+select count(*) from t9;
+count(*)
+3
+select count(*) from t9_c;
+count(*)
+3
+select count(*)
+from (select * from t9 union
+select * from t9_c) a;
+count(*)
+3
+drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
+DELETE FROM test.backup_info;
+LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
+SELECT @the_backup_id:=backup_id FROM test.backup_info;
+@the_backup_id:=backup_id
+<the_backup_id>
+DROP TABLE test.backup_info;
+Create table test/def/t2_c failed: Translate frm error
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+drop table if exists t2_c;
+520093696,<the_backup_id>
@@ -1,4 +1,38 @@
 drop table if exists t1;
+create table t1 (a bigint)
+partition by range (a)
+(partition p0 values less than (0xFFFFFFFFFFFFFFFF),
+partition p1 values less than (10));
+ERROR 42000: VALUES value must be of same type as partition function near '),
+partition p1 values less than (10))' at line 3
+create table t1 (a bigint)
+partition by list (a)
+(partition p0 values in (0xFFFFFFFFFFFFFFFF),
+partition p1 values in (10));
+ERROR 42000: VALUES value must be of same type as partition function near '),
+partition p1 values in (10))' at line 3
+create table t1 (a bigint unsigned)
+partition by range (a)
+(partition p0 values less than (100),
+partition p1 values less than MAXVALUE);
+insert into t1 values (1);
+drop table t1;
+create table t1 (a bigint unsigned)
+partition by hash (a);
+insert into t1 values (0xFFFFFFFFFFFFFFFD);
+insert into t1 values (0xFFFFFFFFFFFFFFFE);
+select * from t1 where (a + 1) < 10;
+a
+select * from t1 where (a + 1) > 10;
+a
+18446744073709551613
+18446744073709551614
+drop table t1;
+create table t1 (a int)
+engine = csv
+partition by list (a)
+(partition p0 values in (null));
+ERROR HY000: CSV handler cannot be used in partitioned tables
 create table t1 (a int)
 partition by key(a)
 (partition p0 engine = MEMORY);
@@ -12,13 +46,13 @@ show create table t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
 `a` int(11) DEFAULT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (a) SUBPARTITION BY KEY (a) (PARTITION p0 VALUES LESS THAN (1) ENGINE = MyISAM, PARTITION p1 VALUES LESS THAN (2) ENGINE = MyISAM) */
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (a) SUBPARTITION BY KEY (a) (PARTITION p0 VALUES LESS THAN (1) ENGINE = MyISAM, PARTITION p1 VALUES LESS THAN (2) ENGINE = MyISAM)*/
 alter table t1 reorganize partition p1 into (partition p1 values less than (3));
 show create table t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
 `a` int(11) DEFAULT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (a) SUBPARTITION BY KEY (a) (PARTITION p0 VALUES LESS THAN (1) ENGINE = MyISAM, PARTITION p1 VALUES LESS THAN (3) ENGINE = MyISAM) */
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (a) SUBPARTITION BY KEY (a) (PARTITION p0 VALUES LESS THAN (1) ENGINE = MyISAM, PARTITION p1 VALUES LESS THAN (3) ENGINE = MyISAM)*/
 drop table t1;
 CREATE TABLE t1 (
 a int not null,
@@ -774,6 +808,18 @@ insert into t1 values (null);
 select * from t1 where f1 is null;
 f1
 NULL
+select * from t1 where f1 < 1;
+f1
+select * from t1 where f1 <= NULL;
+f1
+select * from t1 where f1 < NULL;
+f1
+select * from t1 where f1 >= NULL;
+f1
+select * from t1 where f1 > NULL;
+f1
+select * from t1 where f1 > 1;
+f1
 drop table t1;
 create table t1 (f1 smallint)
 partition by range (f1) (partition p0 values less than (0));
@@ -860,6 +906,16 @@ SHOW TABLE STATUS;
 Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
 t1 MyISAM 10 Dynamic 0 0 0 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned
 DROP TABLE t1;
+create table t1 (a bigint unsigned)
+partition by list (a)
+(partition p0 values in (0-1));
+ERROR HY000: Partition constant is out of partition function domain
+create table t1 (a bigint unsigned)
+partition by range (a)
+(partition p0 values less than (10));
+insert into t1 values (0xFFFFFFFFFFFFFFFF);
+ERROR HY000: Table has no partition for value 18446744073709551615
+drop table t1;
 create table t1 (a int)
 partition by list (a)
 (partition `s1 s2` values in (0));
@@ -554,6 +554,10 @@ PARTITION BY RANGE (a) (PARTITION p1 VALUES LESS THAN(5));
 insert into t1 values (10);
 ERROR HY000: Table has no partition for value 10
 drop table t1;
+create table t1 (a bigint unsigned)
+partition by range (a)
+(partition p0 values less than (-1));
+ERROR HY000: Partition constant is out of partition function domain
 create table t1 (v varchar(12))
 partition by range (ascii(v))
 (partition p0 values less than (10));
@@ -7,12 +7,6 @@ t1 CREATE TABLE `t1` (
 `f_date` date DEFAULT NULL,
 `f_varchar` varchar(30) DEFAULT NULL
 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (CAST(YEAR(f_date) AS SIGNED INTEGER)) PARTITIONS 2
-/home/pappa/bug19305/mysql-test/var/master-data/test/t1#P#p0.MYD
-/home/pappa/bug19305/mysql-test/var/master-data/test/t1#P#p0.MYI
-/home/pappa/bug19305/mysql-test/var/master-data/test/t1#P#p1.MYD
-/home/pappa/bug19305/mysql-test/var/master-data/test/t1#P#p1.MYI
-/home/pappa/bug19305/mysql-test/var/master-data/test/t1.frm
-/home/pappa/bug19305/mysql-test/var/master-data/test/t1.par
 ALTER TABLE t1 COALESCE PARTITION 1;
 SHOW CREATE TABLE t1;
 Table Create Table
@ -20,7 +14,3 @@ t1 CREATE TABLE `t1` (
|
|||||||
`f_date` date DEFAULT NULL,
|
`f_date` date DEFAULT NULL,
|
||||||
`f_varchar` varchar(30) DEFAULT NULL
|
`f_varchar` varchar(30) DEFAULT NULL
|
||||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (CAST(YEAR(f_date) AS SIGNED INTEGER)) PARTITIONS 1
|
) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (CAST(YEAR(f_date) AS SIGNED INTEGER)) PARTITIONS 1
|
||||||
/home/pappa/bug19305/mysql-test/var/master-data/test/t1#P#p0.MYD
|
|
||||||
/home/pappa/bug19305/mysql-test/var/master-data/test/t1#P#p0.MYI
|
|
||||||
/home/pappa/bug19305/mysql-test/var/master-data/test/t1.frm
|
|
||||||
/home/pappa/bug19305/mysql-test/var/master-data/test/t1.par
|
|
||||||
@ -363,6 +363,33 @@ SELECT COUNT(*) FROM t1 WHERE c3 < '2000-12-31';
COUNT(*)
10
DROP TABLE t1;
+create table t1 (a bigint unsigned)
+partition by range (a)
+(partition p0 values less than (10),
+partition p1 values less than (0));
+ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition
+create table t1 (a bigint unsigned)
+partition by range (a)
+(partition p0 values less than (0),
+partition p1 values less than (10));
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+`a` bigint(20) unsigned DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (0) ENGINE = MyISAM, PARTITION p1 VALUES LESS THAN (10) ENGINE = MyISAM)
+drop table t1;
+create table t1 (a bigint unsigned)
+partition by range (a)
+(partition p0 values less than (2),
+partition p1 values less than (10));
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+`a` bigint(20) unsigned DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (2) ENGINE = MyISAM, PARTITION p1 VALUES LESS THAN (10) ENGINE = MyISAM)
+insert into t1 values (0xFFFFFFFFFFFFFFFF);
+ERROR HY000: Table has no partition for value 18446744073709551615
+drop table t1;
create table t1 (a int)
partition by range (MOD(a,3))
subpartition by hash(a)
@ -1,4 +1,4 @@
set timestamp=1;
SELECT sleep(1),NOW()-SYSDATE() as zero;
sleep(1) zero
-0 0
+0 0.000000
20 mysql-test/t/create_not_windows.test Normal file
@ -0,0 +1,20 @@
+# Non-windows specific create tests.
+
+--source include/not_windows.inc
+
+#
+# Bug#19479:mysqldump creates invalid dump
+#
+--disable_warnings
+drop table if exists `about:text`;
+--enable_warnings
+create table `about:text` (
+_id int not null auto_increment,
+`about:text` varchar(255) not null default '',
+primary key (_id)
+);
+
+show create table `about:text`;
+drop table `about:text`;
+
+# End of 5.0 tests
@ -367,6 +367,13 @@ select last_day('2005-01-00');
select monthname(str_to_date(null, '%m')), monthname(str_to_date(null, '%m')),
monthname(str_to_date(1, '%m')), monthname(str_to_date(0, '%m'));

+#
+# Bug #16546
+#
+
+select now() - now() + 0, curtime() - curtime() + 0,
+sec_to_time(1) + 0, from_unixtime(1) + 0;
+
--echo End of 4.1 tests

explain extended select timestampdiff(SQL_TSI_WEEK, '2001-02-01', '2001-05-01') as a1,
@ -580,28 +580,32 @@ connect (con2,localhost,user2,,mysqltest);
connect (con3,localhost,user3,,mysqltest);
connect (con4,localhost,user4,,);
connection con1;
-select * from information_schema.column_privileges;
-select * from information_schema.table_privileges;
-select * from information_schema.schema_privileges;
-select * from information_schema.user_privileges;
+select * from information_schema.column_privileges order by grantee;
+select * from information_schema.table_privileges order by grantee;
+select * from information_schema.schema_privileges order by grantee;
+select * from information_schema.user_privileges order by grantee;
show grants;
connection con2;
-select * from information_schema.column_privileges;
-select * from information_schema.table_privileges;
-select * from information_schema.schema_privileges;
-select * from information_schema.user_privileges;
+select * from information_schema.column_privileges order by grantee;
+select * from information_schema.table_privileges order by grantee;
+select * from information_schema.schema_privileges order by grantee;
+select * from information_schema.user_privileges order by grantee;
show grants;
connection con3;
-select * from information_schema.column_privileges;
-select * from information_schema.table_privileges;
-select * from information_schema.schema_privileges;
-select * from information_schema.user_privileges;
+select * from information_schema.column_privileges order by grantee;
+select * from information_schema.table_privileges order by grantee;
+select * from information_schema.schema_privileges order by grantee;
+select * from information_schema.user_privileges order by grantee;
show grants;
connection con4;
-select * from information_schema.column_privileges where grantee like '%user%';
-select * from information_schema.table_privileges where grantee like '%user%';
-select * from information_schema.schema_privileges where grantee like '%user%';
-select * from information_schema.user_privileges where grantee like '%user%';
+select * from information_schema.column_privileges where grantee like '%user%'
+order by grantee;
+select * from information_schema.table_privileges where grantee like '%user%'
+order by grantee;
+select * from information_schema.schema_privileges where grantee like '%user%'
+order by grantee;
+select * from information_schema.user_privileges where grantee like '%user%'
+order by grantee;
show grants;
connection default;
drop user user1@localhost, user2@localhost, user3@localhost, user4@localhost;
@ -824,25 +828,6 @@ select routine_name from information_schema.routines;
|
|||||||
delete from proc where name='';
|
delete from proc where name='';
|
||||||
use test;
|
use test;
|
||||||
|
|
||||||
#
|
|
||||||
# End of 5.0 tests.
|
|
||||||
#
|
|
||||||
# Show engines
|
|
||||||
#
|
|
||||||
|
|
||||||
select * from information_schema.engines WHERE ENGINE="MyISAM";
|
|
||||||
|
|
||||||
#
|
|
||||||
# INFORMATION_SCHEMA.PROCESSLIST
|
|
||||||
#
|
|
||||||
|
|
||||||
grant select on *.* to user3148@localhost;
|
|
||||||
connect (con3148,localhost,user3148,,test);
|
|
||||||
connection con3148;
|
|
||||||
select user,db from information_schema.processlist;
|
|
||||||
connection default;
|
|
||||||
drop user user3148@localhost;
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Bug#16681 information_schema shows forbidden VIEW details
|
# Bug#16681 information_schema shows forbidden VIEW details
|
||||||
#
|
#
|
||||||
@ -862,3 +847,31 @@ drop view v1, v2;
|
|||||||
drop table t1;
|
drop table t1;
|
||||||
drop user mysqltest_1@localhost;
|
drop user mysqltest_1@localhost;
|
||||||
|
|
||||||
|
#
|
||||||
|
# Bug#19599 duplication of information_schema column value in a CONCAT expr with user var
|
||||||
|
#
|
||||||
|
set @a:= '.';
|
||||||
|
create table t1(f1 char(5));
|
||||||
|
create table t2(f1 char(5));
|
||||||
|
select concat(@a, table_name), @a, table_name
|
||||||
|
from information_schema.tables where table_schema = 'test';
|
||||||
|
drop table t1,t2;
|
||||||
|
|
||||||
|
# End of 5.0 tests.
|
||||||
|
#
|
||||||
|
# Show engines
|
||||||
|
#
|
||||||
|
|
||||||
|
select * from information_schema.engines WHERE ENGINE="MyISAM";
|
||||||
|
|
||||||
|
#
|
||||||
|
# INFORMATION_SCHEMA.PROCESSLIST
|
||||||
|
#
|
||||||
|
|
||||||
|
grant select on *.* to user3148@localhost;
|
||||||
|
connect (con3148,localhost,user3148,,test);
|
||||||
|
connection con3148;
|
||||||
|
select user,db from information_schema.processlist;
|
||||||
|
connection default;
|
||||||
|
drop user user3148@localhost;
|
||||||
|
|
||||||
|
@ -128,6 +128,36 @@ unlock tables;
|
|||||||
drop table t1;
|
drop table t1;
|
||||||
|
|
||||||
#
|
#
|
||||||
|
# Bug#16986 - Deadlock condition with MyISAM tables
|
||||||
|
#
|
||||||
|
connection locker;
|
||||||
|
use mysql;
|
||||||
|
LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE;
|
||||||
|
FLUSH TABLES;
|
||||||
|
--sleep 1
|
||||||
|
#
|
||||||
|
connection reader;
|
||||||
|
use mysql;
|
||||||
|
#NOTE: This must be a multi-table select, otherwise the deadlock will not occur
|
||||||
|
send SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1;
|
||||||
|
--sleep 1
|
||||||
|
#
|
||||||
|
connection locker;
|
||||||
|
# Make test case independent from earlier grants.
|
||||||
|
--replace_result "Table is already up to date" "OK"
|
||||||
|
OPTIMIZE TABLES columns_priv, db, host, user;
|
||||||
|
UNLOCK TABLES;
|
||||||
|
#
|
||||||
|
connection reader;
|
||||||
|
reap;
|
||||||
|
use test;
|
||||||
|
#
|
||||||
|
connection locker;
|
||||||
|
use test;
|
||||||
|
#
|
||||||
|
connection default;
|
||||||
|
|
||||||
|
# End of 5.0 tests
|
||||||
# Bug#19815 - CREATE/RENAME/DROP DATABASE can deadlock on a global read lock
|
# Bug#19815 - CREATE/RENAME/DROP DATABASE can deadlock on a global read lock
|
||||||
#
|
#
|
||||||
connect (con1,localhost,root,,);
|
connect (con1,localhost,root,,);
|
||||||
@ -161,33 +191,3 @@ disconnect con2;
|
|||||||
--error ER_DB_DROP_EXISTS
|
--error ER_DB_DROP_EXISTS
|
||||||
DROP DATABASE mysqltest_1;
|
DROP DATABASE mysqltest_1;
|
||||||
|
|
||||||
# Bug#16986 - Deadlock condition with MyISAM tables
|
|
||||||
#
|
|
||||||
connection locker;
|
|
||||||
use mysql;
|
|
||||||
LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE;
|
|
||||||
FLUSH TABLES;
|
|
||||||
--sleep 1
|
|
||||||
#
|
|
||||||
connection reader;
|
|
||||||
use mysql;
|
|
||||||
#NOTE: This must be a multi-table select, otherwise the deadlock will not occur
|
|
||||||
send SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1;
|
|
||||||
--sleep 1
|
|
||||||
#
|
|
||||||
connection locker;
|
|
||||||
# Make test case independent from earlier grants.
|
|
||||||
--replace_result "Table is already up to date" "OK"
|
|
||||||
OPTIMIZE TABLES columns_priv, db, host, user;
|
|
||||||
UNLOCK TABLES;
|
|
||||||
#
|
|
||||||
connection reader;
|
|
||||||
reap;
|
|
||||||
use test;
|
|
||||||
#
|
|
||||||
connection locker;
|
|
||||||
use test;
|
|
||||||
#
|
|
||||||
connection default;
|
|
||||||
|
|
||||||
# End of 5.0 tests
|
|
||||||
@ -139,3 +139,14 @@ select t1Aa.col1 from t1aA,t2Aa where t1Aa.col1 = t2aA.col1;
drop table t2aA, t1Aa;

# End of 4.1 tests
+
+#
+# Bug#17661 information_schema.SCHEMATA returns uppercase with lower_case_table_names = 1
+#
+create database mysqltest_LC2;
+use mysqltest_LC2;
+create table myUC (i int);
+select TABLE_SCHEMA,TABLE_NAME FROM information_schema.TABLES
+where TABLE_SCHEMA ='mysqltest_LC2';
+use test;
+drop database mysqltest_LC2;
@ -367,12 +367,23 @@ CREATE TEMPORARY TABLE ndb_show_tables (id INT, type VARCHAR(20), state VARCHAR(
LOAD DATA INFILE 'tmp.dat' INTO TABLE ndb_show_tables;
--enable_warnings

+# Ndb doesn't support renaming attributes on-line
set @t1_id = (select id from ndb_show_tables where name like '%t1%');
truncate ndb_show_tables;

alter table t1 change tiny new_tiny tinyint(4) DEFAULT '0' NOT NULL;
+--disable_warnings
+--exec $NDB_TOOLS_DIR/ndb_show_tables --p > $MYSQLTEST_VARDIR/master-data/test/tmp.dat
+LOAD DATA INFILE 'tmp.dat' INTO TABLE ndb_show_tables;
+--enable_warnings
+
+select 'no_copy' from ndb_show_tables where id = @t1_id and name like '%t1%';
+
+set @t1_id = (select id from ndb_show_tables where name like '%t1%');
+truncate ndb_show_tables;
+
create index i1 on t1(medium);
-alter table t1 add index i2(long_int);
+alter table t1 add index i2(new_tiny);
drop index i1 on t1;

--disable_warnings
|
70
mysql-test/t/ndb_loaddatalocal.test
Normal file
70
mysql-test/t/ndb_loaddatalocal.test
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
-- source include/have_ndb.inc
|
||||||
|
-- source include/not_embedded.inc
|
||||||
|
|
||||||
|
--disable_warnings
|
||||||
|
DROP TABLE IF EXISTS t1;
|
||||||
|
--enable_warnings
|
||||||
|
|
||||||
|
create table t1(a int) engine=myisam;
|
||||||
|
let $1=10000;
|
||||||
|
disable_query_log;
|
||||||
|
set SQL_LOG_BIN=0;
|
||||||
|
while ($1)
|
||||||
|
{
|
||||||
|
insert into t1 values(1);
|
||||||
|
dec $1;
|
||||||
|
}
|
||||||
|
set SQL_LOG_BIN=1;
|
||||||
|
enable_query_log;
|
||||||
|
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
|
||||||
|
eval select * into outfile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1;
|
||||||
|
#This will generate a 20KB file, now test LOAD DATA LOCAL
|
||||||
|
drop table t1;
|
||||||
|
|
||||||
|
create table t1(a int) engine=ndb;
|
||||||
|
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
|
||||||
|
eval load data local infile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1;
|
||||||
|
select count(*) from t1;
|
||||||
|
system rm $MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile ;
|
||||||
|
drop table t1;
|
||||||
|
|
||||||
|
create table t1(a int) engine=myisam;
|
||||||
|
insert into t1 values (1), (2), (2), (3);
|
||||||
|
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
|
||||||
|
eval select * into outfile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1;
|
||||||
|
drop table t1;
|
||||||
|
|
||||||
|
create table t1(a int primary key) engine=ndb;
|
||||||
|
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
|
||||||
|
eval load data local infile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1;
|
||||||
|
system rm $MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile;
|
||||||
|
select * from t1 order by a;
|
||||||
|
drop table t1;
|
||||||
|
|
||||||
|
create table t1(a int) engine=myisam;
|
||||||
|
insert into t1 values (1), (1), (2), (3);
|
||||||
|
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
|
||||||
|
eval select * into outfile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1;
|
||||||
|
drop table t1;
|
||||||
|
|
||||||
|
create table t1(a int primary key) engine=ndb;
|
||||||
|
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
|
||||||
|
eval load data local infile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1;
|
||||||
|
system rm $MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile;
|
||||||
|
select * from t1 order by a;
|
||||||
|
drop table t1;
|
||||||
|
|
||||||
|
create table t1(a int) engine=myisam;
|
||||||
|
insert into t1 values (1), (2), (3), (3);
|
||||||
|
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
|
||||||
|
eval select * into outfile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1;
|
||||||
|
drop table t1;
|
||||||
|
|
||||||
|
create table t1(a int primary key) engine=ndb;
|
||||||
|
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
|
||||||
|
eval load data local infile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1;
|
||||||
|
system rm $MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile;
|
||||||
|
select * from t1 order by a;
|
||||||
|
drop table t1;
|
||||||
|
|
||||||
|
# End of 4.1 tests
|
1
mysql-test/t/ndb_restore_partition-master.opt
Normal file
1
mysql-test/t/ndb_restore_partition-master.opt
Normal file
@ -0,0 +1 @@
|
|||||||
|
--new
|
375
mysql-test/t/ndb_restore_partition.test
Normal file
375
mysql-test/t/ndb_restore_partition.test
Normal file
@ -0,0 +1,375 @@
|
|||||||
|
-- source include/have_ndb.inc
|
||||||
|
-- source include/ndb_default_cluster.inc
|
||||||
|
-- source include/not_embedded.inc
|
||||||
|
|
||||||
|
--disable_warnings
|
||||||
|
use test;
|
||||||
|
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||||
|
drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
||||||
|
--enable_warnings
|
||||||
|
|
||||||
|
CREATE TABLE `t1_c` (
|
||||||
|
`capgoaledatta` smallint(5) unsigned NOT NULL auto_increment,
|
||||||
|
`goaledatta` char(2) NOT NULL default '',
|
||||||
|
`maturegarbagefa` varchar(32) NOT NULL default '',
|
||||||
|
PRIMARY KEY (`capgoaledatta`,`goaledatta`,`maturegarbagefa`)
|
||||||
|
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
|
||||||
|
INSERT INTO `t1_c` VALUES (2,'3','q3plus.qt'),(4,'4','q3plus.qt'),(1,'3','q3.net'),(3,'4','q3.net'),(3,'20','threetrees.qt');
|
||||||
|
|
||||||
|
CREATE TABLE `t2_c` (
|
||||||
|
`capgotod` smallint(5) unsigned NOT NULL auto_increment,
|
||||||
|
`gotod` smallint(5) unsigned NOT NULL default '0',
|
||||||
|
`goaledatta` char(2) default NULL,
|
||||||
|
`maturegarbagefa` varchar(32) default NULL,
|
||||||
|
`descrpooppo` varchar(64) default NULL,
|
||||||
|
`svcutonsa` varchar(64) NOT NULL default '',
|
||||||
|
PRIMARY KEY (`capgotod`),
|
||||||
|
KEY `i_quadaddsvr` (`gotod`)
|
||||||
|
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
|
||||||
|
INSERT INTO `t2_c` VALUES (5,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST');
|
||||||
|
|
||||||
|
CREATE TABLE `t3_c` (
|
||||||
|
`CapGoaledatta` smallint(5) unsigned NOT NULL default '0',
|
||||||
|
`capgotod` smallint(5) unsigned NOT NULL default '0',
|
||||||
|
PRIMARY KEY (`capgotod`,`CapGoaledatta`)
|
||||||
|
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
|
||||||
|
INSERT INTO `t3_c` VALUES (5,3),(2,4),(5,4),(1,3);
|
||||||
|
|
||||||
|
CREATE TABLE `t4_c` (
|
||||||
|
`capfa` bigint(20) unsigned NOT NULL auto_increment,
|
||||||
|
`realm` varchar(32) NOT NULL default '',
|
||||||
|
`authpwchap` varchar(32) default NULL,
|
||||||
|
`fa` varchar(32) NOT NULL default '',
|
||||||
|
`payyingatta` tinyint(4) NOT NULL default '0',
|
||||||
|
`status` char(1) default NULL,
|
||||||
|
PRIMARY KEY (`fa`,`realm`),
|
||||||
|
KEY `capfa` (`capfa`),
|
||||||
|
KEY `i_quadentity` (`fa`,`realm`)
|
||||||
|
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
|
||||||
|
INSERT INTO `t4_c` VALUES (18,'john.smith','q3.net','dessjohn.smith',0,NULL),(21,'quad_katt_with_brandtad','q3.net','acne',0,NULL),(22,'quad_katt_carattoaa','q3.net','acne',0,NULL),(26,'436462612809','sqasdt.q3.net','N/A',0,'6'),(19,'john','smith.qt','dessjohn',0,NULL),(33,'436643196120','sqasdt.q3.net','N/A',1,'6'),(28,'436642900019','sqasdt.q3.net','N/A',0,'6'),(30,'436462900209','sqasdt.q3.net','N/A',0,'6'),(16,'436640006666','sqasdt.q3.net','',0,NULL),(19,'dette','el-redun.com','dessdette',0,NULL),(12,'quad_kattPP','q3.net','acne',2,NULL),(14,'436640008888','sqasdt.q3.net','',0,NULL),(29,'463624900028','sqasdt.q3.net','N/A',0,'6'),(15,'436640099099','sqasdt.q3.net','',0,NULL),(13,'pap','q3plus.qt','acne',1,NULL),(19,'436642612091','sqasdt.q3.net','N/A',0,'6'),(12,'quad_katt','q3.net','acne',0,NULL),(11,'quad_kattVK','q3.net','acne',1,NULL),(32,'463641969502','sqasdt.q3.net','N/A',1,'6'),(20,'joe','q3.net','joedesswd',0,NULL),(29,'436642900034','sqasdt.q3.net','N/A',0,'6'),(25,'contind','armerde.qt','acne',1,NULL);
|
||||||
|
|
||||||
|
CREATE TABLE `t5_c` (
|
||||||
|
`capfa` bigint(20) unsigned NOT NULL default '0',
|
||||||
|
`gotod` smallint(5) unsigned NOT NULL default '0',
|
||||||
|
`orderutonsa` varchar(64) NOT NULL default '',
|
||||||
|
PRIMARY KEY (`capfa`,`gotod`,`orderutonsa`)
|
||||||
|
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
|
||||||
|
INSERT INTO `t5_c` VALUES (21,2,''),(21,1,''),(22,4,'');
|
||||||
|
|
||||||
|
CREATE TABLE `t6_c` (
|
||||||
|
`capfa_parent` bigint(20) unsigned NOT NULL default '0',
|
||||||
|
`capfa_child` bigint(20) unsigned NOT NULL default '0',
|
||||||
|
`relatta` smallint(5) unsigned NOT NULL default '0',
|
||||||
|
PRIMARY KEY (`capfa_child`,`capfa_parent`,`relatta`)
|
||||||
|
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
|
||||||
|
INSERT INTO `t6_c` VALUES (15,16,0),(19,20,0),(18326932092909551615,30,0),(26,29,0),(18326932092909551615,29,0),(19,18,0),(26,28,0),(12,14,0);
|
||||||
|
|
||||||
|
CREATE TABLE `t7_c` (
|
||||||
|
`dardpo` char(15) NOT NULL default '',
|
||||||
|
`dardtestard` tinyint(3) unsigned NOT NULL default '0',
|
||||||
|
`FastFA` char(5) NOT NULL default '',
|
||||||
|
`FastCode` char(6) NOT NULL default '',
|
||||||
|
`Fastca` char(1) NOT NULL default '',
|
||||||
|
`Fastmag` char(1) NOT NULL default '',
|
||||||
|
`Beareratta` char(2) NOT NULL default '',
|
||||||
|
PRIMARY KEY (`dardpo`,`dardtestard`)
|
||||||
|
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
|
||||||
|
INSERT INTO `t7_c` VALUES ('2.6.2.4',24,'CECHP','54545','0','0','5'),('2.2.5.4',26,'CANFA','33223','1','1','4'),('4.3.2.4',28,'ITALD','54222','1','0','5'),('129..0.0.eins',28,'G','99999','1','1','5'),('1.1.1.1',24,'AUTPT','32323','0','1','3');
|
||||||
|
|
||||||
|
CREATE TABLE `t8_c` (
|
||||||
|
`kattjame` varchar(32) NOT NULL default '',
|
||||||
|
`realm` varchar(32) NOT NULL default '',
|
||||||
|
`realm_entered` varchar(32) NOT NULL default '',
|
||||||
|
`maturegarbagefa` varchar(32) NOT NULL default '',
|
||||||
|
`hunderaaarbagefa_parent` varchar(32) NOT NULL default '',
|
||||||
|
`kattjame_entered` varchar(32) NOT NULL default '',
|
||||||
|
`hunderaaarbagefa` varchar(32) NOT NULL default '',
|
||||||
|
`gest` varchar(16) default NULL,
|
||||||
|
`hassetino` varchar(16) NOT NULL default '',
|
||||||
|
`aaaproxysessfa` varchar(255) default NULL,
|
||||||
|
`autologonallowed` char(1) default NULL,
|
||||||
|
`squardporoot` varchar(15) NOT NULL default '',
|
||||||
|
`naspo` varchar(15) default NULL,
|
||||||
|
`beareratta` char(2) default NULL,
|
||||||
|
`fastCode` varchar(6) default NULL,
|
||||||
|
`fastFA` varchar(5) default NULL,
|
||||||
|
`fastca` char(1) default NULL,
|
||||||
|
`fastmag` char(1) default NULL,
|
||||||
|
`lastupdate` datetime default NULL,
|
||||||
|
`hassetistart` datetime NOT NULL default '0000-00-00 00:00:00',
|
||||||
|
`accthassetitime` int(10) unsigned default NULL,
|
||||||
|
`acctoutputoctets` bigint(20) unsigned default NULL,
|
||||||
|
`acctinputoctets` bigint(20) unsigned default NULL,
|
||||||
|
PRIMARY KEY (`kattjame`,`hunderaaarbagefa`,`hassetistart`,`hassetino`),
|
||||||
|
KEY `squardporoot` (`squardporoot`)
|
||||||
|
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
|
||||||
|
INSERT INTO `t8_c` VALUES ('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643196120','436643196929','8956234534568968','5524595699','uxasmt21.net.acne.qt/481889229462692422','','1.1.1.1','2.2.4.6','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565),('4545435545','john','q3.net','q3.net','acne.li','436643196120','436643196929','45345234568968','995696699','uxasmt21.net.acne.qt/481889229462692423','','1.1.1.1','2.2.9.8','2','86989','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8821923,169,3565),('versteckter_q3net_katt','joe','q3.net','elredun.com','q3.net','436643196120','436643196939','91341234568968','695595699','uxasmt21.net.acne.qt/481889229462692421','','1.1.1.1','2.5.2.5','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',1923123,9569,6565);
|
||||||
|
|
||||||
|
CREATE TABLE `t9_c` (
|
||||||
|
`kattjame` varchar(32) NOT NULL default '',
|
||||||
|
`kattjame_entered` varchar(32) NOT NULL default '',
|
||||||
|
`realm` varchar(32) NOT NULL default '',
|
||||||
|
`realm_entered` varchar(32) NOT NULL default '',
|
||||||
|
`maturegarbagefa` varchar(32) NOT NULL default '',
|
||||||
|
`hunderaaarbagefa` varchar(32) NOT NULL default '',
|
||||||
|
`hunderaaarbagefa_parent` varchar(32) NOT NULL default '',
|
||||||
|
`gest` varchar(16) default NULL,
|
||||||
|
`hassetino` varchar(16) NOT NULL default '',
|
||||||
|
`squardporoot` varchar(15) NOT NULL default '',
|
||||||
|
`naspo` varchar(15) default NULL,
|
||||||
|
`beareratta` char(2) default NULL,
|
||||||
|
`fastCode` varchar(6) default NULL,
|
||||||
|
`fastFA` varchar(5) default NULL,
|
||||||
|
`fastca` char(1) default NULL,
|
||||||
|
`fastmag` char(1) default NULL,
|
||||||
|
`lastupdate` datetime default NULL,
|
||||||
|
`hassetistart` datetime NOT NULL default '0000-00-00 00:00:00',
|
||||||
|
`accthassetitime` int(10) unsigned default NULL,
|
||||||
|
`actcoutpuocttets` bigint(20) unsigned default NULL,
|
||||||
|
`actinputocctets` bigint(20) unsigned default NULL,
|
||||||
|
`terminateraste` tinyint(3) unsigned default NULL,
|
||||||
|
PRIMARY KEY (`kattjame`,`hunderaaarbagefa`,`hassetistart`,`hassetino`)
|
||||||
|
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
|
||||||
|
INSERT INTO `t9_c` VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net','436643316120','436643316939','91341234568968','695595699','1.1.1.1','2.2.6.2','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',3123123,9569,6565,1),('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643316120','436643316939','8956234534568968','5254595969','1.1.1.1','8.6.2.2','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565,2),('4545435545','john','q3.net','q3.net','acne.li','436643316120','436643316939','45345234568968','995696699','1.1.1.1','2.9.9.2','2','86998','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8823123,169,3565,3);
|
||||||
|
|
||||||
|
create table t1 engine=myisam as select * from t1_c;
|
||||||
|
create table t2 engine=myisam as select * from t2_c;
|
||||||
|
create table t3 engine=myisam as select * from t3_c;
|
||||||
|
create table t4 engine=myisam as select * from t4_c;
|
||||||
|
create table t5 engine=myisam as select * from t5_c;
|
||||||
|
create table t6 engine=myisam as select * from t6_c;
|
||||||
|
create table t7 engine=myisam as select * from t7_c;
|
||||||
|
create table t8 engine=myisam as select * from t8_c;
|
||||||
|
create table t9 engine=myisam as select * from t9_c;
|
||||||
|
|
||||||
|
|
||||||
|
--source include/ndb_backup.inc
|
||||||
|
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
||||||
|
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
|
||||||
|
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
|
||||||
|
|
||||||
|
# random output order??
|
||||||
|
#show tables;
|
||||||
|
|
||||||
|
select count(*) from t1;
|
||||||
|
select count(*) from t1_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t1 union
|
||||||
|
select * from t1_c) a;
|
||||||
|
|
||||||
|
select count(*) from t2;
|
||||||
|
select count(*) from t2_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t2 union
|
||||||
|
select * from t2_c) a;
|
||||||
|
|
||||||
|
select count(*) from t3;
|
||||||
|
select count(*) from t3_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t3 union
|
||||||
|
select * from t3_c) a;
|
||||||
|
|
||||||
|
select count(*) from t4;
|
||||||
|
select count(*) from t4_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t4 union
|
||||||
|
select * from t4_c) a;
|
||||||
|
|
||||||
|
select count(*) from t5;
|
||||||
|
select count(*) from t5_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t5 union
|
||||||
|
select * from t5_c) a;
|
||||||
|
|
||||||
|
select count(*) from t6;
|
||||||
|
select count(*) from t6_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t6 union
|
||||||
|
select * from t6_c) a;
|
||||||
|
|
||||||
|
select count(*) from t7;
|
||||||
|
select count(*) from t7_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t7 union
|
||||||
|
select * from t7_c) a;
|
||||||
|
|
||||||
|
select count(*) from t8;
|
||||||
|
select count(*) from t8_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t8 union
|
||||||
|
select * from t8_c) a;
|
||||||
|
|
||||||
|
select count(*) from t9;
|
||||||
|
select count(*) from t9_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t9 union
|
||||||
|
select * from t9_c) a;
|
||||||
|
|
||||||
|
#
|
||||||
|
# Try Partitioned tables as well
|
||||||
|
#
|
||||||
|
ALTER TABLE t1_c
|
||||||
|
PARTITION BY RANGE (`capgoaledatta`)
|
||||||
|
(PARTITION p0 VALUES LESS THAN MAXVALUE);
|
||||||
|
|
||||||
|
ALTER TABLE t2_c
|
||||||
|
PARTITION BY LIST(`capgotod`)
|
||||||
|
(PARTITION p0 VALUES IN (0,1,2,3,4,5,6));
|
||||||
|
|
||||||
|
ALTER TABLE t3_c
|
||||||
|
PARTITION BY HASH (`CapGoaledatta`);
|
||||||
|
|
||||||
|
ALTER TABLE t5_c
|
||||||
|
PARTITION BY HASH (`capfa`)
|
||||||
|
PARTITIONS 4;
|
||||||
|
|
||||||
|
ALTER TABLE t6_c
|
||||||
|
PARTITION BY LINEAR HASH (`relatta`)
|
||||||
|
PARTITIONS 4;
|
||||||
|
|
||||||
|
ALTER TABLE t7_c
|
||||||
|
PARTITION BY LINEAR KEY (`dardtestard`);
|
||||||
|
|
||||||
|
--source include/ndb_backup.inc
|
||||||
|
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
||||||
|
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
|
||||||
|
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
|
||||||
|
|
||||||
|
select count(*) from t1;
|
||||||
|
select count(*) from t1_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t1 union
|
||||||
|
select * from t1_c) a;
|
||||||
|
|
||||||
|
select count(*) from t2;
|
||||||
|
select count(*) from t2_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t2 union
|
||||||
|
select * from t2_c) a;
|
||||||
|
|
||||||
|
select count(*) from t3;
|
||||||
|
select count(*) from t3_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t3 union
|
||||||
|
select * from t3_c) a;
|
||||||
|
|
||||||
|
select count(*) from t4;
|
||||||
|
select count(*) from t4_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t4 union
|
||||||
|
select * from t4_c) a;
|
||||||
|
|
||||||
|
select count(*) from t5;
|
||||||
|
select count(*) from t5_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t5 union
|
||||||
|
select * from t5_c) a;
|
||||||
|
|
||||||
|
select count(*) from t6;
|
||||||
|
select count(*) from t6_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t6 union
|
||||||
|
select * from t6_c) a;
|
||||||
|
|
||||||
|
select count(*) from t7;
|
||||||
|
select count(*) from t7_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t7 union
|
||||||
|
select * from t7_c) a;
|
||||||
|
|
||||||
|
select count(*) from t8;
|
||||||
|
select count(*) from t8_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t8 union
|
||||||
|
select * from t8_c) a;
|
||||||
|
|
||||||
|
select count(*) from t9;
|
||||||
|
select count(*) from t9_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t9 union
|
||||||
|
select * from t9_c) a;
|
||||||
|
|
||||||
|
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
||||||
|
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --ndb-nodegroup_map '(0,0)' --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
|
||||||
|
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
|
||||||
|
|
||||||
|
select count(*) from t1;
|
||||||
|
select count(*) from t1_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t1 union
|
||||||
|
select * from t1_c) a;
|
||||||
|
|
||||||
|
select count(*) from t2;
|
||||||
|
select count(*) from t2_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t2 union
|
||||||
|
select * from t2_c) a;
|
||||||
|
|
||||||
|
select count(*) from t3;
|
||||||
|
select count(*) from t3_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t3 union
|
||||||
|
select * from t3_c) a;
|
||||||
|
|
||||||
|
select count(*) from t4;
|
||||||
|
select count(*) from t4_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t4 union
|
||||||
|
select * from t4_c) a;
|
||||||
|
|
||||||
|
select count(*) from t5;
|
||||||
|
select count(*) from t5_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t5 union
|
||||||
|
select * from t5_c) a;
|
||||||
|
|
||||||
|
select count(*) from t6;
|
||||||
|
select count(*) from t6_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t6 union
|
||||||
|
select * from t6_c) a;
|
||||||
|
|
||||||
|
select count(*) from t7;
|
||||||
|
select count(*) from t7_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t7 union
|
||||||
|
select * from t7_c) a;
|
||||||
|
|
||||||
|
select count(*) from t8;
|
||||||
|
select count(*) from t8_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t8 union
|
||||||
|
select * from t8_c) a;
|
||||||
|
|
||||||
|
select count(*) from t9;
|
||||||
|
select count(*) from t9_c;
|
||||||
|
select count(*)
|
||||||
|
from (select * from t9 union
|
||||||
|
select * from t9_c) a;
|
||||||
|
|
||||||
|
#
|
||||||
|
# Drop all table except t2_c
|
||||||
|
# This to make sure that error returned from ndb_restore above is
|
||||||
|
# guaranteed to be from t2_c, this since order of tables in backup
|
||||||
|
# is none deterministic
|
||||||
|
#
|
||||||
|
drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
||||||
|
--source include/ndb_backup.inc
|
||||||
|
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --core=0 -b $the_backup_id -n 1 -m -r --ndb-nodegroup_map '(0,1)' $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id 2>&1 | grep Translate || true
|
||||||
|
|
||||||
|
#
|
||||||
|
# Cleanup
|
||||||
|
#
|
||||||
|
|
||||||
|
--disable_warnings
|
||||||
|
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||||
|
drop table if exists t2_c;
|
||||||
|
--enable_warnings
|
||||||
|
|
||||||
|
#
|
||||||
|
# Test BUG#10287
|
||||||
|
#
|
||||||
|
|
||||||
|
--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults -d sys -D , SYSTAB_0 | grep 520093696, | sed "s/,$the_backup_id/,<the_backup_id>/"
|
||||||
|
|
||||||
|
# End of 4.1 tests
@ -9,6 +9,47 @@
drop table if exists t1;
--enable_warnings

+#
+# BUG 16002: Handle unsigned integer functions properly
+#
+--error 1064
+create table t1 (a bigint)
+partition by range (a)
+(partition p0 values less than (0xFFFFFFFFFFFFFFFF),
+partition p1 values less than (10));
+--error 1064
+create table t1 (a bigint)
+partition by list (a)
+(partition p0 values in (0xFFFFFFFFFFFFFFFF),
+partition p1 values in (10));
+
+create table t1 (a bigint unsigned)
+partition by range (a)
+(partition p0 values less than (100),
+partition p1 values less than MAXVALUE);
+insert into t1 values (1);
+drop table t1;
+
+create table t1 (a bigint unsigned)
+partition by hash (a);
+insert into t1 values (0xFFFFFFFFFFFFFFFD);
+insert into t1 values (0xFFFFFFFFFFFFFFFE);
+select * from t1 where (a + 1) < 10;
+select * from t1 where (a + 1) > 10;
+drop table t1;
+
+#
+# Bug 19307: CSV engine crashes
+#
+--error ER_PARTITION_MERGE_ERROR
+create table t1 (a int)
+engine = csv
+partition by list (a)
+(partition p0 values in (null));
+
+#
+# Added test case
+#
create table t1 (a int)
partition by key(a)
(partition p0 engine = MEMORY);
@ -908,6 +949,12 @@ create table t1 (f1 smallint)
|
|||||||
partition by list (f1) (partition p0 values in (null));
|
partition by list (f1) (partition p0 values in (null));
|
||||||
insert into t1 values (null);
|
insert into t1 values (null);
|
||||||
select * from t1 where f1 is null;
|
select * from t1 where f1 is null;
|
||||||
|
select * from t1 where f1 < 1;
|
||||||
|
select * from t1 where f1 <= NULL;
|
||||||
|
select * from t1 where f1 < NULL;
|
||||||
|
select * from t1 where f1 >= NULL;
|
||||||
|
select * from t1 where f1 > NULL;
|
||||||
|
select * from t1 where f1 > 1;
|
||||||
drop table t1;
|
drop table t1;
|
||||||
|
|
||||||
create table t1 (f1 smallint)
|
create table t1 (f1 smallint)
|
||||||
@ -974,6 +1021,23 @@ PARTITION p2 VALUES LESS THAN (30) ENGINE = MyISAM);
|
|||||||
SHOW TABLE STATUS;
|
SHOW TABLE STATUS;
|
||||||
DROP TABLE t1;
|
DROP TABLE t1;
|
||||||
|
|
||||||
|
#
|
||||||
|
#BUG 16002 Erroneus handling of unsigned partition functions
|
||||||
|
#
|
||||||
|
--error ER_PARTITION_CONST_DOMAIN_ERROR
|
||||||
|
create table t1 (a bigint unsigned)
|
||||||
|
partition by list (a)
|
||||||
|
(partition p0 values in (0-1));
|
||||||
|
|
||||||
|
create table t1 (a bigint unsigned)
|
||||||
|
partition by range (a)
|
||||||
|
(partition p0 values less than (10));
|
||||||
|
|
||||||
|
--error ER_NO_PARTITION_FOR_GIVEN_VALUE
|
||||||
|
insert into t1 values (0xFFFFFFFFFFFFFFFF);
|
||||||
|
|
||||||
|
drop table t1;
|
||||||
|
|
||||||
#
|
#
|
||||||
#BUG 18750 Problems with partition names
|
#BUG 18750 Problems with partition names
|
||||||
#
|
#
|
||||||
@ -748,6 +748,10 @@ CREATE TABLE t1(a int)
insert into t1 values (10);
drop table t1;

+--error ER_PARTITION_CONST_DOMAIN_ERROR
+create table t1 (a bigint unsigned)
+partition by range (a)
+(partition p0 values less than (-1));
#
# Bug 18198 Partitions: Verify that erroneus partition functions doesn't work
#
@ -5,10 +5,10 @@ CREATE TABLE t1 (f_date DATE, f_varchar VARCHAR(30))
PARTITION BY HASH(CAST(YEAR(f_date) AS SIGNED INTEGER)) PARTITIONS 2;
SHOW CREATE TABLE t1;

---exec ls $MYSQLTEST_VARDIR/master-data/test/t1*
+#--exec ls $MYSQLTEST_VARDIR/master-data/test/t1*
ALTER TABLE t1 COALESCE PARTITION 1;
SHOW CREATE TABLE t1;
---exec ls $MYSQLTEST_VARDIR/master-data/test/t1*
+#--exec ls $MYSQLTEST_VARDIR/master-data/test/t1*


@ -388,6 +388,31 @@ SELECT COUNT(*) FROM t1 WHERE c3 BETWEEN '1996-12-31' AND '2000-12-31';
SELECT COUNT(*) FROM t1 WHERE c3 < '2000-12-31';
DROP TABLE t1;

+#
+# BUG 16002: Unsigned partition functions not handled correctly
+#
+--error ER_RANGE_NOT_INCREASING_ERROR
+create table t1 (a bigint unsigned)
+partition by range (a)
+(partition p0 values less than (10),
+partition p1 values less than (0));
+
+create table t1 (a bigint unsigned)
+partition by range (a)
+(partition p0 values less than (0),
+partition p1 values less than (10));
+show create table t1;
+drop table t1;
+
+create table t1 (a bigint unsigned)
+partition by range (a)
+(partition p0 values less than (2),
+partition p1 values less than (10));
+show create table t1;
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE
+insert into t1 values (0xFFFFFFFFFFFFFFFF);
+drop table t1;
+
#
# BUG 18962 Errors in DROP PARTITION
#
@ -40,14 +40,14 @@ my_string fn_ext(const char *name)
DBUG_ENTER("fn_ext");
DBUG_PRINT("mfunkt",("name: '%s'",name));

-#if defined(FN_DEVCHAR) || defined(FN_C_AFTER_DIR)
+#if defined(FN_DEVCHAR) || defined(FN_C_AFTER_DIR) || defined(BASKSLASH_MBTAIL)
{
char buff[FN_REFLEN];
gpos=(my_string) name+dirname_part(buff,(char*) name);
}
#else
-if (!(gpos=strrchr(name,FNLIBCHAR)))
-gpos=name;
+if (!(gpos= strrchr(name, FN_LIBCHAR)))
+gpos= (my_string) name;
#endif
pos=strchr(gpos,FN_EXTCHAR);
DBUG_RETURN (pos ? pos : strend(gpos));
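The fn_ext() change above corrects the misspelled FNLIBCHAR so the extension search really does start after the last directory separator. A minimal standalone sketch of the same idea (hypothetical helper in plain C++, not the mysys implementation):

// Find the extension of the last path component only, so a dot inside a
// directory name is not mistaken for the file extension.
#include <cstring>
#include <cstdio>

static const char *simple_fn_ext(const char *name, char dir_sep /* e.g. '/' */) {
    const char *base = std::strrchr(name, dir_sep);    // last directory separator
    base = base ? base + 1 : name;                      // file-name part
    const char *ext = std::strchr(base, '.');           // first '.' in the file name
    return ext ? ext : base + std::strlen(base);        // empty string if none
}

int main() {
    std::printf("%s\n", simple_fn_ext("/tmp/dir.d/table.MYD", '/'));  // prints ".MYD"
    std::printf("[%s]\n", simple_fn_ext("/tmp/dir.d/table", '/'));    // prints "[]"
    return 0;
}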
@ -2452,6 +2452,11 @@ int ha_ndbcluster::write_row(byte *record)
*/
if (!m_use_write && m_ignore_dup_key)
{
+/*
+compare if expression with that in start_bulk_insert()
+start_bulk_insert will set parameters to ensure that each
+write_row is committed individually
+*/
int peek_res= peek_indexed_rows(record);

if (!peek_res)
@ -3693,6 +3698,19 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
DBUG_PRINT("enter", ("rows: %d", (int)rows));

m_rows_inserted= (ha_rows) 0;
+if (!m_use_write && m_ignore_dup_key)
+{
+/*
+compare if expression with that in write_row
+we have a situation where peek_indexed_rows() will be called
+so we cannot batch
+*/
+DBUG_PRINT("info", ("Batching turned off as duplicate key is "
+"ignored by using peek_row"));
+m_rows_to_insert= 1;
+m_bulk_insert_rows= 1;
+DBUG_VOID_RETURN;
+}
if (rows == (ha_rows) 0)
{
/* We don't know how many will be inserted, guess */
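The two hunks above deliberately test the same predicate (!m_use_write && m_ignore_dup_key) in both write_row() and start_bulk_insert(): whenever a duplicate-key peek is needed before each insert, batching is turned off. A rough standalone sketch of that pattern with invented names (not the NDB handler code), where the shared condition lives in one helper so the two call sites cannot drift apart:

#include <cstdio>

class InsertPath {
public:
    InsertPath(bool use_write, bool ignore_dup_key)
        : m_use_write(use_write), m_ignore_dup_key(ignore_dup_key) {}

    void start_bulk_insert(unsigned long rows) {
        if (must_peek_for_duplicates()) {
            // Each row is committed individually, so batching is pointless.
            m_rows_to_insert = 1;
            m_bulk_insert_rows = 1;
            return;
        }
        m_rows_to_insert = rows ? rows : 10000;   // guess when the count is unknown
        m_bulk_insert_rows = m_rows_to_insert;
    }

    bool write_row() {
        if (must_peek_for_duplicates()) {
            // Same predicate as start_bulk_insert(): read-before-write per row.
            std::puts("peeking for an existing row before insert");
        }
        return true;
    }

private:
    bool must_peek_for_duplicates() const {
        return !m_use_write && m_ignore_dup_key;
    }

    bool m_use_write;
    bool m_ignore_dup_key;
    unsigned long m_rows_to_insert = 0;
    unsigned long m_bulk_insert_rows = 0;
};

int main() {
    InsertPath path(/*use_write=*/false, /*ignore_dup_key=*/true);
    path.start_bulk_insert(0);
    path.write_row();
    return 0;
}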
@ -4468,12 +4486,13 @@ int ha_ndbcluster::create(const char *name,
|
|||||||
TABLE *form,
|
TABLE *form,
|
||||||
HA_CREATE_INFO *info)
|
HA_CREATE_INFO *info)
|
||||||
{
|
{
|
||||||
|
THD *thd= current_thd;
|
||||||
NDBTAB tab;
|
NDBTAB tab;
|
||||||
NDBCOL col;
|
NDBCOL col;
|
||||||
uint pack_length, length, i, pk_length= 0;
|
uint pack_length, length, i, pk_length= 0;
|
||||||
const void *data, *pack_data;
|
const void *data, *pack_data;
|
||||||
bool create_from_engine= (info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
|
bool create_from_engine= (info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
|
||||||
bool is_truncate= (current_thd->lex->sql_command == SQLCOM_TRUNCATE);
|
bool is_truncate= (thd->lex->sql_command == SQLCOM_TRUNCATE);
|
||||||
|
|
||||||
DBUG_ENTER("ha_ndbcluster::create");
|
DBUG_ENTER("ha_ndbcluster::create");
|
||||||
DBUG_PRINT("enter", ("name: %s", name));
|
DBUG_PRINT("enter", ("name: %s", name));
|
||||||
@ -4661,10 +4680,21 @@ int ha_ndbcluster::create(const char *name,
Failed to create an index,
drop the table (and all it's indexes)
*/
-if (dict->dropTableGlobal(*m_table) == 0)
+while (dict->dropTableGlobal(*m_table))
{
-m_table = 0;
+switch (dict->getNdbError().status)
+{
+case NdbError::TemporaryError:
+if (!thd->killed)
+continue; // retry indefinitly
+break;
+default:
+break;
+}
+break;
}
+m_table = 0;
+DBUG_RETURN(my_errno);
}

#ifdef HAVE_NDB_BINLOG
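The new loop above (and the matching retry labels in delete_table() further down) retries the dictionary drop for as long as the error is classified as temporary and the thread has not been killed. A generic sketch of that retry shape with invented names, not the NDB API:

#include <functional>

enum class ErrStatus { Ok, Temporary, Permanent };

struct Outcome { bool ok; ErrStatus status; };

static bool retry_while_temporary(const std::function<Outcome()> &op,
                                  const volatile bool &killed) {
    for (;;) {
        Outcome r = op();
        if (r.ok)
            return true;                        // operation succeeded
        if (r.status == ErrStatus::Temporary && !killed)
            continue;                           // retry indefinitely, like the loop above
        return false;                           // permanent error, or thread was killed
    }
}

int main() {
    volatile bool killed = false;
    int attempts = 0;
    // Succeeds on the third attempt; the first two report a temporary error.
    bool ok = retry_while_temporary([&]() -> Outcome {
        ++attempts;
        return attempts < 3 ? Outcome{false, ErrStatus::Temporary}
                            : Outcome{true, ErrStatus::Ok};
    }, killed);
    return ok ? 0 : 1;
}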
@ -4727,8 +4757,8 @@ int ha_ndbcluster::create(const char *name,
|
|||||||
*/
|
*/
|
||||||
if (share && !do_event_op)
|
if (share && !do_event_op)
|
||||||
share->flags|= NSF_NO_BINLOG;
|
share->flags|= NSF_NO_BINLOG;
|
||||||
ndbcluster_log_schema_op(current_thd, share,
|
ndbcluster_log_schema_op(thd, share,
|
||||||
current_thd->query, current_thd->query_length,
|
thd->query, thd->query_length,
|
||||||
share->db, share->table_name,
|
share->db, share->table_name,
|
||||||
m_table->getObjectId(),
|
m_table->getObjectId(),
|
||||||
m_table->getObjectVersion(),
|
m_table->getObjectVersion(),
|
||||||
@ -5177,9 +5207,9 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
|
|||||||
THD *thd= current_thd;
|
THD *thd= current_thd;
|
||||||
DBUG_ENTER("ha_ndbcluster::ndbcluster_delete_table");
|
DBUG_ENTER("ha_ndbcluster::ndbcluster_delete_table");
|
||||||
NDBDICT *dict= ndb->getDictionary();
|
NDBDICT *dict= ndb->getDictionary();
|
||||||
#ifdef HAVE_NDB_BINLOG
|
|
||||||
int ndb_table_id= 0;
|
int ndb_table_id= 0;
|
||||||
int ndb_table_version= 0;
|
int ndb_table_version= 0;
|
||||||
|
#ifdef HAVE_NDB_BINLOG
|
||||||
/*
|
/*
|
||||||
Don't allow drop table unless
|
Don't allow drop table unless
|
||||||
schema distribution table is setup
|
schema distribution table is setup
|
||||||
@ -5197,15 +5227,25 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
|
|||||||
int res= 0;
|
int res= 0;
|
||||||
if (h && h->m_table)
|
if (h && h->m_table)
|
||||||
{
|
{
|
||||||
if (dict->dropTableGlobal(*h->m_table))
|
retry_temporary_error1:
|
||||||
res= ndb_to_mysql_error(&dict->getNdbError());
|
if (dict->dropTableGlobal(*h->m_table) == 0)
|
||||||
#ifdef HAVE_NDB_BINLOG
|
|
||||||
if (res == 0)
|
|
||||||
{
|
{
|
||||||
ndb_table_id= h->m_table->getObjectId();
|
ndb_table_id= h->m_table->getObjectId();
|
||||||
ndb_table_version= h->m_table->getObjectVersion();
|
ndb_table_version= h->m_table->getObjectVersion();
|
||||||
}
|
}
|
||||||
#endif
|
else
|
||||||
|
{
|
||||||
|
switch (dict->getNdbError().status)
|
||||||
|
{
|
||||||
|
case NdbError::TemporaryError:
|
||||||
|
if (!thd->killed)
|
||||||
|
goto retry_temporary_error1; // retry indefinitly
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
res= ndb_to_mysql_error(&dict->getNdbError());
|
||||||
|
}
|
||||||
h->release_metadata(thd, ndb);
|
h->release_metadata(thd, ndb);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
@ -5216,17 +5256,28 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
|
|||||||
Ndb_table_guard ndbtab_g(dict, table_name);
|
Ndb_table_guard ndbtab_g(dict, table_name);
|
||||||
if (ndbtab_g.get_table())
|
if (ndbtab_g.get_table())
|
||||||
{
|
{
|
||||||
|
retry_temporary_error2:
|
||||||
if (dict->dropTableGlobal(*ndbtab_g.get_table()) == 0)
|
if (dict->dropTableGlobal(*ndbtab_g.get_table()) == 0)
|
||||||
{
|
{
|
||||||
#ifdef HAVE_NDB_BINLOG
|
|
||||||
ndb_table_id= ndbtab_g.get_table()->getObjectId();
|
ndb_table_id= ndbtab_g.get_table()->getObjectId();
|
||||||
ndb_table_version= ndbtab_g.get_table()->getObjectVersion();
|
ndb_table_version= ndbtab_g.get_table()->getObjectVersion();
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
else if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT)
|
else
|
||||||
{
|
{
|
||||||
ndbtab_g.invalidate();
|
switch (dict->getNdbError().status)
|
||||||
continue;
|
{
|
||||||
|
case NdbError::TemporaryError:
|
||||||
|
if (!thd->killed)
|
||||||
|
goto retry_temporary_error2; // retry indefinitly
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT)
|
||||||
|
{
|
||||||
|
ndbtab_g.invalidate();
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
@ -5848,6 +5899,7 @@ int ndbcluster_drop_database_impl(const char *path)
|
|||||||
while ((tabname=it++))
|
while ((tabname=it++))
|
||||||
{
|
{
|
||||||
tablename_to_filename(tabname, tmp, FN_REFLEN - (tmp - full_path)-1);
|
tablename_to_filename(tabname, tmp, FN_REFLEN - (tmp - full_path)-1);
|
||||||
|
VOID(pthread_mutex_lock(&LOCK_open));
|
||||||
if (ha_ndbcluster::delete_table(0, ndb, full_path, dbname, tabname))
|
if (ha_ndbcluster::delete_table(0, ndb, full_path, dbname, tabname))
|
||||||
{
|
{
|
||||||
const NdbError err= dict->getNdbError();
|
const NdbError err= dict->getNdbError();
|
||||||
@ -5857,6 +5909,7 @@ int ndbcluster_drop_database_impl(const char *path)
|
|||||||
ret= ndb_to_mysql_error(&err);
|
ret= ndb_to_mysql_error(&err);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
VOID(pthread_mutex_unlock(&LOCK_open));
|
||||||
}
|
}
|
||||||
DBUG_RETURN(ret);
|
DBUG_RETURN(ret);
|
||||||
}
|
}
|
||||||
@ -6420,14 +6473,7 @@ void ha_ndbcluster::print_error(int error, myf errflag)
|
|||||||
DBUG_PRINT("enter", ("error = %d", error));
|
DBUG_PRINT("enter", ("error = %d", error));
|
||||||
|
|
||||||
if (error == HA_ERR_NO_PARTITION_FOUND)
|
if (error == HA_ERR_NO_PARTITION_FOUND)
|
||||||
{
|
m_part_info->print_no_partition_found(table);
|
||||||
char buf[100];
|
|
||||||
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
|
|
||||||
my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
|
|
||||||
m_part_info->part_expr->null_value ? "NULL" :
|
|
||||||
llstr(m_part_info->part_expr->val_int(), buf));
|
|
||||||
dbug_tmp_restore_column_map(table->read_set, old_map);
|
|
||||||
}
|
|
||||||
else
|
else
|
||||||
handler::print_error(error, errflag);
|
handler::print_error(error, errflag);
|
||||||
DBUG_VOID_RETURN;
|
DBUG_VOID_RETURN;
|
||||||
@ -9633,6 +9679,7 @@ int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info)
MYF(0));
uint i;
int error= 0;
+bool unsigned_flag= part_info->part_expr->unsigned_flag;
DBUG_ENTER("set_range_data");

if (!range_data)
@ -9643,6 +9690,8 @@ int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info)
for (i= 0; i < part_info->no_parts; i++)
{
longlong range_val= part_info->range_int_array[i];
+if (unsigned_flag)
+range_val-= 0x8000000000000000ULL;
if (range_val < INT_MIN32 || range_val >= INT_MAX32)
{
if ((i != part_info->no_parts - 1) ||
@ -9669,6 +9718,7 @@ int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info)
* sizeof(int32), MYF(0));
uint32 *part_id, i;
int error= 0;
+bool unsigned_flag= part_info->part_expr->unsigned_flag;
DBUG_ENTER("set_list_data");

if (!list_data)
@ -9680,6 +9730,8 @@ int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info)
{
LIST_PART_ENTRY *list_entry= &part_info->list_array[i];
longlong list_val= list_entry->list_value;
+if (unsigned_flag)
+list_val-= 0x8000000000000000ULL;
if (list_val < INT_MIN32 || list_val > INT_MAX32)
{
my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB");
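The subtraction of 0x8000000000000000 added above shifts unsigned partition boundaries into signed space before they are handed on. A standalone sketch (plain C++, not NDB code) of why that offset is safe: the mapping is order-preserving over the whole unsigned 64-bit range, so unsigned boundaries can then be compared with signed arithmetic.

#include <cstdint>
#include <cassert>

static int64_t to_signed_order(uint64_t v) {
    // Wraps modulo 2^64; on two's-complement hardware reinterpreting the result
    // as signed keeps the relative order of the original unsigned values.
    return (int64_t)(v - UINT64_C(0x8000000000000000));
}

int main() {
    uint64_t a = 0;                               // smallest unsigned value
    uint64_t b = UINT64_C(10);
    uint64_t c = UINT64_C(0xFFFFFFFFFFFFFFFF);    // largest unsigned value

    assert(to_signed_order(a) < to_signed_order(b));
    assert(to_signed_order(b) < to_signed_order(c));
    assert(to_signed_order(a) == INT64_MIN);
    assert(to_signed_order(c) == INT64_MAX);
    return 0;
}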
@ -9745,7 +9797,19 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
/*
|
#ifdef NOT_YET
|
||||||
|
if (!current_thd->variables.new_mode)
|
||||||
|
{
|
||||||
|
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
|
||||||
|
ER_ILLEGAL_HA_CREATE_OPTION,
|
||||||
|
ER(ER_ILLEGAL_HA_CREATE_OPTION),
|
||||||
|
ndbcluster_hton_name,
|
||||||
|
"LIST, RANGE and HASH partition disabled by default,"
|
||||||
|
" use --new option to enable");
|
||||||
|
return HA_ERR_UNSUPPORTED;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
/*
|
||||||
Create a shadow field for those tables that have user defined
|
Create a shadow field for those tables that have user defined
|
||||||
partitioning. This field stores the value of the partition
|
partitioning. This field stores the value of the partition
|
||||||
function such that NDB can handle reorganisations of the data
|
function such that NDB can handle reorganisations of the data
|
||||||
@ -9824,10 +9888,21 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
|
|||||||
uint i;
|
uint i;
|
||||||
const NDBTAB *tab= (const NDBTAB *) m_table;
|
const NDBTAB *tab= (const NDBTAB *) m_table;
|
||||||
|
|
||||||
|
if (current_thd->variables.ndb_use_copying_alter_table)
|
||||||
|
{
|
||||||
|
DBUG_PRINT("info", ("On-line alter table disabled"));
|
||||||
|
DBUG_RETURN(COMPATIBLE_DATA_NO);
|
||||||
|
}
|
||||||
|
|
||||||
for (i= 0; i < table->s->fields; i++)
|
for (i= 0; i < table->s->fields; i++)
|
||||||
{
|
{
|
||||||
Field *field= table->field[i];
|
Field *field= table->field[i];
|
||||||
const NDBCOL *col= tab->getColumn(field->field_name);
|
const NDBCOL *col= tab->getColumn(i);
|
||||||
|
if (field->flags & FIELD_IS_RENAMED)
|
||||||
|
{
|
||||||
|
DBUG_PRINT("info", ("Field has been renamed, copy table"));
|
||||||
|
DBUG_RETURN(COMPATIBLE_DATA_NO);
|
||||||
|
}
|
||||||
if ((field->flags & FIELD_IN_ADD_INDEX) &&
|
if ((field->flags & FIELD_IN_ADD_INDEX) &&
|
||||||
col->getStorageType() == NdbDictionary::Column::StorageTypeDisk)
|
col->getStorageType() == NdbDictionary::Column::StorageTypeDisk)
|
||||||
{
|
{
|
||||||
|
@ -286,6 +286,7 @@ ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
|
|||||||
int error;
|
int error;
|
||||||
DBUG_ENTER("ndbcluster_binlog_open_table");
|
DBUG_ENTER("ndbcluster_binlog_open_table");
|
||||||
|
|
||||||
|
safe_mutex_assert_owner(&LOCK_open);
|
||||||
init_tmp_table_share(table_share, share->db, 0, share->table_name,
|
init_tmp_table_share(table_share, share->db, 0, share->table_name,
|
||||||
share->key);
|
share->key);
|
||||||
if ((error= open_table_def(thd, table_share, 0)))
|
if ((error= open_table_def(thd, table_share, 0)))
|
||||||
|
@@ -5097,14 +5097,7 @@ void ha_partition::print_error(int error, myf errflag)
 DBUG_PRINT("enter", ("error: %d", error));

 if (error == HA_ERR_NO_PARTITION_FOUND)
-{
-char buf[100];
-my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
-my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
-m_part_info->part_expr->null_value ? "NULL" :
-llstr(m_part_info->part_expr->val_int(), buf));
-dbug_tmp_restore_column_map(table->read_set, old_map);
-}
+m_part_info->print_no_partition_found(table);
 else
 m_file[0]->print_error(error, errflag);
 DBUG_VOID_RETURN;
@@ -68,14 +68,14 @@ ulong total_ha_2pc= 0;
 /* size of savepoint storage area (see ha_init) */
 ulong savepoint_alloc_size= 0;

-struct show_table_alias_st sys_table_aliases[]=
+static const LEX_STRING sys_table_aliases[]=
 {
-{"INNOBASE", DB_TYPE_INNODB},
-{"NDB", DB_TYPE_NDBCLUSTER},
-{"BDB", DB_TYPE_BERKELEY_DB},
-{"HEAP", DB_TYPE_HEAP},
-{"MERGE", DB_TYPE_MRG_MYISAM},
-{NullS, DB_TYPE_UNKNOWN}
+{(char*)STRING_WITH_LEN("INNOBASE")}, {(char*)STRING_WITH_LEN("INNODB")},
+{(char*)STRING_WITH_LEN("NDB")}, {(char*)STRING_WITH_LEN("NDBCLUSTER")},
+{(char*)STRING_WITH_LEN("BDB")}, {(char*)STRING_WITH_LEN("BERKELEYDB")},
+{(char*)STRING_WITH_LEN("HEAP")}, {(char*)STRING_WITH_LEN("MEMORY")},
+{(char*)STRING_WITH_LEN("MERGE")}, {(char*)STRING_WITH_LEN("MRG_MYISAM")},
+{NullS, 0}
 };

 const char *ha_row_type[] = {
@@ -91,15 +91,50 @@ TYPELIB tx_isolation_typelib= {array_elements(tx_isolation_names)-1,"",
 static TYPELIB known_extensions= {0,"known_exts", NULL, NULL};
 uint known_extensions_id= 0;

-handlerton *ha_resolve_by_name(THD *thd, LEX_STRING *name)
+/*
+Return the default storage engine handlerton for thread
+
+SYNOPSIS
+ha_default_handlerton(thd)
+thd current thread
+
+RETURN
+pointer to handlerton
+*/
+
+handlerton *ha_default_handlerton(THD *thd)
 {
-show_table_alias_st *table_alias;
+return (thd->variables.table_type != NULL) ?
+thd->variables.table_type :
+(global_system_variables.table_type != NULL ?
+global_system_variables.table_type : &myisam_hton);
+}
+
+
+/*
+Return the storage engine handlerton for the supplied name
+
+SYNOPSIS
+ha_resolve_by_name(thd, name)
+thd current thread
+name name of storage engine
+
+RETURN
+pointer to handlerton
+*/
+
+handlerton *ha_resolve_by_name(THD *thd, const LEX_STRING *name)
+{
+const LEX_STRING *table_alias;
 st_plugin_int *plugin;

-if (thd && !my_strnncoll(&my_charset_latin1,
+redo:
+/* my_strnncoll is a macro and gcc doesn't do early expansion of macro */
+if (thd && !my_charset_latin1.coll->strnncoll(&my_charset_latin1,
 (const uchar *)name->str, name->length,
-(const uchar *)"DEFAULT", 7))
-return ha_resolve_by_legacy_type(thd, DB_TYPE_DEFAULT);
+(const uchar *)STRING_WITH_LEN("DEFAULT"), 0))
+return ha_default_handlerton(thd);

 if ((plugin= plugin_lock(name, MYSQL_STORAGE_ENGINE_PLUGIN)))
 {
@@ -112,13 +147,15 @@ handlerton *ha_resolve_by_name(THD *thd, LEX_STRING *name)
 /*
 We check for the historical aliases.
 */
-for (table_alias= sys_table_aliases; table_alias->type; table_alias++)
+for (table_alias= sys_table_aliases; table_alias->str; table_alias+= 2)
 {
 if (!my_strnncoll(&my_charset_latin1,
 (const uchar *)name->str, name->length,
-(const uchar *)table_alias->alias,
-strlen(table_alias->alias)))
-return ha_resolve_by_legacy_type(thd, table_alias->type);
+(const uchar *)table_alias->str, table_alias->length))
+{
+name= table_alias + 1;
+goto redo;
+}
 }

 return NULL;
@@ -130,20 +167,20 @@ const char *ha_get_storage_engine(enum legacy_db_type db_type)
 switch (db_type) {
 case DB_TYPE_DEFAULT:
 return "DEFAULT";
-case DB_TYPE_UNKNOWN:
-return "UNKNOWN";
 default:
 if (db_type > DB_TYPE_UNKNOWN && db_type < DB_TYPE_DEFAULT &&
 installed_htons[db_type])
 return hton2plugin[installed_htons[db_type]->slot]->name.str;
-return "*NONE*";
+/* fall through */
+case DB_TYPE_UNKNOWN:
+return "UNKNOWN";
 }
 }


 static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root)
 {
-handlerton *hton=ha_resolve_by_legacy_type(current_thd, DB_TYPE_DEFAULT);
+handlerton *hton= ha_default_handlerton(current_thd);
 return (hton && hton->create) ? hton->create(table, mem_root) : NULL;
 }

@@ -152,10 +189,7 @@ handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type)
 {
 switch (db_type) {
 case DB_TYPE_DEFAULT:
-return (thd->variables.table_type != NULL) ?
-thd->variables.table_type :
-(global_system_variables.table_type != NULL ?
-global_system_variables.table_type : &myisam_hton);
+return ha_default_handlerton(thd);
 case DB_TYPE_UNKNOWN:
 return NULL;
 default:
@@ -196,7 +230,7 @@ handlerton *ha_checktype(THD *thd, enum legacy_db_type database_type,
 break;
 }

-return ha_resolve_by_legacy_type(thd, DB_TYPE_DEFAULT);
+return ha_default_handlerton(thd);
 } /* ha_checktype */

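
The reworked sys_table_aliases is a flat array where each historical alias sits at an even index and its canonical engine name at the following odd index, and resolution simply retries with the canonical name. A small self-contained sketch of that layout and lookup (illustrative C++; the resolve() helper and its case-insensitive compare are assumptions, not server code):

    // alias_resolve.cc -- illustrative sketch only.
    #include <cstdio>
    #include <strings.h>   // strcasecmp (POSIX)

    static const char *aliases[]=
    { "INNOBASE", "INNODB", "NDB", "NDBCLUSTER", "BDB", "BERKELEYDB",
      "HEAP", "MEMORY", "MERGE", "MRG_MYISAM", 0 };

    static const char *resolve(const char *name)
    {
    redo:
      for (const char **p= aliases; *p; p+= 2)   // step over alias/name pairs
        if (!strcasecmp(name, p[0]))
        {
          name= p[1];     // retry with the canonical engine name,
          goto redo;      // mirroring the redo: label in ha_resolve_by_name
        }
      return name;        // canonical name (or the input if nothing matched)
    }

    int main() { printf("%s\n", resolve("HEAP")); }   // prints MEMORY
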
@@ -667,10 +667,6 @@ struct handlerton
 struct handler_iterator *fill_this_in);
 };

-struct show_table_alias_st {
-const char *alias;
-enum legacy_db_type type;
-};

 /* Possible flags of a handlerton */
 #define HTON_NO_FLAGS 0
@@ -1545,7 +1541,8 @@ extern ulong total_ha, total_ha_2pc;
 #define ha_rollback(thd) (ha_rollback_trans((thd), TRUE))

 /* lookups */
-handlerton *ha_resolve_by_name(THD *thd, LEX_STRING *name);
+handlerton *ha_default_handlerton(THD *thd);
+handlerton *ha_resolve_by_name(THD *thd, const LEX_STRING *name);
 handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type);
 const char *ha_get_storage_engine(enum legacy_db_type db_type);
 handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc,
@@ -691,6 +691,7 @@ public:
 str->charset(), conv_charset, &errors))
 null_value= 1;
 use_cached_value= 1;
+str_value.mark_as_const();
 safe= (errors == 0);
 }
 else
@@ -1367,7 +1367,7 @@ void Item_func_curtime::fix_length_and_dec()
 {
 TIME ltime;

-decimals=0;
+decimals= DATETIME_DEC;
 collation.set(&my_charset_bin);
 store_now_in_TIME(&ltime);
 value= TIME_to_ulonglong_time(&ltime);
@@ -1414,7 +1414,7 @@ String *Item_func_now::val_str(String *str)

 void Item_func_now::fix_length_and_dec()
 {
-decimals=0;
+decimals= DATETIME_DEC;
 collation.set(&my_charset_bin);

 store_now_in_TIME(&ltime);
@@ -1761,7 +1761,7 @@ void Item_func_from_unixtime::fix_length_and_dec()
 {
 thd= current_thd;
 collation.set(&my_charset_bin);
-decimals=0;
+decimals= DATETIME_DEC;
 max_length=MAX_DATETIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
 maybe_null= 1;
 thd->time_zone_used= 1;
@@ -638,6 +638,7 @@ public:
 {
 collation.set(&my_charset_bin);
 maybe_null=1;
+decimals= DATETIME_DEC;
 max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
 }
 enum_field_types field_type() const { return MYSQL_TYPE_TIME; }
@@ -4695,6 +4695,7 @@ enum options_mysqld
 OPT_NDB_EXTRA_LOGGING,
 OPT_NDB_REPORT_THRESH_BINLOG_EPOCH_SLIP,
 OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE,
+OPT_NDB_USE_COPYING_ALTER_TABLE,
 OPT_SKIP_SAFEMALLOC,
 OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE,
 OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
@@ -4983,11 +4984,12 @@ Disable with --skip-bdb (will save memory).",
 (gptr*) &default_collation_name, (gptr*) &default_collation_name,
 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
 {"default-storage-engine", OPT_STORAGE_ENGINE,
-"Set the default storage engine (table type) for tables.", 0, 0,
+"Set the default storage engine (table type) for tables.",
+(gptr*)&default_storage_engine_str, (gptr*)&default_storage_engine_str,
 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
 {"default-table-type", OPT_STORAGE_ENGINE,
 "(deprecated) Use --default-storage-engine.",
-(gptr*)default_storage_engine_str, (gptr*)default_storage_engine_str,
+(gptr*)&default_storage_engine_str, (gptr*)&default_storage_engine_str,
 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
 {"default-time-zone", OPT_DEFAULT_TIME_ZONE, "Set the default time zone.",
 (gptr*) &default_tz_name, (gptr*) &default_tz_name,
@@ -5430,6 +5432,12 @@ Disable with --skip-ndbcluster (will save memory).",
 (gptr*) &max_system_variables.ndb_index_stat_update_freq,
 0, GET_ULONG, OPT_ARG, 20, 0, ~0L, 0, 0, 0},
 #endif
+{"ndb-use-copying-alter-table",
+OPT_NDB_USE_COPYING_ALTER_TABLE,
+"Force ndbcluster to always copy tables at alter table (should only be used if on-line alter table fails).",
+(gptr*) &global_system_variables.ndb_use_copying_alter_table,
+(gptr*) &global_system_variables.ndb_use_copying_alter_table,
+0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
 {"new", 'n', "Use very new possible 'unsafe' functions.",
 (gptr*) &global_system_variables.new_mode,
 (gptr*) &max_system_variables.new_mode,
@@ -36,18 +36,32 @@ enum partition_state {
 PART_IS_ADDED= 8
 };

+/*
+This struct is used to contain the value of an element
+in the VALUES IN struct. It needs to keep knowledge of
+whether it is a signed/unsigned value and whether it is
+NULL or not.
+*/
+
+typedef struct p_elem_val
+{
+longlong value;
+bool null_value;
+bool unsigned_flag;
+} part_elem_value;
+
 struct st_ddl_log_memory_entry;

 class partition_element :public Sql_alloc {
 public:
 List<partition_element> subpartitions;
-List<longlong> list_val_list;
+List<part_elem_value> list_val_list;
 ulonglong part_max_rows;
 ulonglong part_min_rows;
+longlong range_value;
 char *partition_name;
 char *tablespace_name;
 struct st_ddl_log_memory_entry *log_entry;
-longlong range_value;
 char* part_comment;
 char* data_file_name;
 char* index_file_name;
@@ -55,14 +69,17 @@ public:
 enum partition_state part_state;
 uint16 nodegroup_id;
 bool has_null_value;
+bool signed_flag;/* Indicate whether this partition uses signed constants */
+bool max_value; /* Indicate whether this partition uses MAXVALUE */

 partition_element()
-: part_max_rows(0), part_min_rows(0), partition_name(NULL),
-tablespace_name(NULL), log_entry(NULL),
-range_value(0), part_comment(NULL),
+: part_max_rows(0), part_min_rows(0), range_value(0),
+partition_name(NULL), tablespace_name(NULL),
+log_entry(NULL), part_comment(NULL),
 data_file_name(NULL), index_file_name(NULL),
-engine_type(NULL),part_state(PART_NORMAL),
-nodegroup_id(UNDEF_NODEGROUP), has_null_value(FALSE)
+engine_type(NULL), part_state(PART_NORMAL),
+nodegroup_id(UNDEF_NODEGROUP), has_null_value(FALSE),
+signed_flag(FALSE), max_value(FALSE)
 {
 }
 partition_element(partition_element *part_elem)
@@ -442,9 +442,11 @@ bool partition_info::check_engine_mix(handlerton **engine_array, uint no_parts)
 DBUG_RETURN(TRUE);
 }
 } while (++i < no_parts);
-if (engine_array[0] == &myisammrg_hton)
+if (engine_array[0] == &myisammrg_hton ||
+engine_array[0] == &tina_hton)
 {
-my_error(ER_PARTITION_MERGE_ERROR, MYF(0));
+my_error(ER_PARTITION_MERGE_ERROR, MYF(0),
+engine_array[0] == &myisammrg_hton ? "MyISAM Merge" : "CSV");
 DBUG_RETURN(TRUE);
 }
 DBUG_RETURN(FALSE);
@@ -474,11 +476,13 @@ bool partition_info::check_engine_mix(handlerton **engine_array, uint no_parts)
 bool partition_info::check_range_constants()
 {
 partition_element* part_def;
-longlong current_largest_int= LONGLONG_MIN;
-longlong part_range_value_int;
+longlong current_largest;
+longlong part_range_value;
+bool first= TRUE;
 uint i;
 List_iterator<partition_element> it(partitions);
 bool result= TRUE;
+bool signed_flag= !part_expr->unsigned_flag;
 DBUG_ENTER("partition_info::check_range_constants");
 DBUG_PRINT("enter", ("INT_RESULT with %d parts", no_parts));

@@ -494,18 +498,31 @@ bool partition_info::check_range_constants()
 {
 part_def= it++;
 if ((i != (no_parts - 1)) || !defined_max_value)
-part_range_value_int= part_def->range_value;
-else
-part_range_value_int= LONGLONG_MAX;
-if (likely(current_largest_int < part_range_value_int))
 {
-current_largest_int= part_range_value_int;
-range_int_array[i]= part_range_value_int;
+part_range_value= part_def->range_value;
+if (!signed_flag)
+part_range_value-= 0x8000000000000000ULL;
+}
+else
+part_range_value= LONGLONG_MAX;
+if (first)
+{
+current_largest= part_range_value;
+range_int_array[0]= part_range_value;
+first= FALSE;
 }
 else
 {
-my_error(ER_RANGE_NOT_INCREASING_ERROR, MYF(0));
-goto end;
+if (likely(current_largest < part_range_value))
+{
+current_largest= part_range_value;
+range_int_array[i]= part_range_value;
+}
+else
+{
+my_error(ER_RANGE_NOT_INCREASING_ERROR, MYF(0));
+goto end;
+}
 }
 } while (++i < no_parts);
 result= FALSE;
@@ -515,8 +532,8 @@ end:


 /*
-A support routine for check_list_constants used by qsort to sort the
-constant list expressions.
+Support routines for check_list_constants used by qsort to sort the
+constant list expressions. One routine for unsigned and one for signed.

 SYNOPSIS
 list_part_cmp()
@@ -566,10 +583,9 @@ bool partition_info::check_list_constants()
 {
 uint i;
 uint list_index= 0;
-longlong *list_value;
-bool not_first;
+part_elem_value *list_value;
 bool result= TRUE;
-longlong curr_value, prev_value;
+longlong curr_value, prev_value, type_add, calc_value;
 partition_element* part_def;
 bool found_null= FALSE;
 List_iterator<partition_element> list_func_it(partitions);
@@ -607,12 +623,13 @@ bool partition_info::check_list_constants()
 has_null_part_id= i;
 found_null= TRUE;
 }
-List_iterator<longlong> list_val_it1(part_def->list_val_list);
+List_iterator<part_elem_value> list_val_it1(part_def->list_val_list);
 while (list_val_it1++)
 no_list_values++;
 } while (++i < no_parts);
 list_func_it.rewind();
-list_array= (LIST_PART_ENTRY*)sql_alloc(no_list_values*sizeof(LIST_PART_ENTRY));
+list_array= (LIST_PART_ENTRY*)sql_alloc((no_list_values+1) *
+sizeof(LIST_PART_ENTRY));
 if (unlikely(list_array == NULL))
 {
 mem_alloc_error(no_list_values * sizeof(LIST_PART_ENTRY));
@@ -620,35 +637,48 @@ bool partition_info::check_list_constants()
 }

 i= 0;
+/*
+Fix to be able to reuse signed sort functions also for unsigned
+partition functions.
+*/
+type_add= (longlong)(part_expr->unsigned_flag ?
+0x8000000000000000ULL :
+0ULL);
+
 do
 {
 part_def= list_func_it++;
-List_iterator<longlong> list_val_it2(part_def->list_val_list);
+List_iterator<part_elem_value> list_val_it2(part_def->list_val_list);
 while ((list_value= list_val_it2++))
 {
-list_array[list_index].list_value= *list_value;
+calc_value= list_value->value - type_add;
+list_array[list_index].list_value= calc_value;
 list_array[list_index++].partition_id= i;
 }
 } while (++i < no_parts);

-qsort((void*)list_array, no_list_values, sizeof(LIST_PART_ENTRY),
-&list_part_cmp);
-
-not_first= FALSE;
-prev_value= 0; // prev_value initialised to quiet compiler
-for (i= 0; i < no_list_values ; i++)
+if (fixed && no_list_values)
 {
-curr_value= list_array[i].list_value;
-if (likely(!not_first || prev_value != curr_value))
+bool first= TRUE;
+qsort((void*)list_array, no_list_values, sizeof(LIST_PART_ENTRY),
+&list_part_cmp);
+
+i= prev_value= 0; //prev_value initialised to quiet compiler
+do
 {
-prev_value= curr_value;
-not_first= TRUE;
-}
-else
+DBUG_ASSERT(i < no_list_values);
+curr_value= list_array[i].list_value;
+if (likely(first || prev_value != curr_value))
 {
-my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0));
-goto end;
+prev_value= curr_value;
+first= FALSE;
 }
+else
+{
+my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0));
+goto end;
+}
+} while (++i < no_list_values);
 }
 result= FALSE;
 end:
@@ -677,7 +707,7 @@ end:

 */

-bool partition_info::check_partition_info(handlerton **eng_type,
+bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
 handler *file, ulonglong max_rows)
 {
 handlerton **engine_array= NULL;
@@ -784,9 +814,12 @@ bool partition_info::check_partition_info(handlerton **eng_type,
 list constants.
 */

-if (unlikely((part_type == RANGE_PARTITION && check_range_constants()) ||
-(part_type == LIST_PARTITION && check_list_constants())))
-goto end;
+if (fixed)
+{
+if (unlikely((part_type == RANGE_PARTITION && check_range_constants()) ||
+(part_type == LIST_PARTITION && check_list_constants())))
+goto end;
+}
 result= FALSE;
 end:
 my_free((char*)engine_array,MYF(MY_ALLOW_ZERO_PTR));
@@ -794,4 +827,28 @@ end:
 }

+
+/*
+Print error for no partition found
+SYNOPSIS
+print_no_partition_found()
+table Table object
+RETURN VALUES
+NONE
+*/
+
+void partition_info::print_no_partition_found(TABLE *table)
+{
+char buf[100];
+char *buf_ptr= (char*)&buf;
+my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+
+if (part_expr->null_value)
+buf_ptr= (char*)"NULL";
+else
+longlong2str(part_expr->val_int(), buf,
+part_expr->unsigned_flag ? 10 : -10);
+my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0), buf_ptr);
+dbug_tmp_restore_column_map(table->read_set, old_map);
+}
+
 #endif /* WITH_PARTITION_STORAGE_ENGINE */
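
print_no_partition_found() above formats the same 64-bit value either as signed or as unsigned, depending on the partition expression's unsigned_flag; that is what the 10 vs -10 radix argument selects. A plain C++ sketch of the effect (not the server's longlong2str; the helper name here is illustrative):

    // print_value.cc -- illustrative only.
    #include <cstdio>

    static void print_part_value(long long v, bool unsigned_flag)
    {
      if (unsigned_flag)
        printf("%llu\n", (unsigned long long)v);   // e.g. 18446744073709551615
      else
        printf("%lld\n", v);                       // e.g. -1
    }

    int main()
    {
      print_part_value(-1, false);
      print_part_value(-1, true);
      return 0;
    }
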
@@ -167,6 +167,7 @@ public:
 uint no_subpart_fields;
 uint no_full_part_fields;

+uint has_null_part_id;
 /*
 This variable is used to calculate the partition id when using
 LINEAR KEY/HASH. This functionality is kept in the MySQL Server
@@ -187,7 +188,6 @@ public:
 bool is_auto_partitioned;
 bool from_openfrm;
 bool has_null_value;
-uint has_null_part_id;


 partition_info()
@@ -210,20 +210,14 @@ public:
 no_parts(0), no_subparts(0),
 count_curr_subparts(0), part_error_code(0),
 no_list_values(0), no_part_fields(0), no_subpart_fields(0),
-no_full_part_fields(0), linear_hash_mask(0),
-use_default_partitions(TRUE),
-use_default_no_partitions(TRUE),
-use_default_subpartitions(TRUE),
-use_default_no_subpartitions(TRUE),
-default_partitions_setup(FALSE),
-defined_max_value(FALSE),
+no_full_part_fields(0), has_null_part_id(0), linear_hash_mask(0),
+use_default_partitions(TRUE), use_default_no_partitions(TRUE),
+use_default_subpartitions(TRUE), use_default_no_subpartitions(TRUE),
+default_partitions_setup(FALSE), defined_max_value(FALSE),
 list_of_part_fields(FALSE), list_of_subpart_fields(FALSE),
-linear_hash_ind(FALSE),
-fixed(FALSE),
-is_auto_partitioned(FALSE),
-from_openfrm(FALSE),
-has_null_value(FALSE),
-has_null_part_id(0)
+linear_hash_ind(FALSE), fixed(FALSE),
+is_auto_partitioned(FALSE), from_openfrm(FALSE),
+has_null_value(FALSE)
 {
 all_fields_in_PF.clear_all();
 all_fields_in_PPF.clear_all();
@@ -255,10 +249,12 @@ public:
 static bool check_engine_mix(handlerton **engine_array, uint no_parts);
 bool check_range_constants();
 bool check_list_constants();
-bool check_partition_info(handlerton **eng_type,
+bool check_partition_info(THD *thd, handlerton **eng_type,
 handler *file, ulonglong max_rows);
+void print_no_partition_found(TABLE *table);
 private:
 static int list_part_cmp(const void* a, const void* b);
+static int list_part_cmp_unsigned(const void* a, const void* b);
 bool set_up_default_partitions(handler *file, ulonglong max_rows,
 uint start_no);
 bool set_up_default_subpartitions(handler *file, ulonglong max_rows);
@@ -548,6 +548,8 @@ sys_ndb_index_stat_update_freq("ndb_index_stat_update_freq",
 &SV::ndb_index_stat_update_freq);
 sys_var_long_ptr
 sys_ndb_extra_logging("ndb_extra_logging", &ndb_extra_logging);
+sys_var_thd_bool
+sys_ndb_use_copying_alter_table("ndb_use_copying_alter_table", &SV::ndb_use_copying_alter_table);

 /* Time/date/datetime formats */

@@ -917,6 +919,8 @@ SHOW_VAR init_vars[]= {
 {sys_ndb_report_thresh_binlog_mem_usage.name,
 (char*) &sys_ndb_report_thresh_binlog_mem_usage, SHOW_SYS},
 #endif
+{sys_ndb_use_copying_alter_table.name,
+(char*) &sys_ndb_use_copying_alter_table, SHOW_SYS},
 {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS},
 {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS},
 {sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS},
@@ -5810,6 +5810,9 @@ ER_NDB_CANT_SWITCH_BINLOG_FORMAT
 eng "The NDB cluster engine does not support changing the binlog format on the fly yet"
 ER_PARTITION_NO_TEMPORARY
 eng "Cannot create temporary table with partitions"
+ER_PARTITION_CONST_DOMAIN_ERROR
+eng "Partition constant is out of partition function domain"
+swe "Partitionskonstanten är utanför partitioneringsfunktionens domän"
 ER_PARTITION_FUNCTION_IS_NOT_ALLOWED
 eng "This partition function is not allowed"
 swe "Denna partitioneringsfunktion är inte tillåten"
@@ -5830,5 +5833,5 @@ ER_EVENT_MODIFY_QUEUE_ERROR
 ER_EVENT_SET_VAR_ERROR
 eng "Error during starting/stopping of the scheduler. Error code %u"
 ER_PARTITION_MERGE_ERROR
-eng "MyISAM Merge handler cannot be used in partitioned tables"
-swe "MyISAM Merge kan inte anändas i en partitionerad tabell"
+eng "%s handler cannot be used in partitioned tables"
+swe "%s kan inte användas i en partitionerad tabell"
@@ -244,6 +244,7 @@ struct system_variables
 my_bool innodb_table_locks;
 my_bool innodb_support_xa;
 my_bool ndb_force_send;
+my_bool ndb_use_copying_alter_table;
 my_bool ndb_use_exact_count;
 my_bool ndb_use_transactions;
 my_bool ndb_index_stat_enable;
@@ -1,4 +1,4 @@
-/* Copyright (C) 2005 MySQL AB
+/* Copyright (C) 2005, 2006 MySQL AB

 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -61,7 +61,6 @@ static const char *equal_str= "=";
 static const char *end_paren_str= ")";
 static const char *begin_paren_str= "(";
 static const char *comma_str= ",";
-static char buff[22];

 int get_partition_id_list(partition_info *part_info,
 uint32 *part_id,
@@ -189,9 +188,9 @@ bool is_name_in_list(char *name,
 SYNOPSIS
 partition_default_handling()
 table Table object
-table_name Table name to use when getting no_parts
-db_name Database name to use when getting no_parts
 part_info Partition info to set up
+is_create_table_ind Is this part of a table creation
+normalized_path Normalized path name of table and database

 RETURN VALUES
 TRUE Error
@@ -238,8 +237,8 @@ bool partition_default_handling(TABLE *table, partition_info *part_info,
 check_reorganise_list()
 new_part_info New partition info
 old_part_info Old partition info
-list_part_names The list of partition names that will go away and can be reused in the
-new table.
+list_part_names The list of partition names that will go away and
+can be reused in the new table.

 RETURN VALUES
 TRUE Inacceptable name conflict detected.
@@ -793,6 +792,43 @@ end:
 }

+
+/*
+Support function to check if all VALUES * (expression) is of the
+right sign (no signed constants when unsigned partition function)
+
+SYNOPSIS
+check_signed_flag()
+part_info Partition info object
+
+RETURN VALUES
+0 No errors due to sign errors
+>0 Sign error
+*/
+
+int check_signed_flag(partition_info *part_info)
+{
+int error= 0;
+uint i= 0;
+if (part_info->part_type != HASH_PARTITION &&
+part_info->part_expr->unsigned_flag)
+{
+List_iterator<partition_element> part_it(part_info->partitions);
+do
+{
+partition_element *part_elem= part_it++;
+
+if (part_elem->signed_flag)
+{
+my_error(ER_PARTITION_CONST_DOMAIN_ERROR, MYF(0));
+error= ER_PARTITION_CONST_DOMAIN_ERROR;
+break;
+}
+} while (++i < part_info->no_parts);
+}
+return error;
+}
+
+
 /*
 The function uses a new feature in fix_fields where the flag
 GET_FIXED_FIELDS_FLAG is set for all fields in the item tree.
@@ -802,10 +838,11 @@ end:
 SYNOPSIS
 fix_fields_part_func()
 thd The thread object
-tables A list of one table, the partitioned table
 func_expr The item tree reference of the partition function
+table The table object
 part_info Reference to partitioning data structure
-sub_part Is the table subpartitioned as well
+is_sub_part Is the table subpartitioned as well
+is_field_to_be_setup Flag if we are to set-up field arrays

 RETURN VALUE
 TRUE An error occurred, something was wrong with the
@@ -828,26 +865,54 @@ end:
 on the field object.
 */

-static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
-Item* func_expr, partition_info *part_info,
-bool is_sub_part)
+bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
+bool is_sub_part, bool is_field_to_be_setup)
 {
+partition_info *part_info= table->part_info;
+uint dir_length, home_dir_length;
 bool result= TRUE;
-TABLE *table= tables->table;
+TABLE_LIST tables;
 TABLE_LIST *save_table_list, *save_first_table, *save_last_table;
 int error;
 Name_resolution_context *context;
 const char *save_where;
+char* db_name;
+char db_name_string[FN_REFLEN];
 DBUG_ENTER("fix_fields_part_func");
+
+if (part_info->fixed)
+{
+if (!(is_sub_part || (error= check_signed_flag(part_info))))
+result= FALSE;
+goto end;
+}
+
+/*
+Set-up the TABLE_LIST object to be a list with a single table
+Set the object to zero to create NULL pointers and set alias
+and real name to table name and get database name from file name.
+*/
+
+bzero((void*)&tables, sizeof(TABLE_LIST));
+tables.alias= tables.table_name= (char*) table->s->table_name.str;
+tables.table= table;
+tables.next_local= 0;
+tables.next_name_resolution_table= 0;
+strmov(db_name_string, table->s->normalized_path.str);
+dir_length= dirname_length(db_name_string);
+db_name_string[dir_length - 1]= 0;
+home_dir_length= dirname_length(db_name_string);
+db_name= &db_name_string[home_dir_length];
+tables.db= db_name;
+
 context= thd->lex->current_context();
 table->map= 1; //To ensure correct calculation of const item
 table->get_fields_in_item_tree= TRUE;
 save_table_list= context->table_list;
 save_first_table= context->first_name_resolution_table;
 save_last_table= context->last_name_resolution_table;
-context->table_list= tables;
-context->first_name_resolution_table= tables;
+context->table_list= &tables;
+context->first_name_resolution_table= &tables;
 context->last_name_resolution_table= NULL;
 func_expr->walk(&Item::change_context_processor, 0, (byte*) context);
 save_where= thd->where;
@@ -859,7 +924,8 @@ static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
 if (unlikely(error))
 {
 DBUG_PRINT("info", ("Field in partition function not part of table"));
-clear_field_flag(table);
+if (is_field_to_be_setup)
+clear_field_flag(table);
 goto end;
 }
 thd->where= save_where;
@@ -869,7 +935,13 @@ static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
 clear_field_flag(table);
 goto end;
 }
-result= set_up_field_array(table, is_sub_part);
+if ((!is_sub_part) && (error= check_signed_flag(part_info)))
+goto end;
+result= FALSE;
+if (is_field_to_be_setup)
+result= set_up_field_array(table, is_sub_part);
+if (!is_sub_part)
+part_info->fixed= TRUE;
 end:
 table->get_fields_in_item_tree= FALSE;
 table->map= 0; //Restore old value
@@ -1303,9 +1375,8 @@ static uint32 get_part_id_from_linear_hash(longlong hash_value, uint mask,
 SYNOPSIS
 fix_partition_func()
 thd The thread object
-name The name of the partitioned table
 table TABLE object for which partition fields are set-up
-create_table_ind Indicator of whether openfrm was called as part of
+is_create_table_ind Indicator of whether openfrm was called as part of
 CREATE or ALTER TABLE

 RETURN VALUE
@@ -1325,15 +1396,10 @@ NOTES
 of an error that is not discovered until here.
 */

-bool fix_partition_func(THD *thd, const char* name, TABLE *table,
+bool fix_partition_func(THD *thd, TABLE *table,
 bool is_create_table_ind)
 {
 bool result= TRUE;
-uint dir_length, home_dir_length;
-TABLE_LIST tables;
-TABLE_SHARE *share= table->s;
-char db_name_string[FN_REFLEN];
-char* db_name;
 partition_info *part_info= table->part_info;
 enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
 Item *thd_free_list= thd->free_list;
@@ -1345,23 +1411,6 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table,
 }
 thd->mark_used_columns= MARK_COLUMNS_NONE;
 DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
-/*
-Set-up the TABLE_LIST object to be a list with a single table
-Set the object to zero to create NULL pointers and set alias
-and real name to table name and get database name from file name.
-*/
-
-bzero((void*)&tables, sizeof(TABLE_LIST));
-tables.alias= tables.table_name= (char*) share->table_name.str;
-tables.table= table;
-tables.next_local= 0;
-tables.next_name_resolution_table= 0;
-strmov(db_name_string, name);
-dir_length= dirname_length(db_name_string);
-db_name_string[dir_length - 1]= 0;
-home_dir_length= dirname_length(db_name_string);
-db_name= &db_name_string[home_dir_length];
-tables.db= db_name;

 if (!is_create_table_ind ||
 thd->lex->sql_command != SQLCOM_CREATE_TABLE)
@@ -1391,9 +1440,8 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table,
 }
 else
 {
-if (unlikely(fix_fields_part_func(thd, &tables,
-part_info->subpart_expr, part_info,
-TRUE)))
+if (unlikely(fix_fields_part_func(thd, part_info->subpart_expr,
+table, TRUE, TRUE)))
 goto end;
 if (unlikely(part_info->subpart_expr->result_type() != INT_RESULT))
 {
@@ -1420,8 +1468,8 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table,
 }
 else
 {
-if (unlikely(fix_fields_part_func(thd, &tables, part_info->part_expr,
-part_info, FALSE)))
+if (unlikely(fix_fields_part_func(thd, part_info->part_expr,
+table, FALSE, TRUE)))
 goto end;
 if (unlikely(part_info->part_expr->result_type() != INT_RESULT))
 {
@@ -1434,6 +1482,9 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table,
 else
 {
 const char *error_str;
+if (unlikely(fix_fields_part_func(thd, part_info->part_expr,
+table, FALSE, TRUE)))
+goto end;
 if (part_info->part_type == RANGE_PARTITION)
 {
 error_str= partition_keywords[PKW_RANGE].str;
@@ -1457,9 +1508,6 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table,
 my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_str);
 goto end;
 }
-if (unlikely(fix_fields_part_func(thd, &tables, part_info->part_expr,
-part_info, FALSE)))
-goto end;
 if (unlikely(part_info->part_expr->result_type() != INT_RESULT))
 {
 my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0), part_str);
@@ -1479,7 +1527,6 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table,
 check_range_capable_PF(table);
 set_up_partition_key_maps(table, part_info);
 set_up_partition_func_pointers(part_info);
-part_info->fixed= TRUE;
 set_up_range_analysis_info(part_info);
 result= FALSE;
 end:
@@ -1563,6 +1610,7 @@ static int add_hash(File fptr)

 static int add_partition(File fptr)
 {
+char buff[22];
 strxmov(buff, part_str, space_str, NullS);
 return add_string(fptr, buff);
 }
@@ -1576,6 +1624,7 @@ static int add_subpartition(File fptr)

 static int add_partition_by(File fptr)
 {
+char buff[22];
 strxmov(buff, part_str, space_str, by_str, space_str, NullS);
 return add_string(fptr, buff);
 }
@@ -1631,10 +1680,18 @@ static int add_name_string(File fptr, const char *name)

 static int add_int(File fptr, longlong number)
 {
+char buff[32];
 llstr(number, buff);
 return add_string(fptr, buff);
 }

+static int add_uint(File fptr, ulonglong number)
+{
+char buff[32];
+longlong2str(number, buff, 10);
+return add_string(fptr, buff);
+}
+
 static int add_keyword_string(File fptr, const char *keyword,
 bool should_use_quotes,
 const char *keystr)
@@ -1696,18 +1753,20 @@ static int add_partition_options(File fptr, partition_element *p_elem)
 return err + add_engine(fptr,p_elem->engine_type);
 }

-static int add_partition_values(File fptr, partition_info *part_info,
-partition_element *p_elem)
+static int add_partition_values(File fptr, partition_info *part_info, partition_element *p_elem)
 {
 int err= 0;

 if (part_info->part_type == RANGE_PARTITION)
 {
 err+= add_string(fptr, " VALUES LESS THAN ");
-if (p_elem->range_value != LONGLONG_MAX)
+if (!p_elem->max_value)
 {
 err+= add_begin_parenthesis(fptr);
-err+= add_int(fptr, p_elem->range_value);
+if (p_elem->signed_flag)
+err+= add_int(fptr, p_elem->range_value);
+else
+err+= add_uint(fptr, p_elem->range_value);
 err+= add_end_parenthesis(fptr);
 }
 else
@@ -1716,9 +1775,10 @@ static int add_partition_values(File fptr, partition_info *part_info,
 else if (part_info->part_type == LIST_PARTITION)
 {
 uint i;
-List_iterator<longlong> list_val_it(p_elem->list_val_list);
+List_iterator<part_elem_value> list_val_it(p_elem->list_val_list);
 err+= add_string(fptr, " VALUES IN ");
 uint no_items= p_elem->list_val_list.elements;

 err+= add_begin_parenthesis(fptr);
 if (p_elem->has_null_value)
 {
@@ -1733,8 +1793,12 @@ static int add_partition_values(File fptr, partition_info *part_info,
 i= 0;
 do
 {
-longlong *list_value= list_val_it++;
-err+= add_int(fptr, *list_value);
+part_elem_value *list_value= list_val_it++;
+
+if (!list_value->unsigned_flag)
+err+= add_int(fptr, list_value->value);
+else
+err+= add_uint(fptr, list_value->value);
 if (i != (no_items-1))
 err+= add_comma(fptr);
 } while (++i < no_items);
@@ -2263,15 +2327,16 @@ static uint32 get_part_id_linear_key(partition_info *part_info,


 int get_partition_id_list(partition_info *part_info,
 uint32 *part_id,
 longlong *func_value)
 {
 LIST_PART_ENTRY *list_array= part_info->list_array;
 int list_index;
-longlong list_value;
 int min_list_index= 0;
 int max_list_index= part_info->no_list_values - 1;
 longlong part_func_value= part_val_int(part_info->part_expr);
+longlong list_value;
+bool unsigned_flag= part_info->part_expr->unsigned_flag;
 DBUG_ENTER("get_partition_id_list");

 if (part_info->part_expr->null_value)
@@ -2284,6 +2349,8 @@ int get_partition_id_list(partition_info *part_info,
 goto notfound;
 }
 *func_value= part_func_value;
+if (unsigned_flag)
+part_func_value-= 0x8000000000000000ULL;
 while (max_list_index >= min_list_index)
 {
 list_index= (max_list_index + min_list_index) >> 1;
@@ -2350,14 +2417,19 @@ uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
 bool left_endpoint,
 bool include_endpoint)
 {
-DBUG_ENTER("get_list_array_idx_for_endpoint");
 LIST_PART_ENTRY *list_array= part_info->list_array;
 uint list_index;
-longlong list_value;
 uint min_list_index= 0, max_list_index= part_info->no_list_values - 1;
+longlong list_value;
 /* Get the partitioning function value for the endpoint */
 longlong part_func_value= part_val_int(part_info->part_expr);
-while (max_list_index >= min_list_index)
+bool unsigned_flag= part_info->part_expr->unsigned_flag;
+DBUG_ENTER("get_list_array_idx_for_endpoint");
+
+if (unsigned_flag)
+part_func_value-= 0x8000000000000000ULL;
+DBUG_ASSERT(part_info->no_list_values);
+do
 {
 list_index= (max_list_index + min_list_index) >> 1;
 list_value= list_array[list_index].list_value;
@@ -2373,7 +2445,7 @@ uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
 {
 DBUG_RETURN(list_index + test(left_endpoint ^ include_endpoint));
 }
-}
+} while (max_list_index >= min_list_index);
 notfound:
 if (list_value < part_func_value)
 list_index++;
@ -2391,13 +2463,17 @@ int get_partition_id_range(partition_info *part_info,
|
|||||||
uint max_part_id= max_partition;
|
uint max_part_id= max_partition;
|
||||||
uint loc_part_id;
|
uint loc_part_id;
|
||||||
longlong part_func_value= part_val_int(part_info->part_expr);
|
longlong part_func_value= part_val_int(part_info->part_expr);
|
||||||
DBUG_ENTER("get_partition_id_int_range");
|
bool unsigned_flag= part_info->part_expr->unsigned_flag;
|
||||||
|
DBUG_ENTER("get_partition_id_range");
|
||||||
|
|
||||||
if (part_info->part_expr->null_value)
|
if (part_info->part_expr->null_value)
|
||||||
{
|
{
|
||||||
*part_id= 0;
|
*part_id= 0;
|
||||||
DBUG_RETURN(0);
|
DBUG_RETURN(0);
|
||||||
}
|
}
|
||||||
|
*func_value= part_func_value;
|
||||||
|
if (unsigned_flag)
|
||||||
|
part_func_value-= 0x8000000000000000ULL;
|
||||||
while (max_part_id > min_part_id)
|
while (max_part_id > min_part_id)
|
||||||
{
|
{
|
||||||
loc_part_id= (max_part_id + min_part_id + 1) >> 1;
|
loc_part_id= (max_part_id + min_part_id + 1) >> 1;
|
||||||
@ -2411,7 +2487,6 @@ int get_partition_id_range(partition_info *part_info,
|
|||||||
if (loc_part_id != max_partition)
|
if (loc_part_id != max_partition)
|
||||||
loc_part_id++;
|
loc_part_id++;
|
||||||
*part_id= (uint32)loc_part_id;
|
*part_id= (uint32)loc_part_id;
|
||||||
*func_value= part_func_value;
|
|
||||||
if (loc_part_id == max_partition &&
|
if (loc_part_id == max_partition &&
|
||||||
range_array[loc_part_id] != LONGLONG_MAX &&
|
range_array[loc_part_id] != LONGLONG_MAX &&
|
||||||
part_func_value >= range_array[loc_part_id])
|
part_func_value >= range_array[loc_part_id])
|
||||||
@ -2467,13 +2542,16 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
|
|||||||
bool left_endpoint,
|
bool left_endpoint,
|
||||||
bool include_endpoint)
|
bool include_endpoint)
|
||||||
{
|
{
|
||||||
DBUG_ENTER("get_partition_id_range_for_endpoint");
|
|
||||||
longlong *range_array= part_info->range_int_array;
|
longlong *range_array= part_info->range_int_array;
|
||||||
uint max_partition= part_info->no_parts - 1;
|
uint max_partition= part_info->no_parts - 1;
|
||||||
uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
|
uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
|
||||||
/* Get the partitioning function value for the endpoint */
|
/* Get the partitioning function value for the endpoint */
|
||||||
longlong part_func_value= part_val_int(part_info->part_expr);
|
longlong part_func_value= part_val_int(part_info->part_expr);
|
||||||
|
bool unsigned_flag= part_info->part_expr->unsigned_flag;
|
||||||
|
DBUG_ENTER("get_partition_id_range_for_endpoint");
|
||||||
|
|
||||||
|
if (unsigned_flag)
|
||||||
|
part_func_value-= 0x8000000000000000ULL;
|
||||||
while (max_part_id > min_part_id)
|
while (max_part_id > min_part_id)
|
||||||
{
|
{
|
||||||
loc_part_id= (max_part_id + min_part_id + 1) >> 1;
|
loc_part_id= (max_part_id + min_part_id + 1) >> 1;
|
||||||
@ -2486,7 +2564,7 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
|
|||||||
if (loc_part_id < max_partition &&
|
if (loc_part_id < max_partition &&
|
||||||
part_func_value >= range_array[loc_part_id+1])
|
part_func_value >= range_array[loc_part_id+1])
|
||||||
{
|
{
|
||||||
loc_part_id++;
|
loc_part_id++;
|
||||||
}
|
}
|
||||||
if (left_endpoint)
|
if (left_endpoint)
|
||||||
{
|
{
|
||||||
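Note on the hunks above: when the partitioning expression is unsigned, the new code re-biases the value with part_func_value-= 0x8000000000000000ULL; so that the existing signed longlong comparisons in the binary searches still order unsigned values correctly. A minimal standalone sketch of that order-preserving mapping (illustrative code, not part of this patch; assumes a two's-complement platform):

    #include <cassert>
    #include <cstdint>

    /* Map an unsigned 64-bit value into signed order by subtracting 2^63,
       mirroring the bias applied to part_func_value in the patch. */
    static int64_t to_signed_order(uint64_t v)
    {
      return (int64_t)(v - 0x8000000000000000ULL);   /* wraps modulo 2^64 */
    }

    int main()
    {
      uint64_t small_val= 5;
      uint64_t big_val= 0xFFFFFFFFFFFFFFFFULL;       /* larger as unsigned */
      /* Compared as raw signed bit patterns this would give -1 < 5; after
         the bias the signed comparison agrees with unsigned order. */
      assert(to_signed_order(small_val) < to_signed_order(big_val));
      return 0;
    }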
@@ -4466,7 +4544,7 @@ the generated partition syntax in a correct manner.
       tab_part_info->use_default_subpartitions= FALSE;
       tab_part_info->use_default_no_subpartitions= FALSE;
     }
-    if (tab_part_info->check_partition_info((handlerton**)NULL,
+    if (tab_part_info->check_partition_info(thd, (handlerton**)NULL,
                                             table->file, ULL(0)))
     {
       DBUG_RETURN(TRUE);
@@ -6305,6 +6383,18 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
     part_iter->get_next= get_next_partition_id_list;
     part_iter->part_info= part_info;
     part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
+    if (max_endpoint_val == 0)
+    {
+      /*
+        We handle this special case without optimisations since it is
+        of little practical value but causes a great number of complex
+        checks later in the code.
+      */
+      part_iter->part_nums.start= part_iter->part_nums.end= 0;
+      part_iter->part_nums.cur= 0;
+      part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
+      return -1;
+    }
   }
   else
     DBUG_ASSERT(0);

@@ -65,9 +65,8 @@ int get_part_for_delete(const byte *buf, const byte *rec0,
                         partition_info *part_info, uint32 *part_id);
 void prune_partition_set(const TABLE *table, part_id_range *part_spec);
 bool check_partition_info(partition_info *part_info,handlerton **eng_type,
-                          handler *file, ulonglong max_rows);
+                          TABLE *table, handler *file, ulonglong max_rows);
-bool fix_partition_func(THD *thd, const char *name, TABLE *table,
+bool fix_partition_func(THD *thd, TABLE *table, bool create_table_ind);
-                        bool create_table_ind);
 char *generate_partition_syntax(partition_info *part_info,
                                 uint *buf_length, bool use_sql_alloc,
                                 bool show_partition_options);
@@ -91,6 +90,8 @@ uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
 uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
                                            bool left_endpoint,
                                            bool include_endpoint);
+bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
+                          bool is_sub_part, bool is_field_to_be_setup);

 /*
   A "Get next" function for partition iterator.

@@ -23,7 +23,7 @@ extern struct st_mysql_plugin *mysqld_builtins[];

 char *opt_plugin_dir_ptr;
 char opt_plugin_dir[FN_REFLEN];
-LEX_STRING plugin_type_names[MYSQL_MAX_PLUGIN_TYPE_NUM]=
+const LEX_STRING plugin_type_names[MYSQL_MAX_PLUGIN_TYPE_NUM]=
 {
   { (char *)STRING_WITH_LEN("UDF") },
   { (char *)STRING_WITH_LEN("STORAGE ENGINE") },
@@ -63,7 +63,7 @@ static HASH plugin_hash[MYSQL_MAX_PLUGIN_TYPE_NUM];
 static rw_lock_t THR_LOCK_plugin;
 static bool initialized= 0;

-static struct st_plugin_dl *plugin_dl_find(LEX_STRING *dl)
+static struct st_plugin_dl *plugin_dl_find(const LEX_STRING *dl)
 {
   uint i;
   DBUG_ENTER("plugin_dl_find");
@@ -112,7 +112,7 @@ static inline void free_plugin_mem(struct st_plugin_dl *p)
     my_free((gptr)p->plugins, MYF(MY_ALLOW_ZERO_PTR));
 }

-static st_plugin_dl *plugin_dl_add(LEX_STRING *dl, int report)
+static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
 {
 #ifdef HAVE_DLOPEN
   char dlpath[FN_REFLEN];
@@ -294,7 +294,7 @@ static st_plugin_dl *plugin_dl_add(LEX_STRING *dl, int report)
 }


-static void plugin_dl_del(LEX_STRING *dl)
+static void plugin_dl_del(const LEX_STRING *dl)
 {
 #ifdef HAVE_DLOPEN
   uint i;
@@ -322,7 +322,7 @@ static void plugin_dl_del(LEX_STRING *dl)
 }


-static struct st_plugin_int *plugin_find_internal(LEX_STRING *name, int type)
+static struct st_plugin_int *plugin_find_internal(const LEX_STRING *name, int type)
 {
   uint i;
   DBUG_ENTER("plugin_find_internal");
@@ -345,7 +345,7 @@ static struct st_plugin_int *plugin_find_internal(LEX_STRING *name, int type)
 }


-my_bool plugin_is_ready(LEX_STRING *name, int type)
+my_bool plugin_is_ready(const LEX_STRING *name, int type)
 {
   my_bool rc= FALSE;
   struct st_plugin_int *plugin;
@@ -359,7 +359,7 @@ my_bool plugin_is_ready(LEX_STRING *name, int type)
 }


-struct st_plugin_int *plugin_lock(LEX_STRING *name, int type)
+struct st_plugin_int *plugin_lock(const LEX_STRING *name, int type)
 {
   struct st_plugin_int *rc;
   DBUG_ENTER("plugin_lock");
@@ -396,7 +396,7 @@ static st_plugin_int *plugin_insert_or_reuse(struct st_plugin_int *plugin)
                                struct st_plugin_int *));
 }

-static my_bool plugin_add(LEX_STRING *name, LEX_STRING *dl, int report)
+static my_bool plugin_add(const LEX_STRING *name, const LEX_STRING *dl, int report)
 {
   struct st_plugin_int tmp;
   struct st_mysql_plugin *plugin;
@@ -479,7 +479,7 @@ err:
 }


-static void plugin_del(LEX_STRING *name)
+static void plugin_del(const LEX_STRING *name)
 {
   uint i;
   struct st_plugin_int *plugin;
@@ -811,7 +811,7 @@ void plugin_free(void)
 }


-my_bool mysql_install_plugin(THD *thd, LEX_STRING *name, LEX_STRING *dl)
+my_bool mysql_install_plugin(THD *thd, const LEX_STRING *name, const LEX_STRING *dl)
 {
   TABLE_LIST tables;
   TABLE *table;
@@ -866,7 +866,7 @@ err:
 }


-my_bool mysql_uninstall_plugin(THD *thd, LEX_STRING *name)
+my_bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name)
 {
   TABLE *table;
   TABLE_LIST tables;

@@ -66,15 +66,15 @@ typedef int (*plugin_type_init)(struct st_plugin_int *);

 extern char *opt_plugin_dir_ptr;
 extern char opt_plugin_dir[FN_REFLEN];
-extern LEX_STRING plugin_type_names[];
+extern const LEX_STRING plugin_type_names[];
 extern int plugin_init(void);
 extern void plugin_load(void);
 extern void plugin_free(void);
-extern my_bool plugin_is_ready(LEX_STRING *name, int type);
+extern my_bool plugin_is_ready(const LEX_STRING *name, int type);
-extern st_plugin_int *plugin_lock(LEX_STRING *name, int type);
+extern st_plugin_int *plugin_lock(const LEX_STRING *name, int type);
 extern void plugin_unlock(struct st_plugin_int *plugin);
-extern my_bool mysql_install_plugin(THD *thd, LEX_STRING *name, LEX_STRING *dl);
+extern my_bool mysql_install_plugin(THD *thd, const LEX_STRING *name, const LEX_STRING *dl);
-extern my_bool mysql_uninstall_plugin(THD *thd, LEX_STRING *name);
+extern my_bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name);

 extern my_bool plugin_register_builtin(struct st_mysql_plugin *plugin);


@@ -2396,7 +2396,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
   ST_SCHEMA_TABLE *schema_table= tables->schema_table;
   SELECT_LEX sel;
   INDEX_FIELD_VALUES idx_field_vals;
-  char path[FN_REFLEN], *end, *base_name, *file_name;
+  char path[FN_REFLEN], *end, *base_name, *orig_base_name, *file_name;
   uint len;
   bool with_i_schema;
   enum enum_schema_tables schema_table_idx;
@@ -2476,7 +2476,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)

   partial_cond= make_cond_for_info_schema(cond, tables);
   it.rewind(); /* To get access to new elements in basis list */
-  while ((base_name= it++) ||
+  while ((orig_base_name= base_name= it++) ||
          /*
            generate error for non existing database.
            (to save old behaviour for SHOW TABLES FROM db)
@@ -2507,6 +2507,8 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
       if (mysql_find_files(thd, &files, base_name,
                            path, idx_field_vals.table_value, 0))
         goto err;
+      if (lower_case_table_names)
+        orig_base_name= thd->strdup(base_name);
     }

     List_iterator_fast<char> it_files(files);
@@ -2575,7 +2577,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
             in this case.
           */
           res= schema_table->process_table(thd, show_table_list, table,
-                                           res, base_name,
+                                           res, orig_base_name,
                                            show_table_list->alias);
           close_tables_for_reopen(thd, &show_table_list);
           DBUG_ASSERT(!lex->query_tables_own_last);
@@ -4000,8 +4002,8 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables,
     }
     else if (part_info->part_type == LIST_PARTITION)
     {
-      List_iterator<longlong> list_val_it(part_elem->list_val_list);
+      List_iterator<part_elem_value> list_val_it(part_elem->list_val_list);
-      longlong *list_value;
+      part_elem_value *list_value;
       uint no_items= part_elem->list_val_list.elements;
       tmp_str.length(0);
       tmp_res.length(0);
@@ -4013,7 +4015,10 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables,
       }
       while ((list_value= list_val_it++))
       {
-        tmp_res.set(*list_value, cs);
+        if (!list_value->unsigned_flag)
+          tmp_res.set(list_value->value, cs);
+        else
+          tmp_res.set((ulonglong)list_value->value, cs);
         tmp_str.append(tmp_res);
         if (--no_items != 0)
           tmp_str.append(",");
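Note on the INFORMATION_SCHEMA hunk above: the same 64-bit list value has to be rendered differently depending on the new unsigned_flag, which is why the call to tmp_res.set() is split into a signed and an unsigned branch. A tiny standalone illustration (not MySQL code) of why the distinction matters:

    #include <cinttypes>
    #include <cstdio>

    int main()
    {
      int64_t v= -1;   /* same bit pattern as 0xFFFFFFFFFFFFFFFF */
      std::printf("as signed:   %" PRId64 "\n", v);             /* -1 */
      std::printf("as unsigned: %" PRIu64 "\n", (uint64_t)v);   /* 18446744073709551615 */
      return 0;
    }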

@@ -3144,7 +3144,7 @@ bool mysql_create_table_internal(THD *thd,
     }
     DBUG_PRINT("info", ("db_type = %d",
                         ha_legacy_type(part_info->default_engine_type)));
-    if (part_info->check_partition_info(&engine_type, file,
+    if (part_info->check_partition_info(thd, &engine_type, file,
                                         create_info->max_rows))
       goto err;
     part_info->default_engine_type= engine_type;
@@ -4709,6 +4709,14 @@ static uint compare_tables(TABLE *table, List<create_field> *create_list,
     At the moment we can't handle altering temporary tables without a copy.
     We also test if OPTIMIZE TABLE was given and was mapped to alter table.
     In that case we always do full copy.
+
+    There was a bug prior to mysql-4.0.25. Number of null fields was
+    calculated incorrectly. As a result frm and data files gets out of
+    sync after fast alter table. There is no way to determine by which
+    mysql version (in 4.0 and 4.1 branches) table was created, thus we
+    disable fast alter table for all tables created by mysql versions
+    prior to 5.0 branch.
+    See BUG#6236.
   */
   if (table->s->fields != create_list->elements ||
       table->s->db_type != create_info->db_type ||
@@ -4718,6 +4726,7 @@ static uint compare_tables(TABLE *table, List<create_field> *create_list,
       create_info->used_fields & HA_CREATE_USED_DEFAULT_CHARSET ||
       (alter_info->flags & (ALTER_RECREATE | ALTER_FOREIGN_KEY)) ||
       order_num ||
+      !table->s->mysql_version ||
       (table->s->frm_version < FRM_VER_TRUE_VARCHAR && varchar))
     DBUG_RETURN(ALTER_TABLE_DATA_CHANGED);

@@ -4744,6 +4753,13 @@ static uint compare_tables(TABLE *table, List<create_field> *create_list,
         create_info->row_type != ROW_TYPE_FIXED)
       create_info->table_options|= HA_OPTION_PACK_RECORD;

+    /* Check if field was renamed */
+    field->flags&= ~FIELD_IS_RENAMED;
+    if (my_strcasecmp(system_charset_info,
+                      field->field_name,
+                      new_field->field_name))
+      field->flags|= FIELD_IS_RENAMED;
+
     /* Evaluate changes bitmap and send to check_if_incompatible_data() */
     if (!(tmp= field->is_equal(new_field)))
       DBUG_RETURN(ALTER_TABLE_DATA_CHANGED);
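Note on the compare_tables() hunks above: the added block flags a column whose name changed by comparing the old and new names case-insensitively and setting FIELD_IS_RENAMED on the field. A minimal sketch of that check in isolation (flag value and helper name are illustrative, not the server's definitions):

    #include <cassert>
    #include <strings.h>                 /* strcasecmp(); the server uses my_strcasecmp */

    enum { FIELD_RENAMED= 1u << 0 };     /* stand-in for FIELD_IS_RENAMED */

    static unsigned mark_renamed(unsigned flags, const char *old_name,
                                 const char *new_name)
    {
      flags&= ~FIELD_RENAMED;            /* clear any stale state first */
      if (strcasecmp(old_name, new_name))
        flags|= FIELD_RENAMED;           /* names differ: field was renamed */
      return flags;
    }

    int main()
    {
      assert(mark_renamed(0, "a", "A") == 0);              /* case-only: not a rename */
      assert(mark_renamed(0, "a", "b") == FIELD_RENAMED);
      return 0;
    }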

@@ -30,7 +30,7 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info)
   */
   if (hton == NULL || hton->state != SHOW_OPTION_YES)
   {
-    hton= ha_resolve_by_legacy_type(thd, DB_TYPE_DEFAULT);
+    hton= ha_default_handlerton(thd);
     if (ts_info->storage_engine != 0)
       push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
                           ER_WARN_USING_OTHER_HANDLER,

@@ -42,12 +42,6 @@
 #include <myisam.h>
 #include <myisammrg.h>

-typedef struct p_elem_val
-{
-  longlong value;
-  bool null_value;
-} part_elem_value;

 int yylex(void *yylval, void *yythd);

 const LEX_STRING null_lex_str={0,0};
@@ -3553,7 +3547,7 @@ opt_part_values:
         ;

 part_func_max:
-        MAX_VALUE_SYM
+        max_value_sym
         {
          LEX *lex= Lex;
          if (lex->part_info->defined_max_value)
@@ -3562,6 +3556,7 @@ part_func_max:
            YYABORT;
          }
          lex->part_info->defined_max_value= TRUE;
+         lex->part_info->curr_part_elem->max_value= TRUE;
          lex->part_info->curr_part_elem->range_value= LONGLONG_MAX;
         }
        | part_range_func
@@ -3579,10 +3574,18 @@ part_func_max:
         }
        ;

+max_value_sym:
+        MAX_VALUE_SYM
+        | '(' MAX_VALUE_SYM ')'
+        ;
+
 part_range_func:
         '(' part_bit_expr ')'
         {
-          Lex->part_info->curr_part_elem->range_value= $2->value;
+          partition_info *part_info= Lex->part_info;
+          if (!($2->unsigned_flag))
+            part_info->curr_part_elem->signed_flag= TRUE;
+          part_info->curr_part_elem->range_value= $2->value;
         }
        ;

@@ -3595,9 +3598,12 @@ part_list_item:
         part_bit_expr
         {
           part_elem_value *value_ptr= $1;
+          partition_info *part_info= Lex->part_info;
+          if (!value_ptr->unsigned_flag)
+            part_info->curr_part_elem->signed_flag= TRUE;
           if (!value_ptr->null_value &&
-              Lex->part_info->curr_part_elem->
+              part_info->curr_part_elem->
-              list_val_list.push_back((longlong*) &value_ptr->value))
+              list_val_list.push_back(value_ptr))
           {
             mem_alloc_error(sizeof(part_elem_value));
             YYABORT;
@@ -3638,6 +3644,10 @@ part_bit_expr:
          }
          thd->where= save_where;
          value_ptr->value= part_expr->val_int();
+         value_ptr->unsigned_flag= TRUE;
+         if (!part_expr->unsigned_flag &&
+             value_ptr->value < 0)
+           value_ptr->unsigned_flag= FALSE;
          if ((value_ptr->null_value= part_expr->null_value))
          {
            if (Lex->part_info->curr_part_elem->has_null_value)
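Note on the grammar hunks above: part_bit_expr now records a sign for every partition value: the value defaults to unsigned, and only an expression that is itself signed and evaluates to a negative number keeps it signed, which in turn marks the partition element with signed_flag. The classification in isolation (standalone sketch, not parser code):

    #include <cassert>
    #include <cstdint>

    static bool value_is_unsigned(bool expr_is_unsigned, int64_t value)
    {
      bool unsigned_flag= true;                  /* default, as in the new action */
      if (!expr_is_unsigned && value < 0)
        unsigned_flag= false;                    /* negative signed expression stays signed */
      return unsigned_flag;
    }

    int main()
    {
      assert(value_is_unsigned(true, -1));       /* bit pattern of a large unsigned value */
      assert(value_is_unsigned(false, 5));       /* non-negative signed expression */
      assert(!value_is_unsigned(false, -5));     /* negative signed expression */
      return 0;
    }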

@@ -1481,8 +1481,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
     outparam->part_info->is_auto_partitioned= share->auto_partitioned;
     DBUG_PRINT("info", ("autopartitioned: %u", share->auto_partitioned));
     if (!tmp)
-      tmp= fix_partition_func(thd, share->normalized_path.str, outparam,
-                              is_create_table);
+      tmp= fix_partition_func(thd, outparam, is_create_table);
     *root_ptr= old_root;
     if (tmp)
       goto err;

@@ -1839,8 +1839,7 @@ int Dbtup::interpreterNextLab(Signal* signal,
         /* ------------------------------------------------------------- */
         TregMemBuffer[theRegister]= 0x50;
         // arithmetic conversion if big-endian
-        * (Int64*)(TregMemBuffer+theRegister+2)=
-          TregMemBuffer[theRegister+1];
+        * (Int64*)(TregMemBuffer+theRegister+2)= TregMemBuffer[theRegister+1];
       } else if (TnoDataRW == 3) {
         /* ------------------------------------------------------------- */
         // Three words read means that we get the instruction plus two

@@ -79,7 +79,8 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &f,
   NdbEventOperation(*this),
   m_facade(&f),
   m_ndb(theNdb),
-  m_state(EO_ERROR)
+  m_state(EO_ERROR),
+  m_oid(~(Uint32)0)
 {
   DBUG_ENTER("NdbEventOperationImpl::NdbEventOperationImpl");

@@ -88,7 +89,11 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &f,
   assert(myDict != NULL);

   const NdbDictionary::Event *myEvnt = myDict->getEvent(eventName);
-  if (!myEvnt) { m_error.code= myDict->getNdbError().code; DBUG_VOID_RETURN; }
+  if (!myEvnt)
+  {
+    m_error.code= myDict->getNdbError().code;
+    DBUG_VOID_RETURN;
+  }

   init(myEvnt->m_impl);
   DBUG_VOID_RETURN;
@@ -99,7 +104,8 @@ NdbEventOperationImpl::NdbEventOperationImpl(Ndb *theNdb,
   NdbEventOperation(*this),
   m_facade(this),
   m_ndb(theNdb),
-  m_state(EO_ERROR)
+  m_state(EO_ERROR),
+  m_oid(~(Uint32)0)
 {
   DBUG_ENTER("NdbEventOperationImpl::NdbEventOperationImpl [evnt]");
   init(evnt);
@@ -113,7 +119,6 @@ NdbEventOperationImpl::init(NdbEventImpl& evnt)

   m_magic_number = 0;
   mi_type = 0;
-  m_oid = ~(Uint32)0;
   m_change_mask = 0;
 #ifdef VM_TRACE
   m_data_done_count = 0;
@@ -173,6 +178,9 @@ NdbEventOperationImpl::~NdbEventOperationImpl()
   DBUG_ENTER("NdbEventOperationImpl::~NdbEventOperationImpl");
   m_magic_number= 0;

+  if (m_oid == ~(Uint32)0)
+    DBUG_VOID_RETURN;
+
   stop();
   // m_bufferHandle->dropSubscribeEvent(m_bufferId);
   ; // ToDo? We should send stop signal here
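Note on the NdbEventOperationImpl hunks above: m_oid is now set to the sentinel ~(Uint32)0 in the constructor initializer lists instead of in init(), and the destructor returns early while the sentinel is still in place, so a half-constructed operation never calls stop(). The pattern in isolation (class and member names are illustrative, not the NDB API):

    #include <cstdint>
    #include <cstdio>

    class EventOp
    {
    public:
      EventOp() : m_oid(~(uint32_t)0) {}        /* sentinel: not yet fully set up */
      void open(uint32_t oid) { m_oid= oid; }   /* a real id means teardown is needed */
      ~EventOp()
      {
        if (m_oid == ~(uint32_t)0)
          return;                               /* construction never finished: skip teardown */
        std::printf("stopping subscription %u\n", m_oid);
      }
    private:
      uint32_t m_oid;
    };

    int main()
    {
      EventOp never_opened;                     /* destructor does nothing */
      EventOp opened;
      opened.open(7);                           /* destructor performs the teardown */
      return 0;
    }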

@@ -17,6 +17,7 @@
 ### BEGIN INIT INFO
 # Provides: mysql
 # Required-Start: $local_fs $network $remote_fs
+# Should-Start: ypbind nscd ldap ntpd xntpd
 # Required-Stop: $local_fs $network $remote_fs
 # Default-Start: 2 3 4 5
 # Default-Stop: 0 1 6