Merge grichter@bk-internal.mysql.com:/home/bk/mysql-5.0
into lmy002.wdf.sap.corp:/home/georg/work/mysql/prod/mysql-5.0
commit 35b66a40c2
@@ -285,6 +285,7 @@ inline double ulonglong2double(ulonglong value)
*(((long *) T)+1) = *(((long*) &V)+1); }
#define float4get(V,M) { *((long *) &(V)) = *((long*) (M)); }
#define floatstore(T,V) memcpy((byte*)(T), (byte*)(&V), sizeof(float))
#define floatget(V,M) memcpy((byte*)(&V), (byte*)(M), sizeof(float))
#define float8get(V,M) doubleget((V),(M))
#define float4store(V,M) memcpy((byte*) V,(byte*) (&M),sizeof(float))
#define float8store(V,M) doublestore((V),(M))
4  mysql-test/include/have_gbk.inc  Normal file
@@ -0,0 +1,4 @@
-- require r/have_gbk.require
disable_query_log;
show collation like "gbk_chinese_ci";
enable_query_log;
@@ -37,7 +37,7 @@ Note 1051 Unknown table 't1'
create table t1 (ordid int(8) not null auto_increment, ord varchar(50) not null, primary key (ord,ordid)) engine=heap;
ERROR 42000: Incorrect table definition; there can be only one auto column and it must be defined as a key
create table not_existing_database.test (a int);
Got one of the listed errors
ERROR 42000: Unknown database 'not_existing_database'
create table `a/a` (a int);
ERROR 42000: Incorrect table name 'a/a'
create table `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa` (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa int);
@@ -358,7 +358,7 @@ create table t3 like t1;
create table t3 like mysqltest.t3;
ERROR 42S01: Table 't3' already exists
create table non_existing_database.t1 like t1;
Got one of the listed errors
ERROR 42000: Unknown database 'non_existing_database'
create table t3 like non_existing_table;
ERROR 42S02: Unknown table 'non_existing_table'
create temporary table t3 like t1;
131  mysql-test/r/ctype_gbk.result  Normal file
@@ -0,0 +1,131 @@
drop table if exists t1;
SET @test_character_set= 'gbk';
SET @test_collation= 'gbk_chinese_ci';
SET @safe_character_set_server= @@character_set_server;
SET @safe_collation_server= @@collation_server;
SET character_set_server= @test_character_set;
SET collation_server= @test_collation;
CREATE DATABASE d1;
USE d1;
CREATE TABLE t1 (c CHAR(10), KEY(c));
SHOW FULL COLUMNS FROM t1;
Field Type Collation Null Key Default Extra Privileges Comment
c char(10) gbk_chinese_ci YES MUL NULL
INSERT INTO t1 VALUES ('aaa'),('aaaa'),('aaaaa');
SELECT c as want3results FROM t1 WHERE c LIKE 'aaa%';
want3results
aaa
aaaa
aaaaa
DROP TABLE t1;
CREATE TABLE t1 (c1 varchar(15), KEY c1 (c1(2)));
SHOW FULL COLUMNS FROM t1;
Field Type Collation Null Key Default Extra Privileges Comment
c1 varchar(15) gbk_chinese_ci YES MUL NULL
INSERT INTO t1 VALUES ('location'),('loberge'),('lotre'),('boabab');
SELECT c1 as want3results from t1 where c1 like 'l%';
want3results
location
loberge
lotre
SELECT c1 as want3results from t1 where c1 like 'lo%';
want3results
location
loberge
lotre
SELECT c1 as want1result from t1 where c1 like 'loc%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'loca%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'locat%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'locati%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'locatio%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'location%';
want1result
location
DROP TABLE t1;
DROP DATABASE d1;
USE test;
SET character_set_server= @safe_character_set_server;
SET collation_server= @safe_collation_server;
SET NAMES gbk;
SET collation_connection='gbk_chinese_ci';
create table t1 select repeat('a',4000) a;
delete from t1;
insert into t1 values ('a'), ('a '), ('a\t');
select collation(a),hex(a) from t1 order by a;
collation(a) hex(a)
gbk_chinese_ci 6109
gbk_chinese_ci 61
gbk_chinese_ci 6120
drop table t1;
create table t1 engine=innodb select repeat('a',50) as c1;
alter table t1 add index(c1(5));
insert into t1 values ('abcdefg'),('abcde100'),('abcde110'),('abcde111');
select collation(c1) from t1 limit 1;
collation(c1)
gbk_chinese_ci
select c1 from t1 where c1 like 'abcdef%' order by c1;
c1
abcdefg
select c1 from t1 where c1 like 'abcde1%' order by c1;
c1
abcde100
abcde110
abcde111
select c1 from t1 where c1 like 'abcde11%' order by c1;
c1
abcde110
abcde111
select c1 from t1 where c1 like 'abcde111%' order by c1;
c1
abcde111
drop table t1;
SET collation_connection='gbk_bin';
create table t1 select repeat('a',4000) a;
delete from t1;
insert into t1 values ('a'), ('a '), ('a\t');
select collation(a),hex(a) from t1 order by a;
collation(a) hex(a)
gbk_bin 6109
gbk_bin 61
gbk_bin 6120
drop table t1;
create table t1 engine=innodb select repeat('a',50) as c1;
alter table t1 add index(c1(5));
insert into t1 values ('abcdefg'),('abcde100'),('abcde110'),('abcde111');
select collation(c1) from t1 limit 1;
collation(c1)
gbk_bin
select c1 from t1 where c1 like 'abcdef%' order by c1;
c1
abcdefg
select c1 from t1 where c1 like 'abcde1%' order by c1;
c1
abcde100
abcde110
abcde111
select c1 from t1 where c1 like 'abcde11%' order by c1;
c1
abcde110
abcde111
select c1 from t1 where c1 like 'abcde111%' order by c1;
c1
abcde111
drop table t1;
SET NAMES gbk;
CREATE TABLE t1 (a text) character set gbk;
INSERT INTO t1 VALUES (0xA3A0),(0xA1A1);
SELECT hex(a) FROM t1 ORDER BY a;
hex(a)
A1A1
A3A0
DROP TABLE t1;
@@ -879,3 +879,18 @@ drop table t1;
select hex(29223372036854775809), hex(-29223372036854775809);
hex(29223372036854775809) hex(-29223372036854775809)
FFFFFFFFFFFFFFFF FFFFFFFFFFFFFFFF
create table t1 (i int);
insert into t1 values (1000000000),(1);
select lpad(i, 7, ' ') as t from t1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def t 253 7 7 Y 128 31 63
t
1000000
1
select rpad(i, 7, ' ') as t from t1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def t 253 7 7 Y 128 31 63
t
1000000
1
drop table t1;
2  mysql-test/r/have_gbk.require  Normal file
@@ -0,0 +1,2 @@
Collation Charset Id Default Compiled Sortlen
gbk_chinese_ci gbk 28 Yes Yes 1
@@ -354,3 +354,28 @@ t1 CREATE TABLE `t1` (
KEY `a` (`a`,`b`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
create table t1 (a int not null primary key, b varchar(20) not null unique);
desc t1;
Field Type Null Key Default Extra
a int(11) NO PRI
b varchar(20) NO UNI
drop table t1;
create table t1 (a int not null primary key, b int not null unique);
desc t1;
Field Type Null Key Default Extra
a int(11) NO PRI
b int(11) NO UNI
drop table t1;
create table t1 (a int not null primary key, b varchar(20) not null, unique (b(10)));
desc t1;
Field Type Null Key Default Extra
a int(11) NO PRI
b varchar(20) NO UNI
drop table t1;
create table t1 (a int not null primary key, b varchar(20) not null, c varchar(20) not null, unique(b(10),c(10)));
desc t1;
Field Type Null Key Default Extra
a int(11) NO PRI
b varchar(20) NO MUL
c varchar(20) NO
drop table t1;
File diff suppressed because one or more lines are too long
@@ -39,7 +39,7 @@ drop table if exists t1;
--error 1075
create table t1 (ordid int(8) not null auto_increment, ord varchar(50) not null, primary key (ord,ordid)) engine=heap;
-- error 1044,1
-- error 1049
create table not_existing_database.test (a int);
--error 1103
create table `a/a` (a int);
@@ -305,7 +305,7 @@ select * from t2;
create table t3 like t1;
--error 1050
create table t3 like mysqltest.t3;
--error ER_DBACCESS_DENIED_ERROR,1
--error 1049
create table non_existing_database.t1 like t1;
--error 1051
create table t3 like non_existing_table;
30  mysql-test/t/ctype_gbk.test  Normal file
@@ -0,0 +1,30 @@
-- source include/have_gbk.inc
#
# Tests with the gbk character set
#
--disable_warnings
drop table if exists t1;
--enable_warnings
SET @test_character_set= 'gbk';
SET @test_collation= 'gbk_chinese_ci';
-- source include/ctype_common.inc
SET NAMES gbk;
SET collation_connection='gbk_chinese_ci';
-- source include/ctype_filesort.inc
-- source include/ctype_innodb_like.inc
SET collation_connection='gbk_bin';
-- source include/ctype_filesort.inc
-- source include/ctype_innodb_like.inc
#
# Bug#11987 mysql will truncate the text when
# the text contain GBK char:"0xA3A0" and "0xA1"
#
SET NAMES gbk;
CREATE TABLE t1 (a text) character set gbk;
INSERT INTO t1 VALUES (0xA3A0),(0xA1A1);
SELECT hex(a) FROM t1 ORDER BY a;
DROP TABLE t1;
@@ -613,3 +613,14 @@ drop table t1;
# Bug #9854 hex() and out of range handling
#
select hex(29223372036854775809), hex(-29223372036854775809);
#
# Bug #11311: Incorrect length returned from LPAD() and RPAD()
#
create table t1 (i int);
insert into t1 values (1000000000),(1);
--enable_metadata
select lpad(i, 7, ' ') as t from t1;
select rpad(i, 7, ' ') as t from t1;
--disable_metadata
drop table t1;
@@ -337,3 +337,19 @@ show create table t1;
alter table t1 modify a varchar(20);
show create table t1;
drop table t1;
#
# Bug #11227: Incorrectly reporting 'MUL' vs. 'UNI' on varchar
#
create table t1 (a int not null primary key, b varchar(20) not null unique);
desc t1;
drop table t1;
create table t1 (a int not null primary key, b int not null unique);
desc t1;
drop table t1;
create table t1 (a int not null primary key, b varchar(20) not null, unique (b(10)));
desc t1;
drop table t1;
create table t1 (a int not null primary key, b varchar(20) not null, c varchar(20) not null, unique(b(10),c(10)));
desc t1;
drop table t1;
@@ -616,6 +616,7 @@ set character_set_results=cp1251;
SELECT a,'Â','â'='Â' FROM t1;
show status like "Qcache_hits";
show status like "Qcache_queries_in_cache";
SET NAMES default;
DROP TABLE t1;
@@ -711,9 +712,10 @@ repair table t1;
show status like 'qcache_queries_in_cache';
drop table t1;
#
# Bug #9549: Make sure cached queries that span more than one cache block
# are handled properly in the embedded server.
#
# We just want a small query cache, so we can fragment it easily
set GLOBAL query_cache_size=64*1024;
# This actually gives us a usable cache size of about 48K
@@ -755,6 +757,8 @@ select a from t1;
flush query cache;
drop table t1, t2;
set GLOBAL query_cache_size=1355776
#
# Query with warning prohibited to query cache (BUG#9414)
@@ -832,4 +836,43 @@ drop procedure p1//
drop table t1//
delimiter ;//
#
# query in QC from normal execution and SP (BUG#6897)
#
flush query cache;
reset query cache;
flush status;
delimiter //;
create table t1 (s1 int)//
create procedure f1 () begin
select sql_cache * from t1;
select sql_cache * from t1;
end;//
delimiter ;//
call f1();
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
call f1();
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
call f1();
select sql_cache * from t1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
insert into t1 values (1);
select sql_cache * from t1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
call f1();
call f1();
select sql_cache * from t1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
drop procedure f1;
drop table t1;
set GLOBAL query_cache_size=0;
@@ -388,7 +388,6 @@ wait_for_lock(struct st_lock_list *wait, THR_LOCK_DATA *data,
{
struct st_my_thread_var *thread_var= my_thread_var;
pthread_cond_t *cond= &thread_var->suspend;
struct timeval now;
struct timespec wait_timeout;
enum enum_thr_lock_result result= THR_LOCK_ABORTED;
my_bool can_deadlock= test(data->owner->info->n_cursors);
@@ -406,11 +405,7 @@ wait_for_lock(struct st_lock_list *wait, THR_LOCK_DATA *data,
data->cond= cond;
if (can_deadlock)
{
gettimeofday(&now, 0);
wait_timeout.tv_sec= now.tv_sec + table_lock_wait_timeout;
wait_timeout.tv_nsec= now.tv_usec * 1000;
}
set_timespec(wait_timeout, table_lock_wait_timeout);
while (!thread_var->abort || in_wait_list)
{
int rc= can_deadlock ? pthread_cond_timedwait(cond, &data->lock->mutex,
@@ -28,6 +28,8 @@ public:
UtilTransactions(Ndb* ndb,
const char * tableName, const char * indexName = 0);
int closeTransaction(Ndb*);
int clearTable(Ndb*,
int records = 0,
int parallelism = 0);
@@ -53,11 +53,7 @@ HugoOperations::setTransactionId(Uint64 id){
int HugoOperations::closeTransaction(Ndb* pNdb){
if (pTrans != NULL){
pNdb->closeTransaction(pTrans);
pTrans = NULL;
}
pTrans = NULL;
UtilTransactions::closeTransaction(pNdb);
m_result_sets.clear();
m_executed_result_sets.clear();
@@ -109,7 +109,7 @@ UtilTransactions::clearTable3(Ndb* pNdb,
err = pTrans->getNdbError();
if(err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
par = 1;
goto restart;
@@ -126,7 +126,7 @@ UtilTransactions::clearTable3(Ndb* pNdb,
err = pTrans->getNdbError();
if(err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
continue;
}
@@ -150,7 +150,7 @@ UtilTransactions::clearTable3(Ndb* pNdb,
if(check == -1){
if(err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
par = 1;
goto restart;
@@ -162,20 +162,20 @@ UtilTransactions::clearTable3(Ndb* pNdb,
err = pTrans->getNdbError();
if(err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
par = 1;
goto restart;
}
goto failed;
}
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_OK;
}
return NDBT_FAILED;
failed:
if(pTrans != 0) pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
ERR(err);
return (err.code != 0 ? err.code : NDBT_FAILED);
}
@@ -219,20 +219,20 @@ UtilTransactions::copyTableData(Ndb* pNdb,
pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
if( pOp->readTuples(NdbScanOperation::LM_Read, parallelism) ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -241,7 +241,7 @@ UtilTransactions::copyTableData(Ndb* pNdb,
if ((row.attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -249,7 +249,7 @@ UtilTransactions::copyTableData(Ndb* pNdb,
check = pTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -258,7 +258,7 @@ UtilTransactions::copyTableData(Ndb* pNdb,
do {
insertedRows++;
if (addRowToInsert(pNdb, pTrans, row, destName) != 0){
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
} while((eof = pOp->nextResult(false)) == 0);
@@ -268,7 +268,7 @@ UtilTransactions::copyTableData(Ndb* pNdb,
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -277,7 +277,7 @@ UtilTransactions::copyTableData(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
// If error = 488 there should be no limit on number of retry attempts
if (err.code != 488)
@@ -285,11 +285,11 @@ UtilTransactions::copyTableData(Ndb* pNdb,
continue;
}
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
g_info << insertedRows << " rows copied" << endl;
@@ -375,7 +375,7 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
pOp = getScanOperation(pTrans);
if (pOp == NULL) {
const NdbError err = pNdb->getNdbError();
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
if (err.status == NdbError::TemporaryError){
ERR(err);
@@ -389,14 +389,14 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
if( pOp->readTuples(lm, 0, parallelism) ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -408,7 +408,7 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
if ((row.attributeStore(attrib_list[a]) =
pOp->getValue(tab.getColumn(attrib_list[a])->getName())) == 0) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -421,13 +421,13 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -447,17 +447,17 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
g_info << rows << " rows have been read" << endl;
if (records != 0 && rows != records){
g_info << "Check expected number of records failed" << endl
@@ -496,13 +496,13 @@ UtilTransactions::selectCount(Ndb* pNdb,
pOp = getScanOperation(pTrans);
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
if( pOp->readTuples(lm) ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -515,7 +515,7 @@ UtilTransactions::selectCount(Ndb* pNdb,
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -524,7 +524,7 @@ UtilTransactions::selectCount(Ndb* pNdb,
check = pTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -539,17 +539,17 @@ UtilTransactions::selectCount(Ndb* pNdb,
const NdbError err = pTrans->getNdbError();
if (err.status == NdbError::TemporaryError){
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
if (count_rows != NULL){
*count_rows = rows;
@@ -653,7 +653,7 @@ restart:
pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
const NdbError err = pNdb->getNdbError();
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
ERR(err);
if (err.status == NdbError::TemporaryError){
@@ -673,14 +673,14 @@ restart:
if( rs != 0 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -689,7 +689,7 @@ restart:
if ((row.attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -700,13 +700,13 @@ restart:
if (err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -733,13 +733,13 @@ restart:
if (err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
goto restart;
}
}
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -748,17 +748,17 @@ restart:
if (err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_OK;
}
@@ -1057,20 +1057,20 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
if( pOp->readTuples(NdbScanOperation::LM_Read, 0, parallelism) ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1085,13 +1085,13 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1146,7 +1146,7 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
g_err << "Error when comapring records" << endl;
g_err << " scanRow: \n" << scanRow.c_str().c_str() << endl;
g_err << " pkRow: \n" << pkRow.c_str().c_str() << endl;
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1156,7 +1156,7 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
if((res= iop->nextResult()) != 0){
g_err << "Failed to find row using index: " << res << endl;
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1164,14 +1164,14 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
g_err << "Error when comapring records" << endl;
g_err << " scanRow: \n" << scanRow.c_str().c_str() << endl;
g_err << " indexRow: \n" << indexRow.c_str().c_str() << endl;
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
if(iop->nextResult() == 0){
g_err << "Found extra row!!" << endl;
g_err << " indexRow: \n" << indexRow.c_str().c_str() << endl;
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1184,18 +1184,18 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
iop = 0;
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
rows--;
continue;
}
ERR(err);
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_FAILED;
}
pNdb->closeTransaction(pTrans);
closeTransaction(pNdb);
return NDBT_OK;
}
@@ -1303,6 +1303,16 @@ UtilTransactions::getOperation(NdbConnection* pTrans,
#include <HugoOperations.hpp>
int
UtilTransactions::closeTransaction(Ndb* pNdb)
{
if (pTrans != NULL){
pNdb->closeTransaction(pTrans);
pTrans = NULL;
}
return 0;
}
int
UtilTransactions::compare(Ndb* pNdb, const char* tab_name2, int flags){
@@ -1313,7 +1323,6 @@ UtilTransactions::compare(Ndb* pNdb, const char* tab_name2, int flags){
HugoCalculator calc(tab);
NDBT_ResultRow row(tab);
NdbTransaction* pTrans= 0;
const NdbDictionary::Table* tmp= pNdb->getDictionary()->getTable(tab_name2);
if(tmp == 0)
{
@@ -765,7 +765,6 @@ uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
(*field)->move_field(-old_ptr);
}
DBUG_DUMP("record", record, table->s->reclength);
DBUG_RETURN(0);
}
@@ -2076,7 +2076,6 @@ void Item_func_rpad::fix_length_and_dec()
{
ulonglong length= ((ulonglong) args[1]->val_int() *
collation.collation->mbmaxlen);
length= max((ulonglong) args[0]->max_length, length);
if (length >= MAX_BLOB_WIDTH)
{
length= MAX_BLOB_WIDTH;
@@ -2164,7 +2163,6 @@ void Item_func_lpad::fix_length_and_dec()
{
ulonglong length= ((ulonglong) args[1]->val_int() *
collation.collation->mbmaxlen);
length= max((ulonglong) args[0]->max_length, length);
if (length >= MAX_BLOB_WIDTH)
{
length= MAX_BLOB_WIDTH;
@@ -526,6 +526,7 @@ struct Query_cache_query_flags
{
unsigned int client_long_flag:1;
unsigned int client_protocol_41:1;
unsigned int more_results_exists:1;
uint character_set_client_num;
uint character_set_results_num;
uint collation_connection_num;
@@ -1245,10 +1246,13 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list,
void unireg_init(ulong options);
void unireg_end(void);
bool mysql_create_frm(THD *thd, my_string file_name,
const char *table, const char* db,
HA_CREATE_INFO *create_info,
List<create_field> &create_field,
uint key_count,KEY *key_info,handler *db_type);
int rea_create_table(THD *thd, my_string file_name,HA_CREATE_INFO *create_info,
int rea_create_table(THD *thd, my_string file_name,
const char *table, const char* db,
HA_CREATE_INFO *create_info,
List<create_field> &create_field,
uint key_count,KEY *key_info);
int format_number(uint inputflag,uint max_length,my_string pos,uint length,
@@ -1315,7 +1319,8 @@ ulong make_new_entry(File file,uchar *fileinfo,TYPELIB *formnames,
const char *newname);
ulong next_io_size(ulong pos);
void append_unescaped(String *res, const char *pos, uint length);
int create_frm(THD *thd, char *name,uint reclength,uchar *fileinfo,
int create_frm(THD *thd, char *name, const char *table, const char *db,
uint reclength,uchar *fileinfo,
HA_CREATE_INFO *create_info, uint keys);
void update_create_info_from_table(HA_CREATE_INFO *info, TABLE *form);
int rename_file_ext(const char * from,const char * to,const char * ext);
@@ -4094,20 +4094,20 @@ ER_ERROR_DURING_CHECKPOINT
swe "Fick fel %d vid CHECKPOINT"
ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ CHECKPOINT"
ER_NEW_ABORTING_CONNECTION 08S01
cze "Spojen-Bí %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' stroj: `%-.64s' (%-.64s) bylo pøeru¹eno"
dan "Afbrød forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vært: `%-.64s' (%-.64s)"
nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' host: `%-.64s' (%-.64s)"
eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)"
est "Ühendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: `%-.64s' (%-.64s)"
fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' hôte: `%-.64s' (%-.64s)"
ger "Verbindungsabbruch %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: `%-.64s' (%-.64s)"
cze "Spojen-Bí %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' stroj: '%-.64s' (%-.64s) bylo pøeru¹eno"
dan "Afbrød forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vært: '%-.64s' (%-.64s)"
nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' host: '%-.64s' (%-.64s)"
eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: '%-.64s' (%-.64s)"
est "Ühendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: '%-.64s' (%-.64s)"
fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' hôte: '%-.64s' (%-.64s)"
ger "Verbindungsabbruch %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: '%-.64s' (%-.64s)"
ita "Interrotta la connessione %ld al db: ''%-.64s' utente: '%-.32s' host: '%-.64s' (%-.64s)"
por "Conexão %ld abortada para banco de dados '%-.64s' - usuário '%-.32s' - 'host' `%-.64s' ('%-.64s')"
rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' Ó ÈÏÓÔÁ `%-.64s' (%-.64s)"
serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: `%-.64s' (%-.64s)"
spa "Abortada conexión %ld para db: '%-.64s' usuario: '%-.32s' servidor: `%-.64s' (%-.64s)"
por "Conexão %ld abortada para banco de dados '%-.64s' - usuário '%-.32s' - 'host' '%-.64s' ('%-.64s')"
rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' Ó ÈÏÓÔÁ '%-.64s' (%-.64s)"
serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: '%-.64s' (%-.64s)"
spa "Abortada conexión %ld para db: '%-.64s' usuario: '%-.32s' servidor: '%-.64s' (%-.64s)"
swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s', host '%-.64s' (%-.64s)"
ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞ: '%-.32s' ÈÏÓÔ: `%-.64s' (%-.64s)"
ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞ: '%-.32s' ÈÏÓÔ: '%-.64s' (%-.64s)"
ER_DUMP_NOT_IMPLEMENTED
cze "Handler tabulky nepodporuje bin-Bární dump"
dan "Denne tabeltype unserstøtter ikke binært tabeldump"
@@ -5384,3 +5384,5 @@ ER_CANT_CREATE_FEDERATED_TABLE
eng "Can't create federated table. Foreign data src error : '%-.64s'"
ER_TRG_IN_WRONG_SCHEMA
eng "Trigger in wrong schema"
ER_STACK_OVERRUN_NEED_MORE
eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld -O thread_stack=#' to specify a bigger stack."
@@ -774,10 +774,11 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
Query_cache_query_flags flags;
// fill all gaps between fields with 0 to get repeatable key
bzero(&flags, QUERY_CACHE_FLAGS_SIZE);
flags.client_long_flag= (thd->client_capabilities & CLIENT_LONG_FLAG ?
1 : 0);
flags.client_protocol_41= (thd->client_capabilities & CLIENT_PROTOCOL_41 ?
1 : 0);
flags.client_long_flag= test(thd->client_capabilities & CLIENT_LONG_FLAG);
flags.client_protocol_41= test(thd->client_capabilities &
CLIENT_PROTOCOL_41);
flags.more_results_exists= test(thd->server_status &
SERVER_MORE_RESULTS_EXISTS);
flags.character_set_client_num=
thd->variables.character_set_client->number;
flags.character_set_results_num=
@@ -791,6 +792,20 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
flags.sql_mode= thd->variables.sql_mode;
flags.max_sort_length= thd->variables.max_sort_length;
flags.group_concat_max_len= thd->variables.group_concat_max_len;
DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, \
CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \
sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
(int)flags.client_long_flag,
(int)flags.client_protocol_41,
(int)flags.more_results_exists,
flags.character_set_client_num,
flags.character_set_results_num,
flags.collation_connection_num,
flags.limit,
(ulong)flags.time_zone,
flags.sql_mode,
flags.max_sort_length,
flags.group_concat_max_len));
STRUCT_LOCK(&structure_guard_mutex);
if (query_cache_size == 0)
@@ -973,10 +988,11 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
// fill all gaps between fields with 0 to get repeatable key
bzero(&flags, QUERY_CACHE_FLAGS_SIZE);
flags.client_long_flag= (thd->client_capabilities & CLIENT_LONG_FLAG ?
1 : 0);
flags.client_protocol_41= (thd->client_capabilities & CLIENT_PROTOCOL_41 ?
1 : 0);
flags.client_long_flag= test(thd->client_capabilities & CLIENT_LONG_FLAG);
flags.client_protocol_41= test(thd->client_capabilities &
CLIENT_PROTOCOL_41);
flags.more_results_exists= test(thd->server_status &
SERVER_MORE_RESULTS_EXISTS);
flags.character_set_client_num= thd->variables.character_set_client->number;
flags.character_set_results_num=
(thd->variables.character_set_results ?
@@ -988,6 +1004,20 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
flags.sql_mode= thd->variables.sql_mode;
flags.max_sort_length= thd->variables.max_sort_length;
flags.group_concat_max_len= thd->variables.group_concat_max_len;
DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, \
CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \
sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
(int)flags.client_long_flag,
(int)flags.client_protocol_41,
(int)flags.more_results_exists,
flags.character_set_client_num,
flags.character_set_results_num,
flags.collation_connection_num,
flags.limit,
(ulong)flags.time_zone,
flags.sql_mode,
flags.max_sort_length,
flags.group_concat_max_len));
memcpy((void *)(sql + (tot_length - QUERY_CACHE_FLAGS_SIZE)),
&flags, QUERY_CACHE_FLAGS_SIZE);
query_block = (Query_cache_block *) hash_search(&queries, (byte*) sql,
@@ -5087,8 +5087,9 @@ bool check_stack_overrun(THD *thd, long margin,
if ((stack_used=used_stack(thd->thread_stack,(char*) &stack_used)) >=
(long) (thread_stack - margin))
{
sprintf(errbuff[0],ER(ER_STACK_OVERRUN),stack_used,thread_stack);
my_message(ER_STACK_OVERRUN,errbuff[0],MYF(0));
sprintf(errbuff[0],ER(ER_STACK_OVERRUN_NEED_MORE),
stack_used,thread_stack,margin);
my_message(ER_STACK_OVERRUN_NEED_MORE,errbuff[0],MYF(0));
thd->fatal_error();
return 1;
}
@@ -1620,12 +1620,10 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
create_info->data_file_name= create_info->index_file_name= 0;
create_info->table_options=db_options;
if (rea_create_table(thd, path, create_info, fields, key_count,
if (rea_create_table(thd, path, table_name, db,
create_info, fields, key_count,
key_info_buffer))
{
/* my_error(ER_CANT_CREATE_TABLE,MYF(0),table_name,my_errno); */
goto end;
}
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
{
/* Open table and put in temporary table list */
@@ -2660,8 +2658,14 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
/*
Create a new table by copying from source table
*/
if (my_copy(src_path, dst_path, MYF(MY_WME|MY_DONT_OVERWRITE_FILE)))
if (my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE)))
{
if (my_errno == ENOENT)
my_error(ER_BAD_DB_ERROR,MYF(0),db);
else
my_error(ER_CANT_CREATE_FILE,MYF(0),dst_path,my_errno);
goto err;
}
/*
As mysql_truncate don't work on a new table at this stage of
21  sql/table.cc
@@ -702,10 +702,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
key_part->key_part_flag|= HA_BIT_PART;
if (i == 0 && key != primary_key)
field->flags |=
((keyinfo->flags & HA_NOSAME) &&
field->key_length() ==
keyinfo->key_length ? UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG);
field->flags |= ((keyinfo->flags & HA_NOSAME) &&
(keyinfo->key_parts == 1)) ?
UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG;
if (i == 0)
field->key_start.set_bit(key);
if (field->key_length() == key_part->length &&
@@ -1343,8 +1342,9 @@ void append_unescaped(String *res, const char *pos, uint length)
/* Create a .frm file */
File create_frm(THD *thd, register my_string name, uint reclength,
uchar *fileinfo, HA_CREATE_INFO *create_info, uint keys)
File create_frm(THD *thd, register my_string name, const char *table,
const char *db, uint reclength, uchar *fileinfo,
HA_CREATE_INFO *create_info, uint keys)
{
register File file;
ulong length;
@@ -1367,7 +1367,7 @@ File create_frm(THD *thd, register my_string name, uint reclength,
*/
set_if_smaller(create_info->raid_chunks, 255);
if ((file= my_create(name, CREATE_MODE, create_flags, MYF(MY_WME))) >= 0)
if ((file= my_create(name, CREATE_MODE, create_flags, MYF(0))) >= 0)
{
uint key_length, tmp_key_length;
uint tmp;
@@ -1414,6 +1414,13 @@ File create_frm(THD *thd, register my_string name, uint reclength,
}
}
}
else
{
if (my_errno == ENOENT)
my_error(ER_BAD_DB_ERROR,MYF(0),db);
else
my_error(ER_CANT_CREATE_TABLE,MYF(0),table,my_errno);
}
return (file);
} /* create_frm */
@@ -55,6 +55,8 @@ static bool make_empty_rec(THD *thd, int file, enum db_type table_type,
mysql_create_frm()
thd Thread handler
file_name Name of file (including database and .frm)
table Name of table
db Name of database
create_info create info parameters
create_fields Fields to create
keys number of keys to create
@@ -67,6 +69,7 @@ static bool make_empty_rec(THD *thd, int file, enum db_type table_type,
*/
bool mysql_create_frm(THD *thd, my_string file_name,
const char *table, const char *db,
HA_CREATE_INFO *create_info,
List<create_field> &create_fields,
uint keys, KEY *key_info,
@@ -113,7 +116,7 @@ bool mysql_create_frm(THD *thd, my_string file_name,
}
reclength=uint2korr(forminfo+266);
if ((file=create_frm(thd, file_name, reclength, fileinfo,
if ((file=create_frm(thd, file_name, table, db, reclength, fileinfo,
create_info, keys)) < 0)
{
my_free((gptr) screen_buff,MYF(0));
@@ -211,9 +214,11 @@ err3:
Create a frm (table definition) file and the tables
SYNOPSIS
mysql_create_frm()
rea_create_table()
thd Thread handler
file_name Name of file (including database and .frm)
table Name of table
db Name of database
create_info create info parameters
create_fields Fields to create
keys number of keys to create
@@ -226,13 +231,14 @@ err3:
*/
int rea_create_table(THD *thd, my_string file_name,
const char *table, const char *db,
HA_CREATE_INFO *create_info,
List<create_field> &create_fields,
uint keys, KEY *key_info)
{
DBUG_ENTER("rea_create_table");
if (mysql_create_frm(thd, file_name, create_info,
if (mysql_create_frm(thd, file_name, table, db, create_info,
create_fields, keys, key_info, NULL))
DBUG_RETURN(1);
if (!create_info->frm_only && ha_create_table(file_name,create_info,0))
@@ -9938,6 +9938,43 @@ my_mb_wc_gbk(CHARSET_INFO *cs __attribute__((unused)),
}
/*
Returns well formed length of a GBK string.
*/
static
uint my_well_formed_len_gbk(CHARSET_INFO *cs __attribute__((unused)),
const char *b, const char *e,
uint pos, int *error)
{
const char *b0= b;
const char *emb= e - 1; /* Last possible end of an MB character */
*error= 0;
while (pos-- && b < e)
{
if ((uchar) b[0] < 128)
{
/* Single byte ascii character */
b++;
}
else if ((b < emb) && isgbkcode((uchar)*b, (uchar)b[1]))
{
/* Double byte character */
b+= 2;
}
else
{
/* Wrong byte sequence */
*error= 1;
break;
}
}
return b - b0;
}
static MY_COLLATION_HANDLER my_collation_ci_handler =
{
NULL, /* init */
@@ -9960,7 +9997,7 @@ static MY_CHARSET_HANDLER my_charset_handler=
mbcharlen_gbk,
my_numchars_mb,
my_charpos_mb,
my_well_formed_len_mb,
my_well_formed_len_gbk,
my_lengthsp_8bit,
my_numcells_8bit,
my_mb_wc_gbk,
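The my_well_formed_len_gbk() function added in the hunk above is the heart of the Bug#11987 fix: it lets the server measure how many bytes of a GBK string form complete characters, so truncation happens at a character boundary instead of in the middle of a double-byte sequence. Below is a minimal standalone sketch of the same check; it is not server code, and is_gbk_pair() is an illustrative stand-in for the real isgbkcode() macro.

#include <stdio.h>

/* Illustrative stand-in for the server's isgbkcode() check:
   a GBK double-byte character has lead byte 0x81-0xFE and
   trail byte 0x40-0xFE, excluding 0x7F (assumption for this sketch). */
static int is_gbk_pair(unsigned char lead, unsigned char trail)
{
  return lead >= 0x81 && lead <= 0xFE &&
         trail >= 0x40 && trail <= 0xFE && trail != 0x7F;
}

/* Length in bytes of the longest well-formed prefix of b..e,
   scanning at most 'pos' characters - the same contract as the
   my_well_formed_len_gbk() added by the patch. */
static size_t well_formed_len_gbk(const char *b, const char *e,
                                  size_t pos, int *error)
{
  const char *b0 = b;
  const char *emb = e - 1;            /* last possible start of a pair */

  *error = 0;
  while (pos-- && b < e)
  {
    if ((unsigned char) b[0] < 128)
      b++;                            /* single-byte ASCII */
    else if (b < emb && is_gbk_pair((unsigned char) b[0],
                                    (unsigned char) b[1]))
      b += 2;                         /* complete double-byte character */
    else
    {
      *error = 1;                     /* truncated or invalid sequence */
      break;
    }
  }
  return (size_t) (b - b0);
}

int main(void)
{
  /* 0xA3 0xA0 is one of the GBK codes from Bug#11987; cutting the
     buffer after the lead byte must not count the dangling 0xA3. */
  const char buf[] = "ab\xA3\xA0";
  int err;
  size_t ok_len = well_formed_len_gbk(buf, buf + 3, 100, &err);

  printf("well-formed bytes: %zu, error: %d\n", ok_len, err);   /* 2, 1 */
  return 0;
}

Run on a buffer chopped after the lead byte, the sketch reports 2 well-formed bytes and raises the error flag, which is the kind of signal the charset handler gives the server so it can truncate GBK text at a character boundary.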