From b4417c75722b051d7ca93cf387c17c8edb323938 Mon Sep 17 00:00:00 2001 From: "msvensson@neptunus.(none)" <> Date: Mon, 21 Feb 2005 14:04:54 +0100 Subject: [PATCH 01/15] Fix so that ndb-cache-check-time is measured in milliseconds --- sql/ha_ndbcluster.cc | 13 +++++++++++-- sql/mysqld.cc | 4 ++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 62bc1cc41b9..297c36580d6 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -5746,7 +5746,7 @@ extern "C" pthread_handler_decl(ndb_util_thread_func, } List util_open_tables; - set_timespec(abstime, ndb_cache_check_time); + set_timespec(abstime, 0); for (;;) { @@ -5764,12 +5764,21 @@ extern "C" pthread_handler_decl(ndb_util_thread_func, if (ndb_cache_check_time == 0) { + /* Wake up in 10 seconds to check if value has changed */ set_timespec(abstime, 10); continue; } /* Set new time to wake up */ - set_timespec(abstime, ndb_cache_check_time); + struct timeval tv; + gettimeofday(&tv,0); + abstime.tv_sec= tv.tv_sec + (ndb_cache_check_time / 1000); + abstime.tv_nsec= tv.tv_usec * 1000 + (ndb_cache_check_time % 1000); + if (abstime.tv_nsec >= 1000000000) + { + abstime.tv_sec += 1; + abstime.tv_nsec -= 1000000000; + } /* Lock mutex and fill list with pointers to all open tables */ NDB_SHARE *share; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 9559ed55b3c..4ffe40155d6 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -4646,8 +4646,8 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &opt_ndb_optimized_node_selection, (gptr*) &opt_ndb_optimized_node_selection, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, - { "ndb_cache_check_time", OPT_NDB_CACHE_CHECK_TIME, - "A dedicated thread is created to update cached commit count value at the given interval.", + { "ndb-cache-check-time", OPT_NDB_CACHE_CHECK_TIME, + "A dedicated thread is created to update cached commit count value at the given interval(milliseconds).", (gptr*) &opt_ndb_cache_check_time, (gptr*) &opt_ndb_cache_check_time, 0, GET_ULONG, REQUIRED_ARG, 0, 0, LONG_TIMEOUT, 0, 1, 0}, #endif From c94952d1f0744011f0c225a42cd97f025e56d75b Mon Sep 17 00:00:00 2001 From: "msvensson@neptunus.(none)" <> Date: Thu, 10 Mar 2005 10:43:11 +0100 Subject: [PATCH 02/15] Removed unused variable --- sql/sql_class.h | 1 - 1 file changed, 1 deletion(-) diff --git a/sql/sql_class.h b/sql/sql_class.h index e793f5776d7..0f4a9ab357a 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1129,7 +1129,6 @@ public: table_map used_tables; USER_CONN *user_connect; CHARSET_INFO *db_charset; - List temporary_tables_should_be_free; // list of temporary tables /* FIXME: this, and some other variables like 'count_cuted_fields' maybe should be statement/cursor local, that is, moved to Statement From 19cc40262425f80aa6434b9f60056961d6530ac0 Mon Sep 17 00:00:00 2001 From: "msvensson@neptunus.(none)" <> Date: Thu, 10 Mar 2005 10:46:19 +0100 Subject: [PATCH 03/15] Fix uninitialised variable in Dbacc --- ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index dd68599f4f6..a16c0da369b 100644 --- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -2449,14 +2449,14 @@ void Dbacc::execACC_COMMITREQ(Signal* signal) operationRecPtr.p->transactionstate = IDLE; operationRecPtr.p->operation = ZUNDEFINED_OP; if(Toperation != ZREAD){ + rootfragrecptr.i = fragrecptr.p->myroot; + 
ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec); rootfragrecptr.p->m_commit_count++; if (Toperation != ZINSERT) { if (Toperation != ZDELETE) { return; } else { jam(); - rootfragrecptr.i = fragrecptr.p->myroot; - ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec); rootfragrecptr.p->noOfElements--; fragrecptr.p->slack += operationRecPtr.p->insertDeleteLen; if (fragrecptr.p->slack > fragrecptr.p->slackCheck) { @@ -2476,8 +2476,6 @@ void Dbacc::execACC_COMMITREQ(Signal* signal) }//if } else { jam(); /* EXPAND PROCESS HANDLING */ - rootfragrecptr.i = fragrecptr.p->myroot; - ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec); rootfragrecptr.p->noOfElements++; fragrecptr.p->slack -= operationRecPtr.p->insertDeleteLen; if (fragrecptr.p->slack >= (1u << 31)) { From 75e1b9e7652707568d5be014bcd23848d0425ce4 Mon Sep 17 00:00:00 2001 From: "reggie@mdk10.(none)" <> Date: Mon, 14 Mar 2005 16:47:35 -0600 Subject: [PATCH 04/15] Bug #6660 mysqldump creates bad pathnames on Windows This really should not happen on Windows and part of the problem not fixed here is why show create table includes data directory when being run on Windows. However, this patch fixes the bug in mysqldump.c mysqldump.c: Added fixPaths function to convert \ to / in data directory and index directory entries only on Windows --- BitKeeper/etc/logging_ok | 1 + client/mysqldump.c | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index f64d9ca4042..8a0c32e37d1 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -190,6 +190,7 @@ ramil@mysql.com ranger@regul.home.lan rburnett@build.mysql.com reggie@bob.(none) +reggie@mdk10.(none) root@home.(none) root@mc04.(none) root@x3.internalnet diff --git a/client/mysqldump.c b/client/mysqldump.c index a53dc319b2e..5ac5efb5128 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -1080,6 +1080,27 @@ static void print_xml_row(FILE *xml_file, const char *row_name, check_io(xml_file); } + +/* fixPaths -- on Windows only, this function will iterate through the output + of show create table and change any \ characters that appear in the data directory + or index directory elements to be / + + RETURN + void +*/ +static void fixPaths(char *buf, int buflen) +{ +#ifdef __WIN__ + int i = 0; + for (i=0; i < buflen; i++) + { + if (buf[i] != '\\') continue; + if (i != 0 && buf[i-1] == '\\') continue; + if (i != (buflen-1) && buf[i+1] == '\\') continue; + buf[i] = '/';} +#endif +} + /* getStructure -- retrievs database structure, prints out corresponding CREATE statement and fills out insert_pat. @@ -1159,6 +1180,7 @@ static uint getTableStructure(char *table, char* db) tableRes=mysql_store_result(sock); row=mysql_fetch_row(tableRes); + fixPaths(row[1], strlen(row[1])); // this really only does something on Windows fprintf(sql_file, "%s;\n", row[1]); check_io(sql_file); mysql_free_result(tableRes); From 46613bafbe9ee1d25885c78a4960cfb23aa77f75 Mon Sep 17 00:00:00 2001 From: "ramil@mysql.com" <> Date: Tue, 15 Mar 2005 13:32:12 +0400 Subject: [PATCH 05/15] A fix (bug #8489: Strange auto_increment behaviour with HEAP table). 
--- heap/hp_create.c | 3 ++- include/heap.h | 2 +- mysql-test/r/heap.result | 42 ++++++++++++++++++++++++++++++++++++++++ mysql-test/t/heap.test | 29 +++++++++++++++++++++++++++ sql/ha_heap.cc | 12 ++++++++---- 5 files changed, 82 insertions(+), 6 deletions(-) diff --git a/heap/hp_create.c b/heap/hp_create.c index af32fefea1b..b1b132a16fb 100644 --- a/heap/hp_create.c +++ b/heap/hp_create.c @@ -137,6 +137,8 @@ int heap_create(const char *name, uint keys, HP_KEYDEF *keydef, keyinfo->write_key= hp_write_key; keyinfo->hash_buckets= 0; } + if ((keyinfo->flag & HA_AUTO_KEY) && create_info->with_auto_increment) + share->auto_key= i + 1; } share->min_records= min_records; share->max_records= max_records; @@ -147,7 +149,6 @@ int heap_create(const char *name, uint keys, HP_KEYDEF *keydef, share->keys= keys; share->max_key_length= max_length; share->changed= 0; - share->auto_key= create_info->auto_key; share->auto_key_type= create_info->auto_key_type; share->auto_increment= create_info->auto_increment; /* Must be allocated separately for rename to work */ diff --git a/include/heap.h b/include/heap.h index ac2b38d1f2d..51f7b0cfa6a 100644 --- a/include/heap.h +++ b/include/heap.h @@ -183,10 +183,10 @@ typedef struct st_heap_info typedef struct st_heap_create_info { - uint auto_key; uint auto_key_type; ulong max_table_size; ulonglong auto_increment; + my_bool with_auto_increment; } HP_CREATE_INFO; /* Prototypes for heap-functions */ diff --git a/mysql-test/r/heap.result b/mysql-test/r/heap.result index b1cd17b444c..29207a4ae98 100644 --- a/mysql-test/r/heap.result +++ b/mysql-test/r/heap.result @@ -249,3 +249,45 @@ a 3 2 drop table t1; +create table t1 (a bigint unsigned auto_increment primary key, b int, +key (b, a)) engine=heap; +insert t1 (b) values (1); +insert t1 (b) values (1); +insert t1 (b) values (1); +insert t1 (b) values (1); +insert t1 (b) values (1); +insert t1 (b) values (1); +insert t1 (b) values (1); +insert t1 (b) values (1); +select * from t1; +a b +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +drop table t1; +create table t1 (a int not null, b int not null auto_increment, +primary key(a, b), key(b)) engine=heap; +insert t1 (a) values (1); +insert t1 (a) values (1); +insert t1 (a) values (1); +insert t1 (a) values (1); +insert t1 (a) values (1); +insert t1 (a) values (1); +insert t1 (a) values (1); +insert t1 (a) values (1); +select * from t1; +a b +1 1 +1 2 +1 3 +1 4 +1 5 +1 6 +1 7 +1 8 +drop table t1; diff --git a/mysql-test/t/heap.test b/mysql-test/t/heap.test index bc0b28370ec..e082993a58e 100644 --- a/mysql-test/t/heap.test +++ b/mysql-test/t/heap.test @@ -195,3 +195,32 @@ delete from t1 where a is null; insert into t1 values ('2'), ('3'); select * from t1; drop table t1; + +# +# Bug #8489: Strange auto_increment behaviour +# + +create table t1 (a bigint unsigned auto_increment primary key, b int, + key (b, a)) engine=heap; +insert t1 (b) values (1); +insert t1 (b) values (1); +insert t1 (b) values (1); +insert t1 (b) values (1); +insert t1 (b) values (1); +insert t1 (b) values (1); +insert t1 (b) values (1); +insert t1 (b) values (1); +select * from t1; +drop table t1; +create table t1 (a int not null, b int not null auto_increment, + primary key(a, b), key(b)) engine=heap; +insert t1 (a) values (1); +insert t1 (a) values (1); +insert t1 (a) values (1); +insert t1 (a) values (1); +insert t1 (a) values (1); +insert t1 (a) values (1); +insert t1 (a) values (1); +insert t1 (a) values (1); +select * from t1; +drop table t1; diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc index 
3c2249ce281..c483ab8fffa 100644 --- a/sql/ha_heap.cc +++ b/sql/ha_heap.cc @@ -446,6 +446,7 @@ int ha_heap::create(const char *name, TABLE *table_arg, HA_KEYSEG *seg; char buff[FN_REFLEN]; int error; + bool found_real_auto_increment= 0; for (key= parts= 0; key < table_arg->keys; key++) parts+= table_arg->key_info[key].key_parts; @@ -506,17 +507,20 @@ int ha_heap::create(const char *name, TABLE *table_arg, seg->null_bit= 0; seg->null_pos= 0; } + // We have to store field->key_type() as seg->type can differ from it if (field->flags & AUTO_INCREMENT_FLAG) - { - auto_key= key + 1; auto_key_type= field->key_type(); - } } } + if (table_arg->found_next_number_field) + { + keydef[table_arg->next_number_index].flag|= HA_AUTO_KEY; + found_real_auto_increment= table_arg->next_number_key_offset == 0; + } mem_per_row+= MY_ALIGN(table_arg->reclength + 1, sizeof(char*)); HP_CREATE_INFO hp_create_info; - hp_create_info.auto_key= auto_key; hp_create_info.auto_key_type= auto_key_type; + hp_create_info.with_auto_increment= found_real_auto_increment; hp_create_info.auto_increment= (create_info->auto_increment_value ? create_info->auto_increment_value - 1 : 0); hp_create_info.max_table_size=current_thd->variables.max_heap_table_size; From 04328c3dfa5de82abe7095d61d361d1c08b94a6f Mon Sep 17 00:00:00 2001 From: "ramil@mysql.com" <> Date: Tue, 15 Mar 2005 15:32:11 +0400 Subject: [PATCH 06/15] A fix (bug #8799: Killed filesorts can fail inited==RND assertion in ha_rnd_end). --- sql/filesort.cc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/sql/filesort.cc b/sql/filesort.cc index c6af8cfc1b7..75b114fc140 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -452,8 +452,11 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, if (*killed) { DBUG_PRINT("info",("Sort killed by user")); - (void) file->extra(HA_EXTRA_NO_CACHE); - file->ha_rnd_end(); + if (!indexfile && !quick_select) + { + (void) file->extra(HA_EXTRA_NO_CACHE); + file->ha_rnd_end(); + } DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */ } if (error == 0) From 2bdd710d3ce18a39eb8db29ff9e77c35c65c2454 Mon Sep 17 00:00:00 2001 From: "msvensson@neptunus.(none)" <> Date: Tue, 15 Mar 2005 15:03:25 +0100 Subject: [PATCH 07/15] WL#2269 Enable query cache for NDB part 2 -This is mostly fixes for correct behaviour when using query cache + transactions + the thread that fetches commit count from NDB at regular intervals. The major fix is to add a list in thd_ndb, that keeps a list of NDB_SHARE's that were modified by transaction and then "clearing" them in ndbcluster_commit. 
--- mysql-test/r/ndb_cache2.result | 538 +++++++++++++++++++++++++++++---- mysql-test/t/ndb_cache2.test | 280 +++++++++++++++-- sql/ha_ndbcluster.cc | 302 ++++++++++++------ sql/ha_ndbcluster.h | 5 +- sql/set_var.cc | 8 +- 5 files changed, 958 insertions(+), 175 deletions(-) diff --git a/mysql-test/r/ndb_cache2.result b/mysql-test/r/ndb_cache2.result index 2815674a20a..2876002f864 100644 --- a/mysql-test/r/ndb_cache2.result +++ b/mysql-test/r/ndb_cache2.result @@ -1,14 +1,45 @@ -drop table if exists t1; +drop table if exists t1, t2, t3, t4, t5; set GLOBAL query_cache_type=on; set GLOBAL query_cache_size=1355776; -set GLOBAL ndb_cache_check_time=1; +set GLOBAL ndb_cache_check_time=100; reset query cache; flush status; -CREATE TABLE t1 ( pk int not null primary key, -a int, b int not null, c varchar(20)) ENGINE=ndbcluster; +CREATE TABLE t1 ( +pk int not null primary key, +a1 int, +b1 int not null, +c1 varchar(20) +) ENGINE=ndb; +CREATE TABLE t2 ( +pk int not null primary key, +a2 int, +b2 int not null +) ENGINE=ndb; +CREATE TABLE t3 ( +pk int not null primary key, +a3 int, +b3 int not null, +c3 int not null, +d3 varchar(20) +) ENGINE=ndb; +CREATE TABLE t4 ( +a4 int, +b4 int not null, +c4 char(20) +) ENGINE=ndbcluster; +CREATE TABLE t5 ( +pk int not null primary key, +a5 int, +b5 int not null, +c5 varchar(255) +) ENGINE=ndbcluster; insert into t1 value (1, 2, 3, 'First row'); +insert into t2 value (1, 2, 3); +insert into t3 value (1, 2, 3, 4, '3 - First row'); +insert into t4 value (2, 3, '4 - First row'); +insert into t5 value (1, 2, 3, '5 - First row'); select * from t1; -pk a b c +pk a1 b1 c1 1 2 3 First row show status like "Qcache_queries_in_cache"; Variable_name Value @@ -20,14 +51,14 @@ show status like "Qcache_hits"; Variable_name Value Qcache_hits 0 select * from t1; -pk a b c +pk a1 b1 c1 1 2 3 First row show status like "Qcache_hits"; Variable_name Value Qcache_hits 1 -update t1 set a=3 where pk=1; +update t1 set a1=3 where pk=1; select * from t1; -pk a b c +pk a1 b1 c1 1 3 3 First row show status like "Qcache_inserts"; Variable_name Value @@ -38,7 +69,7 @@ Qcache_hits 1 insert into t1 value (2, 7, 8, 'Second row'); insert into t1 value (4, 5, 6, 'Fourth row'); select * from t1 order by pk desc; -pk a b c +pk a1 b1 c1 4 5 6 Fourth row 2 7 8 Second row 1 3 3 First row @@ -49,15 +80,15 @@ show status like "Qcache_hits"; Variable_name Value Qcache_hits 1 select * from t1 order by pk desc; -pk a b c +pk a1 b1 c1 4 5 6 Fourth row 2 7 8 Second row 1 3 3 First row show status like "Qcache_hits"; Variable_name Value Qcache_hits 2 -select * from t1 where b=3; -pk a b c +select * from t1 where b1=3; +pk a1 b1 c1 1 3 3 First row show status like "Qcache_queries_in_cache"; Variable_name Value @@ -65,44 +96,44 @@ Qcache_queries_in_cache 2 show status like "Qcache_hits"; Variable_name Value Qcache_hits 2 -select * from t1 where b=3; -pk a b c +select * from t1 where b1=3; +pk a1 b1 c1 1 3 3 First row show status like "Qcache_hits"; Variable_name Value Qcache_hits 3 -delete from t1 where c='Fourth row'; +delete from t1 where c1='Fourth row'; show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 0 -select * from t1 where b=3; -pk a b c +select * from t1 where b1=3; +pk a1 b1 c1 1 3 3 First row show status like "Qcache_hits"; Variable_name Value Qcache_hits 3 use test; select * from t1 order by pk desc; -pk a b c +pk a1 b1 c1 2 7 8 Second row 1 3 3 First row -select * from t1 where b=3; -pk a b c +select * from t1 where b1=3; +pk a1 b1 c1 1 3 3 First row show 
status like "Qcache_hits"; Variable_name Value Qcache_hits 4 -update t1 set a=4 where b=3; +update t1 set a1=4 where b1=3; use test; show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 0 select * from t1 order by pk desc; -pk a b c +pk a1 b1 c1 2 7 8 Second row 1 4 3 First row select * from t1 order by pk desc; -pk a b c +pk a1 b1 c1 2 7 8 Second row 1 4 3 First row show status like "Qcache_inserts"; @@ -112,11 +143,11 @@ show status like "Qcache_hits"; Variable_name Value Qcache_hits 5 select * from t1 order by pk desc; -pk a b c +pk a1 b1 c1 2 7 8 Second row 1 4 3 First row select * from t1 order by pk desc; -pk a b c +pk a1 b1 c1 2 7 8 Second row 1 4 3 First row show status like "Qcache_queries_in_cache"; @@ -128,64 +159,463 @@ Qcache_inserts 7 show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 +select * from t2; +pk a2 b2 +1 2 3 +select * from t3; +pk a3 b3 c3 d3 +1 2 3 4 3 - First row +select * from t4; +a4 b4 c4 +2 3 4 - First row +select * from t5; +pk a5 b5 c5 +1 2 3 5 - First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 5 +flush status; begin; -update t1 set a=5 where pk=1; +update t1 set a1=5 where pk=1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 4 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1 order by pk desc; +pk a1 b1 c1 +2 7 8 Second row +1 4 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 5 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +commit; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 5 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1 order by pk desc; +pk a1 b1 c1 +2 7 8 Second row +1 5 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1 order by pk desc; +pk a1 b1 c1 +2 7 8 Second row +1 5 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 5 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +flush status; +begin; +update t1 set a1=6 where pk=1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 4 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1 order by pk desc; +pk a1 b1 c1 +2 7 8 Second row +1 5 3 First row +select * from t1 order by pk desc; +pk a1 b1 c1 +2 7 8 Second row +1 5 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 5 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +select * from t1 order by pk desc; +pk a1 b1 c1 +2 7 8 Second row +1 6 3 First row +select * from t1 order by pk desc; +pk a1 b1 c1 +2 7 8 Second row +1 6 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 5 +show 
status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +commit; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 5 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +select * from t1 order by pk desc; +pk a1 b1 c1 +2 7 8 Second row +1 6 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +select * from t1 order by pk desc; +pk a1 b1 c1 +2 7 8 Second row +1 6 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 5 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 2 +flush status; +begin; +insert into t1 set pk=5, a1=6, b1=3, c1="New row"; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 4 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1 where pk=5; +pk a1 b1 c1 +select * from t1 order by pk desc; +pk a1 b1 c1 +2 7 8 Second row +1 6 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 6 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1 where pk=5; +pk a1 b1 c1 +5 6 3 New row +select * from t1 where pk=5; +pk a1 b1 c1 +5 6 3 New row +select * from t1 order by pk desc; +pk a1 b1 c1 +5 6 3 New row +2 7 8 Second row +1 6 3 First row +select * from t1 order by pk desc; +pk a1 b1 c1 +5 6 3 New row +2 7 8 Second row +1 6 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 6 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +commit; +select * from t1 order by pk desc; +pk a1 b1 c1 +5 6 3 New row +2 7 8 Second row +1 6 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 5 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +flush status; +begin; +delete from t1 where pk=2; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 4 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1 where pk=2; +pk a1 b1 c1 +2 7 8 Second row +select * from t1 order by pk desc; +pk a1 b1 c1 +5 6 3 New row +2 7 8 Second row +1 6 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 6 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1 where pk=2; +pk a1 b1 c1 +select * from t1 order by pk desc; +pk a1 b1 c1 +5 6 3 New row +1 6 3 First row +select * from t1 order by pk desc; +pk a1 b1 c1 +5 6 3 New row +1 6 3 First row +select * from t1 where pk=2; +pk a1 b1 c1 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 6 +show status like 
"Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +commit; +select * from t1 order by pk desc; +pk a1 b1 c1 +5 6 3 New row +1 6 3 First row +select * from t1 where pk=2; +pk a1 b1 c1 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 6 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 4 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +flush status; +begin; +update t1 set a1=9 where pk=1; +update t2 set a2=9 where pk=1; +update t3 set a3=9 where pk=1; +update t4 set a4=9 where a4=2; +update t5 set a5=9 where pk=1; show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 0 show status like "Qcache_inserts"; Variable_name Value -Qcache_inserts 7 +Qcache_inserts 0 show status like "Qcache_hits"; Variable_name Value -Qcache_hits 7 +Qcache_hits 0 select * from t1 order by pk desc; -pk a b c -2 7 8 Second row -1 4 3 First row +pk a1 b1 c1 +5 6 3 New row +1 6 3 First row +select * from t2; +pk a2 b2 +1 2 3 +select * from t3; +pk a3 b3 c3 d3 +1 2 3 4 3 - First row +select * from t4; +a4 b4 c4 +2 3 4 - First row +select * from t5; +pk a5 b5 c5 +1 2 3 5 - First row show status like "Qcache_queries_in_cache"; Variable_name Value -Qcache_queries_in_cache 1 +Qcache_queries_in_cache 5 show status like "Qcache_inserts"; Variable_name Value -Qcache_inserts 8 +Qcache_inserts 5 show status like "Qcache_hits"; Variable_name Value -Qcache_hits 7 +Qcache_hits 0 +select * from t1 order by pk desc; +pk a1 b1 c1 +5 6 3 New row +1 9 3 First row +select * from t1 order by pk desc; +pk a1 b1 c1 +5 6 3 New row +1 9 3 First row +select * from t2; +pk a2 b2 +1 9 3 +select * from t3; +pk a3 b3 c3 d3 +1 9 3 4 3 - First row +select * from t4; +a4 b4 c4 +9 3 4 - First row +select * from t5; +pk a5 b5 c5 +1 9 3 5 - First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 5 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 5 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 commit; +select * from t1 order by pk desc; +pk a1 b1 c1 +5 6 3 New row +1 9 3 First row +select * from t2; +pk a2 b2 +1 9 3 +select * from t3; +pk a3 b3 c3 d3 +1 9 3 4 3 - First row +select * from t4; +a4 b4 c4 +9 3 4 - First row +select * from t5; +pk a5 b5 c5 +1 9 3 5 - First row show status like "Qcache_queries_in_cache"; Variable_name Value -Qcache_queries_in_cache 1 +Qcache_queries_in_cache 5 show status like "Qcache_inserts"; Variable_name Value -Qcache_inserts 8 +Qcache_inserts 10 show status like "Qcache_hits"; Variable_name Value -Qcache_hits 7 +Qcache_hits 0 select * from t1 order by pk desc; -pk a b c -2 7 8 Second row -1 5 3 First row -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 9 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 7 -select * from t1 order by pk desc; -pk a b c -2 7 8 Second row -1 5 3 First row +pk a1 b1 c1 +5 6 3 New row +1 9 3 First row +select * from t2; +pk a2 b2 +1 9 3 +select * from t3; +pk a3 b3 c3 d3 +1 9 3 4 3 - First row +select * from t4; +a4 b4 c4 +9 3 4 - First row +select * from t5; +pk a5 b5 c5 +1 9 3 5 - First row show status like "Qcache_queries_in_cache"; Variable_name Value -Qcache_queries_in_cache 1 +Qcache_queries_in_cache 5 show status like "Qcache_inserts"; Variable_name Value -Qcache_inserts 9 +Qcache_inserts 10 show status like "Qcache_hits"; Variable_name Value -Qcache_hits 8 -drop 
table t1; +Qcache_hits 5 +select * from t1 order by pk desc; +pk a1 b1 c1 +5 6 3 New row +1 9 3 First row +select * from t2; +pk a2 b2 +1 9 3 +select * from t3; +pk a3 b3 c3 d3 +1 9 3 4 3 - First row +select * from t4; +a4 b4 c4 +9 3 4 - First row +select * from t5; +pk a5 b5 c5 +1 9 3 5 - First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 5 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 10 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 10 +select * from t1 order by pk desc; +pk a1 b1 c1 +5 6 3 New row +1 9 3 First row +select * from t2; +pk a2 b2 +1 9 3 +select * from t3; +pk a3 b3 c3 d3 +1 9 3 4 3 - First row +select * from t4; +a4 b4 c4 +9 3 4 - First row +select * from t5; +pk a5 b5 c5 +1 9 3 5 - First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 5 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 10 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 15 +drop table t1, t2, t3, t4, t5; show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 0 diff --git a/mysql-test/t/ndb_cache2.test b/mysql-test/t/ndb_cache2.test index 173095e9f1f..7f960e5ef3a 100644 --- a/mysql-test/t/ndb_cache2.test +++ b/mysql-test/t/ndb_cache2.test @@ -2,7 +2,7 @@ -- source include/have_ndb.inc --disable_warnings -drop table if exists t1; +drop table if exists t1, t2, t3, t4, t5; --enable_warnings @@ -10,19 +10,47 @@ drop table if exists t1; set GLOBAL query_cache_type=on; set GLOBAL query_cache_size=1355776; # Turn on thread that will fetch commit count for open tables -set GLOBAL ndb_cache_check_time=1; +set GLOBAL ndb_cache_check_time=100; reset query cache; flush status; -# Wait for thread to wake up and start "working" -sleep 20; - -# Create test table in NDB -CREATE TABLE t1 ( pk int not null primary key, - a int, b int not null, c varchar(20)) ENGINE=ndbcluster; +# Create test tables in NDB +CREATE TABLE t1 ( + pk int not null primary key, + a1 int, + b1 int not null, + c1 varchar(20) +) ENGINE=ndb; +CREATE TABLE t2 ( + pk int not null primary key, + a2 int, + b2 int not null +) ENGINE=ndb; +CREATE TABLE t3 ( + pk int not null primary key, + a3 int, + b3 int not null, + c3 int not null, + d3 varchar(20) +) ENGINE=ndb; +CREATE TABLE t4 ( + a4 int, + b4 int not null, + c4 char(20) +) ENGINE=ndbcluster; +CREATE TABLE t5 ( + pk int not null primary key, + a5 int, + b5 int not null, + c5 varchar(255) +) ENGINE=ndbcluster; insert into t1 value (1, 2, 3, 'First row'); +insert into t2 value (1, 2, 3); +insert into t3 value (1, 2, 3, 4, '3 - First row'); +insert into t4 value (2, 3, '4 - First row'); +insert into t5 value (1, 2, 3, '5 - First row'); -# Perform one query which should be inerted in query cache +# Perform one query which should be inserted in query cache select * from t1; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; @@ -33,7 +61,7 @@ select * from t1; show status like "Qcache_hits"; # Update the table and make sure the correct data is returned -update t1 set a=3 where pk=1; +update t1 set a1=3 where pk=1; select * from t1; show status like "Qcache_inserts"; show status like "Qcache_hits"; @@ -48,18 +76,18 @@ select * from t1 order by pk desc; show status like "Qcache_hits"; # Perform a "new" query and make sure the query cache is not hit -select * from t1 where b=3; +select * from t1 where b1=3; show status like "Qcache_queries_in_cache"; show status like 
"Qcache_hits"; # Same query again... -select * from t1 where b=3; +select * from t1 where b1=3; show status like "Qcache_hits"; # Delete from the table -delete from t1 where c='Fourth row'; +delete from t1 where c1='Fourth row'; show status like "Qcache_queries_in_cache"; -select * from t1 where b=3; +select * from t1 where b1=3; show status like "Qcache_hits"; # Start another connection and check that the query cache is hit @@ -67,11 +95,11 @@ connect (con1,localhost,root,,); connection con1; use test; select * from t1 order by pk desc; -select * from t1 where b=3; +select * from t1 where b1=3; show status like "Qcache_hits"; -# Update the table and switch to other connection -update t1 set a=4 where b=3; +# Update the table and switch to other connection +update t1 set a1=4 where b1=3; connect (con2,localhost,root,,); connection con2; use test; @@ -87,10 +115,23 @@ show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; -# Use transactions and make sure the query cache is not updated until -# transaction is commited +# Load all tables into cache +select * from t2; +select * from t3; +select * from t4; +select * from t5; +show status like "Qcache_queries_in_cache"; + +##################################################################### +# Start transaction and perform update +# Switch to other transaction and check that update does not show up +# Switch back and commit transaction +# Switch to other transaction and check that update shows up +##################################################################### +connection con1; +flush status; begin; -update t1 set a=5 where pk=1; +update t1 set a1=5 where pk=1; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; @@ -101,8 +142,6 @@ show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con1; commit; -# Sleep to let the query cache thread update commit count -sleep 10; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; @@ -116,8 +155,203 @@ show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; -drop table t1; +##################################################################### +# Start transaction and perform update +# Switch to other transaction and check that update does not show up +# Switch back, perform selects and commit transaction +# Switch to other transaction and check that update shows up +##################################################################### +connection con1; +flush status; +begin; +update t1 set a1=6 where pk=1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1 order by pk desc; +select * from t1 order by pk desc; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +# The two queries below will not hit cache since transaction is ongoing +select * from t1 order by pk desc; +select * from t1 order by pk desc; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +commit; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1 order by pk desc; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; 
+select * from t1 order by pk desc; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +##################################################################### +# Start transaction and perform insert +# Switch to other transaction and check that insert does not show up +# Switch back, perform selects and commit transaction +# Switch to other transaction and check that update shows up +##################################################################### +connection con1; +flush status; +begin; +insert into t1 set pk=5, a1=6, b1=3, c1="New row"; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1 where pk=5; +select * from t1 order by pk desc; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +# The below four queries will not be cached, trans is ongoing +select * from t1 where pk=5; +select * from t1 where pk=5; +select * from t1 order by pk desc; +select * from t1 order by pk desc; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +commit; + +connection con2; +select * from t1 order by pk desc; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +connection con1; + +##################################################################### +# Start transaction and perform delete +# Switch to other transaction and check that delete does not show up +# Switch back, perform selects and commit transaction +# Switch to other transaction and check that update shows up +##################################################################### +connection con1; +flush status; +begin; +delete from t1 where pk=2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1 where pk=2; +select * from t1 order by pk desc; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +# The below four queries will not be cached, trans is ongoing +select * from t1 where pk=2; +select * from t1 order by pk desc; +select * from t1 order by pk desc; +select * from t1 where pk=2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +commit; + +connection con2; +select * from t1 order by pk desc; +select * from t1 where pk=2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +connection con1; + +##################################################################### +# Start a transaction which updates all tables +# Switch to other transaction and check updates does not show up +# Switch back, perform selects and commit transaction +# Switch to other transaction and check that update shows up +##################################################################### +flush status; +begin; +update t1 set a1=9 where pk=1; +update t2 set a2=9 where pk=1; +update t3 set a3=9 where pk=1; +update t4 set a4=9 where a4=2; +update t5 set a5=9 where pk=1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1 order by pk desc; +select * from t2; +select * from t3; +select * from t4; +select * 
from t5; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +# The below five queries will not be cached, trans is ongoing +select * from t1 order by pk desc; +select * from t1 order by pk desc; +select * from t2; +select * from t3; +select * from t4; +select * from t5; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +commit; + +connection con2; +select * from t1 order by pk desc; +select * from t2; +select * from t3; +select * from t4; +select * from t5; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +connection con1; +select * from t1 order by pk desc; +select * from t2; +select * from t3; +select * from t4; +select * from t5; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1 order by pk desc; +select * from t2; +select * from t3; +select * from t4; +select * from t5; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +connection con2; +select * from t1 order by pk desc; +select * from t2; +select * from t3; +select * from t4; +select * from t5; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +drop table t1, t2, t3, t4, t5; + +# There should be no queries in cache, when tables have been dropped show status like "Qcache_queries_in_cache"; SET GLOBAL query_cache_size=0; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index f04b024d842..db4507916b1 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -286,7 +286,8 @@ Thd_ndb::~Thd_ndb() { if (ndb) delete ndb; - ndb= 0; + ndb= NULL; + changed_tables.empty(); } inline @@ -1954,7 +1955,7 @@ int ha_ndbcluster::write_row(byte *record) if (peek_res != HA_ERR_KEY_NOT_FOUND) DBUG_RETURN(peek_res); } - + statistic_increment(thd->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); @@ -2003,6 +2004,8 @@ int ha_ndbcluster::write_row(byte *record) } } + m_rows_changed++; + /* Execute write operation NOTE When doing inserts with many values in @@ -2196,6 +2199,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) } } + m_rows_changed++; + // Set non-key attribute(s) for (i= 0; i < table->s->fields; i++) { @@ -2278,7 +2283,9 @@ int ha_ndbcluster::delete_row(const byte *record) return res; } } - + + m_rows_changed++; + // Execute delete operation if (execute_no_commit(this,trans) != 0) { no_uncommitted_rows_execute_failure(); @@ -3181,14 +3188,14 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) Check that this handler instance has a connection set up to the Ndb object of thd */ - if (check_ndb_connection()) + if (check_ndb_connection(thd)) DBUG_RETURN(1); - + Thd_ndb *thd_ndb= get_thd_ndb(thd); Ndb *ndb= thd_ndb->ndb; - DBUG_PRINT("enter", ("transaction.thd_ndb->lock_count: %d", - thd_ndb->lock_count)); + DBUG_PRINT("enter", ("thd: %x, thd_ndb: %x, thd_ndb->lock_count: %d", + thd, thd_ndb, thd_ndb->lock_count)); if (lock_type != F_UNLCK) { @@ -3196,7 +3203,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) if (!thd_ndb->lock_count++) { PRINT_OPTION_FLAGS(thd); - if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN | OPTION_TABLE_LOCK))) { // Autocommit transaction @@ -3264,9 
+3270,10 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) m_active_trans= thd_ndb->all ? thd_ndb->all : thd_ndb->stmt; DBUG_ASSERT(m_active_trans); // Start of transaction + m_rows_changed= 0; m_retrieve_all_fields= FALSE; m_retrieve_primary_key= FALSE; - m_ops_pending= 0; + m_ops_pending= 0; { NDBDICT *dict= ndb->getDictionary(); const NDBTAB *tab; @@ -3278,10 +3285,28 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) m_table_info= tab_info; } no_uncommitted_rows_init(thd); - } - else + } + else { DBUG_PRINT("info", ("lock_type == F_UNLCK")); + + if (ndb_cache_check_time && m_rows_changed) + { + DBUG_PRINT("info", ("Rows has changed and util thread is running")); + if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + { + DBUG_PRINT("info", ("Add share to list of tables to be invalidated")); + /* NOTE push_back allocates memory using transactions mem_root! */ + thd_ndb->changed_tables.push_back(m_share, &thd->transaction.mem_root); + } + + pthread_mutex_lock(&m_share->mutex); + DBUG_PRINT("info", ("Invalidating commit_count")); + m_share->commit_count= 0; + m_share->commit_count_lock++; + pthread_mutex_unlock(&m_share->mutex); + } + if (!--thd_ndb->lock_count) { DBUG_PRINT("trans", ("Last external_lock")); @@ -3301,6 +3326,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) } m_table= NULL; m_table_info= NULL; + /* This is the place to make sure this handler instance no longer are connected to the active transaction. @@ -3374,7 +3400,7 @@ int ha_ndbcluster::start_stmt(THD *thd) /* - Commit a transaction started in NDB + Commit a transaction started in NDB */ int ndbcluster_commit(THD *thd, bool all) @@ -3386,7 +3412,7 @@ int ndbcluster_commit(THD *thd, bool all) DBUG_ENTER("ndbcluster_commit"); DBUG_PRINT("transaction",("%s", - trans == thd_ndb->stmt ? + trans == thd_ndb->stmt ? 
"stmt" : "all")); DBUG_ASSERT(ndb && trans); @@ -3394,18 +3420,31 @@ int ndbcluster_commit(THD *thd, bool all) { const NdbError err= trans->getNdbError(); const NdbOperation *error_op= trans->getNdbErrorOperation(); - ERR_PRINT(err); + ERR_PRINT(err); res= ndb_to_mysql_error(&err); - if (res != -1) + if (res != -1) ndbcluster_print_error(res, error_op); } ndb->closeTransaction(trans); - + if(all) thd_ndb->all= NULL; else thd_ndb->stmt= NULL; - + + /* Clear commit_count for tables changed by transaction */ + NDB_SHARE* share; + List_iterator_fast it(thd_ndb->changed_tables); + while ((share= it++)) + { + pthread_mutex_lock(&share->mutex); + DBUG_PRINT("info", ("Invalidate commit_count for %s, share->commit_count: %d ", share->table_name, share->commit_count)); + share->commit_count= 0; + share->commit_count_lock++; + pthread_mutex_unlock(&share->mutex); + } + thd_ndb->changed_tables.empty(); + DBUG_RETURN(res); } @@ -3443,6 +3482,9 @@ int ndbcluster_rollback(THD *thd, bool all) else thd_ndb->stmt= NULL; + /* Clear list of tables changed by transaction */ + thd_ndb->changed_tables.empty(); + DBUG_RETURN(res); } @@ -4135,6 +4177,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_rows_to_insert(1), m_rows_inserted(0), m_bulk_insert_rows(1024), + m_rows_changed(0), m_bulk_insert_not_flushed(FALSE), m_ops_pending(0), m_skip_auto_increment(TRUE), @@ -4147,9 +4190,9 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_autoincrement_prefetch(32), m_transaction_on(TRUE), m_multi_cursor(NULL) -{ +{ int i; - + DBUG_ENTER("ha_ndbcluster"); m_tabname[0]= '\0'; @@ -4309,9 +4352,8 @@ Ndb* check_ndb_in_thd(THD* thd) -int ha_ndbcluster::check_ndb_connection() +int ha_ndbcluster::check_ndb_connection(THD* thd) { - THD* thd= current_thd; Ndb *ndb; DBUG_ENTER("check_ndb_connection"); @@ -4385,33 +4427,31 @@ int ndbcluster_discover(THD* thd, const char *db, const char *name, /* Check if a table exists in NDB - + */ int ndbcluster_table_exists(THD* thd, const char *db, const char *name) { - uint len; - const void* data; const NDBTAB* tab; Ndb* ndb; DBUG_ENTER("ndbcluster_table_exists"); - DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); + DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(HA_ERR_NO_CONNECTION); + DBUG_RETURN(HA_ERR_NO_CONNECTION); ndb->setDatabaseName(db); NDBDICT* dict= ndb->getDictionary(); dict->set_local_table_data_size(sizeof(Ndb_table_local_info)); dict->invalidateTable(name); if (!(tab= dict->getTable(name))) - { + { const NdbError err= dict->getNdbError(); if (err.code == 709) DBUG_RETURN(0); ERR_RETURN(err); } - + DBUG_PRINT("info", ("Found table %s", tab->getName())); DBUG_RETURN(1); } @@ -4929,38 +4969,65 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, { DBUG_ENTER("ndb_get_commitcount"); + char name[FN_REFLEN]; + NDB_SHARE *share; + (void)strxnmov(name, FN_REFLEN, "./",dbname,"/",tabname,NullS); + DBUG_PRINT("enter", ("name: %s", name)); + pthread_mutex_lock(&ndbcluster_mutex); + if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables, + (byte*) name, + strlen(name)))) + { + pthread_mutex_unlock(&ndbcluster_mutex); + DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables", + name)); + DBUG_RETURN(1); + } + share->use_count++; + pthread_mutex_unlock(&ndbcluster_mutex); + + pthread_mutex_lock(&share->mutex); if (ndb_cache_check_time > 0) { - /* Use cached commit_count from share */ - char name[FN_REFLEN]; - NDB_SHARE *share; - (void)strxnmov(name, FN_REFLEN, - "./",dbname,"/",tabname,NullS); - 
DBUG_PRINT("info", ("name: %s", name)); - pthread_mutex_lock(&ndbcluster_mutex); - if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables, - (byte*) name, - strlen(name)))) + if (share->commit_count != 0) { - pthread_mutex_unlock(&ndbcluster_mutex); - DBUG_RETURN(1); + *commit_count= share->commit_count; + DBUG_PRINT("info", ("Getting commit_count: %llu from share", + share->commit_count)); + pthread_mutex_unlock(&share->mutex); + free_share(share); + DBUG_RETURN(0); } - *commit_count= share->commit_count; - DBUG_PRINT("info", ("commit_count: %d", *commit_count)); - pthread_mutex_unlock(&ndbcluster_mutex); - DBUG_RETURN(0); } - - /* Get commit_count from NDB */ + DBUG_PRINT("info", ("Get commit_count from NDB")); Ndb *ndb; if (!(ndb= check_ndb_in_thd(thd))) DBUG_RETURN(1); ndb->setDatabaseName(dbname); + uint lock= share->commit_count_lock; + pthread_mutex_unlock(&share->mutex); struct Ndb_statistics stat; if (ndb_get_table_statistics(ndb, tabname, &stat)) + { + free_share(share); DBUG_RETURN(1); - *commit_count= stat.commit_count; + } + + pthread_mutex_lock(&share->mutex); + if(share->commit_count_lock == lock) + { + DBUG_PRINT("info", ("Setting commit_count to %llu", stat.commit_count)); + share->commit_count= stat.commit_count; + *commit_count= stat.commit_count; + } + else + { + DBUG_PRINT("info", ("Discarding commit_count, comit_count_lock changed")); + *commit_count= 0; + } + pthread_mutex_unlock(&share->mutex); + free_share(share); DBUG_RETURN(0); } @@ -5007,27 +5074,37 @@ ndbcluster_cache_retrieval_allowed(THD *thd, char *dbname= full_name; char *tabname= dbname+strlen(dbname)+1; - DBUG_PRINT("enter",("dbname=%s, tabname=%s, autocommit=%d", - dbname, tabname, is_autocommit)); + DBUG_PRINT("enter", ("dbname: %s, tabname: %s, is_autocommit: %d", + dbname, tabname, is_autocommit)); if (!is_autocommit) + { + DBUG_PRINT("exit", ("No, don't use cache in transaction")); DBUG_RETURN(FALSE); + } if (ndb_get_commitcount(thd, dbname, tabname, &commit_count)) { - *engine_data+= 1; /* invalidate */ + *engine_data= 0; /* invalidate */ + DBUG_PRINT("exit", ("No, could not retrieve commit_count")); DBUG_RETURN(FALSE); } - DBUG_PRINT("info", ("*engine_data=%llu, commit_count=%llu", + DBUG_PRINT("info", ("*engine_data: %llu, commit_count: %llu", *engine_data, commit_count)); - if (*engine_data != commit_count) + if (commit_count == 0) + { + *engine_data= 0; /* invalidate */ + DBUG_PRINT("exit", ("No, local commit has been performed")); + DBUG_RETURN(FALSE); + } + else if (*engine_data != commit_count) { *engine_data= commit_count; /* invalidate */ - DBUG_PRINT("exit",("Do not use cache, commit_count has changed")); - DBUG_RETURN(FALSE); - } + DBUG_PRINT("exit", ("No, commit_count has changed")); + DBUG_RETURN(FALSE); + } - DBUG_PRINT("exit",("OK to use cache, *engine_data=%llu",*engine_data)); + DBUG_PRINT("exit", ("OK to use cache, engine_data: %llu", *engine_data)); DBUG_RETURN(TRUE); } @@ -5063,22 +5140,27 @@ ha_ndbcluster::register_query_cache_table(THD *thd, DBUG_ENTER("ha_ndbcluster::register_query_cache_table"); bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); - DBUG_PRINT("enter",("dbname=%s, tabname=%s, is_autocommit=%d", - m_dbname,m_tabname,is_autocommit)); + + DBUG_PRINT("enter",("dbname: %s, tabname: %s, is_autocommit: %d", + m_dbname, m_tabname, is_autocommit)); + if (!is_autocommit) + { + DBUG_PRINT("exit", ("Can't register table during transaction")) DBUG_RETURN(FALSE); + } Uint64 commit_count; if (ndb_get_commitcount(thd, m_dbname, m_tabname, 
&commit_count)) { *engine_data= 0; - DBUG_PRINT("error", ("Could not get commitcount")) + DBUG_PRINT("exit", ("Error, could not get commitcount")) DBUG_RETURN(FALSE); } *engine_data= commit_count; *engine_callback= ndbcluster_cache_retrieval_allowed; - DBUG_PRINT("exit",("*engine_data=%llu", *engine_data)); - DBUG_RETURN(TRUE); + DBUG_PRINT("exit", ("commit_count: %llu", commit_count)); + DBUG_RETURN(commit_count > 0); } @@ -5121,14 +5203,21 @@ static NDB_SHARE* get_share(const char *table_name) thr_lock_init(&share->lock); pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); share->commit_count= 0; + share->commit_count_lock= 0; + } + else + { + DBUG_PRINT("error", ("Failed to alloc share")); + pthread_mutex_unlock(&ndbcluster_mutex); + return 0; } } - DBUG_PRINT("share", - ("table_name: %s, length: %d, use_count: %d, commit_count: %d", - share->table_name, share->table_name_length, share->use_count, - share->commit_count)); - share->use_count++; + + DBUG_PRINT("share", + ("table_name: %s, length: %d, use_count: %d, commit_count: %d", + share->table_name, share->table_name_length, share->use_count, + share->commit_count)); pthread_mutex_unlock(&ndbcluster_mutex); return share; } @@ -5139,7 +5228,7 @@ static void free_share(NDB_SHARE *share) pthread_mutex_lock(&ndbcluster_mutex); if (!--share->use_count) { - hash_delete(&ndbcluster_open_tables, (byte*) share); + hash_delete(&ndbcluster_open_tables, (byte*) share); thr_lock_delete(&share->lock); pthread_mutex_destroy(&share->mutex); my_free((gptr) share, MYF(0)); @@ -5283,6 +5372,7 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, if (check == -1) break; + Uint32 count= 0; Uint64 sum_rows= 0; Uint64 sum_commits= 0; Uint64 sum_row_size= 0; @@ -5294,6 +5384,7 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, if (sum_row_size < size) sum_row_size= size; sum_mem+= mem; + count++; } if (check == -1) @@ -5308,8 +5399,11 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, ndbstat->row_size= sum_row_size; ndbstat->fragment_memory= sum_mem; - DBUG_PRINT("exit", ("records: %u commits: %u row_size: %d mem: %d", - sum_rows, sum_commits, sum_row_size, sum_mem)); + DBUG_PRINT("exit", ("records: %llu commits: %llu " + "row_size: %llu mem: %llu count: %u", + sum_rows, sum_commits, sum_row_size, + sum_mem, count)); + DBUG_RETURN(0); } while(0); @@ -5739,6 +5833,7 @@ extern "C" pthread_handler_decl(ndb_util_thread_func, arg __attribute__((unused))) { THD *thd; /* needs to be first for thread_stack */ + Ndb* ndb; int error= 0; struct timespec abstime; @@ -5748,12 +5843,13 @@ extern "C" pthread_handler_decl(ndb_util_thread_func, thd= new THD; /* note that contructor of THD uses DBUG_ */ THD_CHECK_SENTRY(thd); + ndb= new Ndb(g_ndb_cluster_connection, ""); pthread_detach_this_thread(); ndb_util_thread= pthread_self(); thd->thread_stack= (char*)&thd; /* remember where our stack is */ - if (thd->store_globals()) + if (thd->store_globals() && (ndb->init() != -1)) { thd->cleanup(); delete thd; @@ -5779,22 +5875,11 @@ extern "C" pthread_handler_decl(ndb_util_thread_func, if (ndb_cache_check_time == 0) { - /* Wake up in 10 seconds to check if value has changed */ - set_timespec(abstime, 10); + /* Wake up in 1 second to check if value has changed */ + set_timespec(abstime, 1); continue; } - /* Set new time to wake up */ - struct timeval tv; - gettimeofday(&tv,0); - abstime.tv_sec= tv.tv_sec + (ndb_cache_check_time / 1000); - abstime.tv_nsec= tv.tv_usec * 1000 + (ndb_cache_check_time % 1000); - if (abstime.tv_nsec >= 1000000000) - { - 
abstime.tv_sec += 1; - abstime.tv_nsec -= 1000000000; - } - /* Lock mutex and fill list with pointers to all open tables */ NDB_SHARE *share; pthread_mutex_lock(&ndbcluster_mutex); @@ -5814,7 +5899,7 @@ extern "C" pthread_handler_decl(ndb_util_thread_func, /* Iterate through the open files list */ List_iterator_fast it(util_open_tables); - while (share= it++) + while ((share= it++)) { /* Split tab- and dbname */ char buf[FN_REFLEN]; @@ -5825,26 +5910,37 @@ extern "C" pthread_handler_decl(ndb_util_thread_func, buf[length-1]= 0; db= buf+dirname_length(buf); DBUG_PRINT("ndb_util_thread", - ("Fetching commit count for: %s, db: %s, tab: %s", - share->table_name, db, tabname)); + ("Fetching commit count for: %s", + share->table_name)); /* Contact NDB to get commit count for table */ - g_ndb->setDatabaseName(db); - struct Ndb_statistics stat;; - if(ndb_get_table_statistics(g_ndb, tabname, &stat) == 0) + ndb->setDatabaseName(db); + struct Ndb_statistics stat; + + uint lock; + pthread_mutex_lock(&share->mutex); + lock= share->commit_count_lock; + pthread_mutex_unlock(&share->mutex); + + if(ndb_get_table_statistics(ndb, tabname, &stat) == 0) { DBUG_PRINT("ndb_util_thread", - ("Table: %s, rows: %llu, commit_count: %llu", - share->table_name, stat.row_count, stat.commit_count)); - share->commit_count= stat.commit_count; + ("Table: %s, commit_count: %llu, rows: %llu", + share->table_name, stat.commit_count, stat.row_count)); } else { DBUG_PRINT("ndb_util_thread", ("Error: Could not get commit count for table %s", share->table_name)); - share->commit_count++; /* Invalidate */ + stat.commit_count= 0; } + + pthread_mutex_lock(&share->mutex); + if (share->commit_count_lock == lock) + share->commit_count= stat.commit_count; + pthread_mutex_unlock(&share->mutex); + /* Decrease the use count and possibly free share */ free_share(share); } @@ -5852,6 +5948,26 @@ extern "C" pthread_handler_decl(ndb_util_thread_func, /* Clear the list of open tables */ util_open_tables.empty(); + /* Calculate new time to wake up */ + int secs= 0; + int msecs= ndb_cache_check_time; + + struct timeval tick_time; + gettimeofday(&tick_time, 0); + abstime.tv_sec= tick_time.tv_sec; + abstime.tv_nsec= tick_time.tv_usec * 1000; + + if(msecs >= 1000){ + secs= msecs / 1000; + msecs= msecs % 1000; + } + + abstime.tv_sec+= secs; + abstime.tv_nsec+= msecs * 1000000; + if (abstime.tv_nsec >= 1000000000) { + abstime.tv_sec+= 1; + abstime.tv_nsec-= 1000000000; + } } thd->cleanup(); diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 10ee568df69..a88a05b3543 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -60,6 +60,7 @@ typedef struct st_ndbcluster_share { pthread_mutex_t mutex; char *table_name; uint table_name_length,use_count; + uint commit_count_lock; ulonglong commit_count; } NDB_SHARE; @@ -77,6 +78,7 @@ class Thd_ndb { NdbTransaction *all; NdbTransaction *stmt; int error; + List changed_tables; }; class ha_ndbcluster: public handler @@ -226,7 +228,7 @@ private: char *update_table_comment(const char * comment); private: - int check_ndb_connection(); + int check_ndb_connection(THD* thd= current_thd); NdbTransaction *m_active_trans; NdbScanOperation *m_active_cursor; @@ -250,6 +252,7 @@ private: ha_rows m_rows_to_insert; ha_rows m_rows_inserted; ha_rows m_bulk_insert_rows; + ha_rows m_rows_changed; bool m_bulk_insert_not_flushed; ha_rows m_ops_pending; bool m_skip_auto_increment; diff --git a/sql/set_var.cc b/sql/set_var.cc index cdee9e6d4d1..6fc5fe68484 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -404,7 +404,7 
@@ sys_var_long_ptr sys_innodb_thread_concurrency("innodb_thread_concurrency", #ifdef HAVE_NDBCLUSTER_DB /* ndb thread specific variable settings */ -sys_var_thd_ulong +sys_var_thd_ulong sys_ndb_autoincrement_prefetch_sz("ndb_autoincrement_prefetch_sz", &SV::ndb_autoincrement_prefetch_sz); sys_var_thd_bool @@ -413,7 +413,8 @@ sys_var_thd_bool sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); sys_var_thd_bool sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions); -sys_var_long_ptr sys_ndb_cache_check_time("ndb_cache_check_time", &ndb_cache_check_time); +sys_var_long_ptr +sys_ndb_cache_check_time("ndb_cache_check_time", &ndb_cache_check_time); #endif /* Time/date/datetime formats */ @@ -686,10 +687,10 @@ sys_var *sys_variables[]= #endif #ifdef HAVE_NDBCLUSTER_DB &sys_ndb_autoincrement_prefetch_sz, + &sys_ndb_cache_check_time, &sys_ndb_force_send, &sys_ndb_use_exact_count, &sys_ndb_use_transactions, - &sys_ndb_cache_check_time, #endif &sys_unique_checks, &sys_updatable_views_with_limit, @@ -1276,7 +1277,6 @@ static int check_max_delayed_threads(THD *thd, set_var *var) return 0; } - static void fix_max_connections(THD *thd, enum_var_type type) { #ifndef EMBEDDED_LIBRARY From 4ee9cc5a98d212ef40cca80aee2f0b6420becc88 Mon Sep 17 00:00:00 2001 From: "ramil@mysql.com" <> Date: Tue, 15 Mar 2005 19:43:32 +0400 Subject: [PATCH 08/15] after merge fix --- sql/ha_heap.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc index 28a7b5529c9..c8d0e5c1c18 100644 --- a/sql/ha_heap.cc +++ b/sql/ha_heap.cc @@ -531,8 +531,8 @@ int ha_heap::create(const char *name, TABLE *table_arg, mem_per_row); if (table_arg->found_next_number_field) { - keydef[table_arg->next_number_index].flag|= HA_AUTO_KEY; - found_real_auto_increment= table_arg->next_number_key_offset == 0; + keydef[share->next_number_index].flag|= HA_AUTO_KEY; + found_real_auto_increment= share->next_number_key_offset == 0; } HP_CREATE_INFO hp_create_info; hp_create_info.auto_key_type= auto_key_type; From 10c06f257a11429e790c4b2e101814b3af4d1040 Mon Sep 17 00:00:00 2001 From: "serg@serg.mylan" <> Date: Tue, 15 Mar 2005 18:31:56 +0100 Subject: [PATCH 09/15] include/my_global.h define _XOPEN_SOURCE=500 for solaris include/my_sys.h remove a cast --- include/my_global.h | 15 +++++++++++++++ include/my_sys.h | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/include/my_global.h b/include/my_global.h index e9470ac48aa..e78ec21523d 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -120,6 +120,21 @@ #define __STDC_EXT__ 1 /* To get large file support on hpux */ #endif +/* + Solaris include file refers to X/Open document + + System Interfaces and Headers, Issue 5 + + saying we should define _XOPEN_SOURCE=500 to get POSIX.1c prototypes + but apparently other systems (namely FreeBSD) don't agree. + Furthermore X/Open has since 2004 "System Interfaces, Issue 6" + that dictates _XOPEN_SOURCE=600, but Solaris checks for 500. + So, let's define 500 for solaris only. +*/ +#ifdef __sun__ +#define _XOPEN_SOURCE 500 +#endif + #if defined(THREAD) && !defined(__WIN__) && !defined(OS2) #ifndef _POSIX_PTHREAD_SEMANTICS #define _POSIX_PTHREAD_SEMANTICS /* We want posix threads */ diff --git a/include/my_sys.h b/include/my_sys.h index ce785b58da4..afd2803b75d 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -812,7 +812,7 @@ my_bool my_gethwaddr(uchar *to); /* qnx ? 
*/ #define my_getpagesize() 8192 #endif -#define my_munmap(a,b) munmap((char*)(a),(b)) +#define my_munmap(a,b) munmap((a),(b)) #else /* not a complete set of mmap() flags, but only those that nesessary */ From 887f2a53fee044ad02e9592c4ad3583d2876848f Mon Sep 17 00:00:00 2001 From: "reggie@mdk10.(none)" <> Date: Tue, 15 Mar 2005 11:33:06 -0600 Subject: [PATCH 10/15] Bug #6660 mysqldump creates bad pathnames on Windows This is a modification of my previous patch after receiving feedback. This is a better way to fix the problem. With this patch, data directory and index directory will use only forward slashes (/) when on Windows. mysqldump.c: Removed fixPaths routine. Was improper fix for bug #6660 sql_show.cc: Changed append_directory to convert backslashes to forward slashes when on Windows. --- client/mysqldump.c | 21 --------------------- sql/sql_show.cc | 9 +++++++++ 2 files changed, 9 insertions(+), 21 deletions(-) diff --git a/client/mysqldump.c b/client/mysqldump.c index 5ac5efb5128..fa36ce0242d 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -1081,26 +1081,6 @@ static void print_xml_row(FILE *xml_file, const char *row_name, } -/* fixPaths -- on Windows only, this function will iterate through the output - of show create table and change any \ characters that appear in the data directory - or index directory elements to be / - - RETURN - void -*/ -static void fixPaths(char *buf, int buflen) -{ -#ifdef __WIN__ - int i = 0; - for (i=0; i < buflen; i++) - { - if (buf[i] != '\\') continue; - if (i != 0 && buf[i-1] == '\\') continue; - if (i != (buflen-1) && buf[i+1] == '\\') continue; - buf[i] = '/';} -#endif -} - /* getStructure -- retrievs database structure, prints out corresponding CREATE statement and fills out insert_pat. @@ -1180,7 +1160,6 @@ static uint getTableStructure(char *table, char* db) tableRes=mysql_store_result(sock); row=mysql_fetch_row(tableRes); - fixPaths(row[1], strlen(row[1])); // this really only does something on Windows fprintf(sql_file, "%s;\n", row[1]); check_io(sql_file); mysql_free_result(tableRes); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 8d741b4dc67..a85a6f92d70 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1227,7 +1227,16 @@ static void append_directory(THD *thd, String *packet, const char *dir_type, packet->append(' '); packet->append(dir_type); packet->append(" DIRECTORY='", 12); +#ifdef __WIN__ + char *winfilename = strdup(filename); + for (uint i=0; i < length; i++) + if (winfilename[i] == '\\') + winfilename[i] = '/'; + packet->append(winfilename, length); + free(winfilename); +#else packet->append(filename, length); +#endif packet->append('\''); } } From ed9c9732c98fe60822cf7004c1cd416fcd762c9c Mon Sep 17 00:00:00 2001 From: "serg@serg.mylan" <> Date: Tue, 15 Mar 2005 18:45:29 +0100 Subject: [PATCH 11/15] sun forte does not define __sun__, only __sun and sun (gcc defines all the three) --- include/my_global.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/my_global.h b/include/my_global.h index e78ec21523d..2601c53bb92 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -131,7 +131,7 @@ that dictates _XOPEN_SOURCE=600, but Solaris checks for 500. So, let's define 500 for solaris only.
*/ -#ifdef __sun__ +#ifdef __sun #define _XOPEN_SOURCE 500 #endif From 5749c537edab388b6d892515751a0b71fdc2c6ee Mon Sep 17 00:00:00 2001 From: "kent@mysql.com" <> Date: Tue, 15 Mar 2005 19:48:42 +0100 Subject: [PATCH 12/15] mysql-test-run.sh: Added feature to disable tests from a list in a file "disabled.def" Moved down the code that disables, so that --do-test and --start-from don't list the disabled tests not in range. disabled.def: List of test cases to temporarily disable --- mysql-test/mysql-test-run.sh | 20 ++++++++++++++------ mysql-test/t/disabled.def | 20 ++++++++++++++++++++ 2 files changed, 34 insertions(+), 6 deletions(-) create mode 100644 mysql-test/t/disabled.def diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index afed2e2ac84..b69bac1ad0b 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -1506,12 +1506,6 @@ run_testcase () if [ -n "$RESULT_EXT" -a \( x$RECORD = x1 -o -f "$result_file$RESULT_EXT" \) ] ; then result_file="$result_file$RESULT_EXT" fi - if [ -f "$TESTDIR/$tname.disabled" ] - then - comment=`$CAT $TESTDIR/$tname.disabled`; - disable_test $tname "$comment" - return - fi if [ "$USE_MANAGER" = 1 ] ; then many_slaves=`$EXPR \( \( $tname : rpl_failsafe \) != 0 \) \| \( \( $tname : rpl_chain_temp_table \) != 0 \)` fi @@ -1541,6 +1535,20 @@ run_testcase () return fi + if [ -f "$TESTDIR/$tname.disabled" ] + then + comment=`$CAT $TESTDIR/$tname.disabled`; + disable_test $tname "$comment" + return + fi + comment=`$GREP "^$tname *: *" $TESTDIR/disabled.def`; + if [ -n "$comment" ] + then + comment=`echo $comment | sed 's/^[^:]*: *//'` + disable_test $tname "$comment" + return + fi + if [ "x$USE_EMBEDDED_SERVER" != "x1" ] ; then # Stop all slave threads, so that we don't have useless reconnection # attempts and error messages in case the slave and master servers restart. diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def new file mode 100644 index 00000000000..d2ab8158c51 --- /dev/null +++ b/mysql-test/t/disabled.def @@ -0,0 +1,20 @@ +############################################################################## +# +# List the test cases that are to be disabled temporarily. +# +# Separate the test case name and the comment with ':'. +# +# : Comment test +# +# Don't use any TAB characters for whitespace. +# +############################################################################## + +ndb_alter_table : NDB team needs to fix +ndb_autodiscover : NDB team needs to fix +ndb_autodiscover2 : NDB team needs to fix +ndb_cache_multi : NDB team needs to fix +ndb_cache_multi2 : NDB team needs to fix +ndb_multi : NDB team needs to fix +ndb_restore : NDB team needs to fix + From 28bf536411100596694d4380dc2e613653df9369 Mon Sep 17 00:00:00 2001 From: "petr@mysql.com" <> Date: Tue, 15 Mar 2005 22:51:13 +0300 Subject: [PATCH 13/15] IM setup patch. Enable IM instead of mysqld_safe in start/stop script. Alter RPM to include mysqlmanager binary and config files.
--- support-files/mysql.server.sh | 59 +++++++++++++++++++---------------- support-files/mysql.spec.sh | 13 ++++++-- 2 files changed, 43 insertions(+), 29 deletions(-) diff --git a/support-files/mysql.server.sh b/support-files/mysql.server.sh index b361bac74d2..9e5c7ea6983 100644 --- a/support-files/mysql.server.sh +++ b/support-files/mysql.server.sh @@ -53,8 +53,10 @@ then basedir=@prefix@ bindir=@bindir@ datadir=@localstatedir@ + sbindir=@sbindir@ else bindir="$basedir/bin" + sbindir="$basedir/sbin" fi # @@ -79,11 +81,18 @@ case `echo "testing\c"`,`echo -n testing` in *) echo_n= echo_c='\c' ;; esac -parse_arguments() { +parse_server_arguments() { for arg do case "$arg" in --basedir=*) basedir=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --datadir=*) datadir=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; + esac + done +} + +parse_manager_arguments() { + for arg do + case "$arg" in --pid-file=*) pid_file=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; esac done @@ -104,7 +113,7 @@ wait_for_pid () { } # Get arguments from the my.cnf file, -# groups [mysqld] [mysql_server] and [mysql.server] +# the only group, which is read from now on is [mysqld] if test -x ./bin/my_print_defaults then print_defaults="./bin/my_print_defaults" @@ -153,14 +162,17 @@ then extra_args="-e $datadir/my.cnf" fi -parse_arguments `$print_defaults $extra_args mysqld server mysql_server mysql.server` +parse_server_arguments `$print_defaults $extra_args mysqld` + +# Look for the pidfile +parse_manager_arguments `$print_defaults $extra_args manager` # # Set pid file if not given # if test -z "$pid_file" then - pid_file=$datadir/`@HOSTNAME@`.pid + pid_file=$datadir/mysqlmanager-`@HOSTNAME@`.pid else case "$pid_file" in /* ) ;; @@ -168,6 +180,9 @@ else esac fi +user=@MYSQLD_USER@ +USER_OPTION="--user=$user" + # Safeguard (relative paths, core dumps..) cd $basedir @@ -175,21 +190,21 @@ case "$mode" in 'start') # Start daemon - if test -x $bindir/mysqld_safe + if test -x $sbindir/mysqlmanager then # Give extra arguments to mysqld with the my.cnf file. This script may # be overwritten at next upgrade. echo $echo_n "Starting MySQL" - $bindir/mysqld_safe --datadir=$datadir --pid-file=$pid_file >/dev/null 2>&1 & + $sbindir/mysqlmanager $USER_OPTION --pid-file=$pid_file >/dev/null 2>&1 & wait_for_pid - + # Make lock for RedHat / SuSE if test -w /var/lock/subsys then - touch /var/lock/subsys/mysql + touch /var/lock/subsys/mysqlmanager fi else - log_failure_msg "Can't execute $bindir/mysqld_safe" + log_failure_msg "Can't execute $sbindir/mysqlmanager" fi ;; @@ -198,19 +213,19 @@ case "$mode" in # root password. if test -s "$pid_file" then - mysqld_pid=`cat $pid_file` + mysqlmanager_pid=`cat $pid_file` echo $echo_n "Shutting down MySQL" - kill $mysqld_pid - # mysqld should remove the pid_file when it exits, so wait for it. + kill $mysqlmanager_pid + # mysqlmanager should remove the pid_file when it exits, so wait for it. wait_for_pid # delete lock for RedHat / SuSE - if test -f /var/lock/subsys/mysql + if test -f /var/lock/subsys/mysqlmanager then - rm -f /var/lock/subsys/mysql + rm -f /var/lock/subsys/mysqlmanager fi else - log_failure_msg "MySQL PID file could not be found!" + log_failure_msg "mysqlmanager PID file could not be found!" fi ;; @@ -219,21 +234,11 @@ case "$mode" in # running or not, start it again. 
$0 stop $0 start - ;; - - 'reload') - if test -s "$pid_file" ; then - mysqld_pid=`cat $pid_file` - kill -HUP $mysqld_pid && log_success_msg "Reloading service MySQL" - touch $pid_file - else - log_failure_msg "MySQL PID file could not be found!" - fi - ;; + ;; *) # usage - echo "Usage: $0 start|stop|restart|reload" + echo "Usage: $0 start|stop|restart" exit 1 ;; esac diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index a7ee5fa8a07..b062930041a 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -429,9 +429,11 @@ ln -s %{_sysconfdir}/init.d/mysql $RPM_BUILD_ROOT%{_sbindir}/rcmysql # (safe_mysqld will be gone in MySQL 4.1) ln -sf ./mysqld_safe $RBR%{_bindir}/safe_mysqld -# Touch the place where the my.cnf config file might be located +# Touch the place where the my.cnf config file and mysqlmanager.passwd +# (MySQL Instance Manager password file) might be located # Just to make sure it's in the file list and marked as a config file touch $RBR%{_sysconfdir}/my.cnf +touch $RBR%{_sysconfdir}/mysqlmanager.passwd %pre server # Shut down a previously installed server first @@ -551,6 +553,7 @@ fi %doc %attr(644, root, man) %{_mandir}/man1/replace.1* %ghost %config(noreplace,missingok) %{_sysconfdir}/my.cnf +%ghost %config(noreplace,missingok) %{_sysconfdir}/mysqlmanager.passwd %attr(755, root, root) %{_bindir}/my_print_defaults %attr(755, root, root) %{_bindir}/myisamchk @@ -579,6 +582,7 @@ fi %attr(755, root, root) %{_bindir}/safe_mysqld %attr(755, root, root) %{_sbindir}/mysqld +%attr(755, root, root) %{_sbindir}/mysqlmanager %attr(755, root, root) %{_sbindir}/rcmysql %attr(644, root, root) %{_libdir}/mysql/mysqld.sym @@ -690,9 +694,14 @@ fi # itself - note that they must be ordered by date (important when # merging BK trees) %changelog +* Sun Feb 20 2005 Petr Chardin + +- Install MySQL Instance Manager together with mysqld, touch mysqlmanager + password file + * Mon Feb 14 2005 Lenz Grimmer -* Fixed the compilation comments and moved them into the separate build sections +- Fixed the compilation comments and moved them into the separate build sections for Max and Standard * Mon Feb 7 2005 Tomas Ulin From 6c00af77a07194effdf5aa23196b4e47efb6e203 Mon Sep 17 00:00:00 2001 From: "reggie@mdk10.(none)" <> Date: Tue, 15 Mar 2005 16:24:37 -0600 Subject: [PATCH 14/15] Bug #6660 mysqldump creates bad pathnames on Windows sql_show.cc: changed strdup to thd->memdup per Serg's advice --- sql/sql_show.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sql/sql_show.cc b/sql/sql_show.cc index a85a6f92d70..76ea72ef41c 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1228,12 +1228,11 @@ static void append_directory(THD *thd, String *packet, const char *dir_type, packet->append(dir_type); packet->append(" DIRECTORY='", 12); #ifdef __WIN__ - char *winfilename = strdup(filename); + char *winfilename = thd->memdup(filename, length); for (uint i=0; i < length; i++) if (winfilename[i] == '\\') winfilename[i] = '/'; packet->append(winfilename, length); - free(winfilename); #else packet->append(filename, length); #endif From edf59e548058ede3dd8deaee71bd70d6c28667d7 Mon Sep 17 00:00:00 2001 From: "heikki@hundin.mysql.fi" <> Date: Wed, 16 Mar 2005 00:34:15 +0200 Subject: [PATCH 15/15] Many files: InnoDB true VARCHAR --- innobase/include/data0type.h | 21 ++- innobase/include/data0type.ic | 13 ++ innobase/include/que0que.h | 3 +- innobase/include/row0mysql.h | 102 ++++++---- innobase/include/row0mysql.ic | 146 -------------- innobase/row/row0ins.c | 4
+ innobase/row/row0mysql.c | 252 ++++++++++++++++++++++--- innobase/row/row0sel.c | 131 +++++++------ innobase/trx/trx0trx.c | 2 +- sql/ha_innodb.cc | 346 +++++++++++++++++++++++----------- sql/ha_innodb.h | 21 ++- 11 files changed, 652 insertions(+), 389 deletions(-) diff --git a/innobase/include/data0type.h b/innobase/include/data0type.h index 174665ca1fa..b5120e22041 100644 --- a/innobase/include/data0type.h +++ b/innobase/include/data0type.h @@ -24,7 +24,11 @@ extern dtype_t* dtype_binary; /*-------------------------------------------*/ /* The 'MAIN TYPE' of a column */ #define DATA_VARCHAR 1 /* character varying of the - latin1_swedish_ci charset-collation */ + latin1_swedish_ci charset-collation; note + that the MySQL format for this, DATA_BINARY, + DATA_VARMYSQL, is also affected by whether the + 'precise type' contains + DATA_MYSQL_TRUE_VARCHAR */ #define DATA_CHAR 2 /* fixed length character of the latin1_swedish_ci charset-collation */ #define DATA_FIXBINARY 3 /* binary string of fixed length */ @@ -102,6 +106,8 @@ columns, and for them the precise type is usually not used at all. #define DATA_MYSQL_TYPE_MASK 255 /* AND with this mask to extract the MySQL type from the precise type */ +#define DATA_MYSQL_TRUE_VARCHAR 15 /* MySQL type code for the >= 5.0.3 + format true VARCHAR */ /* Precise data types for system columns and the length of those columns; NOTE: the values must run from 0 up in the order given! All codes must @@ -134,6 +140,10 @@ be less than 256 */ In earlier versions this was set for some BLOB columns. */ +#define DATA_LONG_TRUE_VARCHAR 4096 /* this is ORed to the precise data + type when the column is true VARCHAR where + MySQL uses 2 bytes to store the data len; + for shorter VARCHARs MySQL uses only 1 byte */ /*-------------------------------------------*/ /* This many bytes we need to store the type information affecting the @@ -144,6 +154,15 @@ SQL null*/ store the charset-collation number; one byte is left unused, though */ #define DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE 6 +/************************************************************************* +Gets the MySQL type code from a dtype. */ +UNIV_INLINE +ulint +dtype_get_mysql_type( +/*=================*/ + /* out: MySQL type code; this is NOT an InnoDB + type code! */ + dtype_t* type); /* in: type struct */ /************************************************************************* Determine how many bytes the first n characters of the given string occupy. If the string is shorter than n characters, returns the number of bytes diff --git a/innobase/include/data0type.ic b/innobase/include/data0type.ic index e63dde98974..bf04e1c9b27 100644 --- a/innobase/include/data0type.ic +++ b/innobase/include/data0type.ic @@ -32,6 +32,19 @@ dtype_get_charset_coll( return((prtype >> 16) & 0xFFUL); } +/************************************************************************* +Gets the MySQL type code from a dtype. */ +UNIV_INLINE +ulint +dtype_get_mysql_type( +/*=================*/ + /* out: MySQL type code; this is NOT an InnoDB + type code! */ + dtype_t* type) /* in: type struct */ +{ + return(type->prtype & 0xFFUL); +} + /************************************************************************* Sets the mbminlen and mbmaxlen members of a data type structure. 
*/ UNIV_INLINE diff --git a/innobase/include/que0que.h b/innobase/include/que0que.h index 298ec494750..4113e52d425 100644 --- a/innobase/include/que0que.h +++ b/innobase/include/que0que.h @@ -359,7 +359,8 @@ struct que_thr_struct{ the control came */ ulint resource; /* resource usage of the query thread thus far */ - ulint lock_state; /* lock state of thread (table or row) */ + ulint lock_state; /* lock state of thread (table or + row) */ }; #define QUE_THR_MAGIC_N 8476583 diff --git a/innobase/include/row0mysql.h b/innobase/include/row0mysql.h index 48a9d9bc941..e44d689b88b 100644 --- a/innobase/include/row0mysql.h +++ b/innobase/include/row0mysql.h @@ -21,36 +21,6 @@ Created 9/17/2000 Heikki Tuuri typedef struct row_prebuilt_struct row_prebuilt_t; -/*********************************************************************** -Stores a variable-length field (like VARCHAR) length to dest, in the -MySQL format. */ -UNIV_INLINE -byte* -row_mysql_store_var_len( -/*====================*/ - /* out: dest + 2 */ - byte* dest, /* in: where to store */ - ulint len); /* in: length, must fit in two bytes */ -/*********************************************************************** -Reads a MySQL format variable-length field (like VARCHAR) length and -returns pointer to the field data. */ -UNIV_INLINE -byte* -row_mysql_read_var_ref( -/*===================*/ - /* out: field + 2 */ - ulint* len, /* out: variable-length field length */ - byte* field); /* in: field */ -/*********************************************************************** -Reads a MySQL format variable-length field (like VARCHAR) length and -returns pointer to the field data. */ - -byte* -row_mysql_read_var_ref_noninline( -/*=============================*/ - /* out: field + 2 */ - ulint* len, /* out: variable-length field length */ - byte* field); /* in: field */ /*********************************************************************** Frees the blob heap in prebuilt when no longer needed. */ @@ -60,6 +30,30 @@ row_mysql_prebuilt_free_blob_heap( row_prebuilt_t* prebuilt); /* in: prebuilt struct of a ha_innobase:: table handle */ /*********************************************************************** +Stores a >= 5.0.3 format true VARCHAR length to dest, in the MySQL row +format. */ + +byte* +row_mysql_store_true_var_len( +/*=========================*/ + /* out: pointer to the data, we skip the 1 or 2 bytes + at the start that are used to store the len */ + byte* dest, /* in: where to store */ + ulint len, /* in: length, must fit in two bytes */ + ulint lenlen);/* in: storage length of len: either 1 or 2 bytes */ +/*********************************************************************** +Reads a >= 5.0.3 format true VARCHAR length, in the MySQL row format, and +returns a pointer to the data. */ + +byte* +row_mysql_read_true_varchar( +/*========================*/ + /* out: pointer to the data, we skip the 1 or 2 bytes + at the start that are used to store the len */ + ulint* len, /* out: variable-length field length */ + byte* field, /* in: field in the MySQL format */ + ulint lenlen);/* in: storage length of len: either 1 or 2 bytes */ +/*********************************************************************** Stores a reference to a BLOB in the MySQL format. */ void @@ -83,24 +77,40 @@ row_mysql_read_blob_ref( ulint col_len); /* in: BLOB reference length (not BLOB length) */ /****************************************************************** -Stores a non-SQL-NULL field given in the MySQL format in the Innobase -format. 
*/ -UNIV_INLINE -void +Stores a non-SQL-NULL field given in the MySQL format in the InnoDB format. +The counterpart of this function is row_sel_field_store_in_mysql_format() in +row0sel.c. */ + +byte* row_mysql_store_col_in_innobase_format( /*===================================*/ - dfield_t* dfield, /* in/out: dfield */ - byte* buf, /* in/out: buffer for the converted - value */ + /* out: up to which byte we used + buf in the conversion */ + dfield_t* dfield, /* in/out: dfield where dtype + information must be already set when + this function is called! */ + byte* buf, /* in/out: buffer for a converted + integer value; this must be at least + col_len long then! */ + ibool row_format_col, /* TRUE if the mysql_data is from + a MySQL row, FALSE if from a MySQL + key value; + in MySQL, a true VARCHAR storage + format differs in a row and in a + key value: in a key value the length + is always stored in 2 bytes! */ byte* mysql_data, /* in: MySQL column value, not SQL NULL; NOTE that dfield may also get a pointer to mysql_data, therefore do not discard this as long as dfield is used! */ - ulint col_len, /* in: MySQL column length */ - ulint type, /* in: data type */ - bool comp, /* in: TRUE=compact format */ - ulint is_unsigned); /* in: != 0 if unsigned integer type */ + ulint col_len, /* in: MySQL column length; NOTE that + this is the storage length of the + column in the MySQL format row, not + necessarily the length of the actual + payload data; if the column is a true + VARCHAR then this is irrelevant */ + ibool comp); /* in: TRUE = compact format */ /******************************************************************** Handles user errors and lock waits detected by the database engine. */ @@ -457,6 +467,16 @@ struct mysql_row_templ_struct { zero if column cannot be NULL */ ulint type; /* column type in Innobase mtype numbers DATA_CHAR... */ + ulint mysql_type; /* MySQL type code; this is always + < 256 */ + ulint mysql_length_bytes; /* if mysql_type + == DATA_MYSQL_TRUE_VARCHAR, this tells + whether we should use 1 or 2 bytes to + store the MySQL true VARCHAR data + length at the start of row in the MySQL + format (NOTE that the MySQL key value + format always uses 2 bytes for the data + len) */ ulint charset; /* MySQL charset-collation code of the column, or zero */ ulint mbminlen; /* minimum length of a char, in bytes, diff --git a/innobase/include/row0mysql.ic b/innobase/include/row0mysql.ic index 910546e298c..aa8a70d8761 100644 --- a/innobase/include/row0mysql.ic +++ b/innobase/include/row0mysql.ic @@ -5,149 +5,3 @@ MySQL interface for Innobase Created 1/23/2001 Heikki Tuuri *******************************************************/ - -/*********************************************************************** -Stores a variable-length field (like VARCHAR) length to dest, in the -MySQL format. No real var implemented in MySQL yet! */ -UNIV_INLINE -byte* -row_mysql_store_var_len( -/*====================*/ - /* out: dest + 2 */ - byte* dest, /* in: where to store */ - ulint len __attribute__((unused))) /* in: length, must fit in two - bytes */ -{ - ut_ad(len < 256 * 256); -/* - mach_write_to_2_little_endian(dest, len); - - return(dest + 2); -*/ - return(dest); /* No real var implemented in MySQL yet! */ -} - -/*********************************************************************** -Reads a MySQL format variable-length field (like VARCHAR) length and -returns pointer to the field data. No real var implemented in MySQL yet! 
*/ -UNIV_INLINE -byte* -row_mysql_read_var_ref( -/*===================*/ - /* out: field + 2 */ - ulint* len, /* out: variable-length field length; does not work - yet! */ - byte* field) /* in: field */ -{ -/* - *len = mach_read_from_2_little_endian(field); - - return(field + 2); -*/ - UT_NOT_USED(len); - - return(field); /* No real var implemented in MySQL yet! */ -} - -/****************************************************************** -Stores a non-SQL-NULL field given in the MySQL format in the Innobase -format. */ -UNIV_INLINE -void -row_mysql_store_col_in_innobase_format( -/*===================================*/ - dfield_t* dfield, /* in/out: dfield */ - byte* buf, /* in/out: buffer for the converted - value; this must be at least col_len - long! */ - byte* mysql_data, /* in: MySQL column value, not - SQL NULL; NOTE that dfield may also - get a pointer to mysql_data, - therefore do not discard this as long - as dfield is used! */ - ulint col_len, /* in: MySQL column length */ - ulint type, /* in: data type */ - bool comp, /* in: TRUE=compact format */ - ulint is_unsigned) /* in: != 0 if unsigned integer type */ -{ - byte* ptr = mysql_data; - - if (type == DATA_INT) { - /* Store integer data in Innobase in a big-endian format, - sign bit negated */ - - ptr = buf + col_len; - - for (;;) { - ptr--; - *ptr = *mysql_data; - if (ptr == buf) { - break; - } - mysql_data++; - } - - if (!is_unsigned) { - *ptr = (byte) (*ptr ^ 128); - } - } else if (type == DATA_VARCHAR || type == DATA_VARMYSQL - || type == DATA_BINARY) { - /* Remove trailing spaces. */ - - /* Handle UCS2 strings differently. */ - ulint mbminlen = dtype_get_mbminlen( - dfield_get_type(dfield)); - ptr = row_mysql_read_var_ref(&col_len, mysql_data); - if (mbminlen == 2) { - /* space=0x0020 */ - /* Trim "half-chars", just in case. */ - col_len &= ~1; - - while (col_len >= 2 && ptr[col_len - 2] == 0x00 - && ptr[col_len - 1] == 0x20) { - col_len -= 2; - } - } else { - ut_a(mbminlen == 1); - /* space=0x20 */ - while (col_len > 0 && ptr[col_len - 1] == 0x20) { - col_len--; - } - } - } else if (comp && type == DATA_MYSQL - && dtype_get_mbminlen(dfield_get_type(dfield)) == 1 - && dtype_get_mbmaxlen(dfield_get_type(dfield)) > 1) { - /* We assume that this CHAR field is encoded in a - variable-length character set where spaces have - 1:1 correspondence to 0x20 bytes, such as UTF-8. - - Consider a CHAR(n) field, a field of n characters. - It will contain between n*mbminlen and n*mbmaxlen bytes. - We will try to truncate it to n bytes by stripping - space padding. If the field contains single-byte - characters only, it will be truncated to n characters. - Consider a CHAR(5) field containing the string ".a " - where "." denotes a 3-byte character represented by - the bytes "$%&". After our stripping, the string will - be stored as "$%&a " (5 bytes). The string ".abc " - will be stored as "$%&abc" (6 bytes). - - The space padding will be restored in row0sel.c, function - row_sel_field_store_in_mysql_format(). */ - - ulint n_chars; - dtype_t* dtype = dfield_get_type(dfield); - - ut_a(!(dtype_get_len(dtype) % dtype_get_mbmaxlen(dtype))); - n_chars = dtype_get_len(dtype) / dtype_get_mbmaxlen(dtype); - - /* Strip space padding. 
*/ - while (col_len > n_chars && ptr[col_len - 1] == 0x20) { - col_len--; - } - } else if (type == DATA_BLOB) { - ptr = row_mysql_read_blob_ref(&col_len, mysql_data, col_len); - } - - dfield_set_data(dfield, ptr, col_len); -} diff --git a/innobase/row/row0ins.c b/innobase/row/row0ins.c index fdbbe993ff0..303fe5749bc 100644 --- a/innobase/row/row0ins.c +++ b/innobase/row/row0ins.c @@ -521,6 +521,10 @@ row_ins_cascade_calc_update_vec( fixed_size = dtype_get_fixed_size(type); + /* TODO: pad in UCS-2 with 0x0020. + TODO: How does the special truncation of + UTF-8 CHAR cols affect this? */ + if (fixed_size && ufield->new_val.len != UNIV_SQL_NULL && ufield->new_val.len < fixed_size) { diff --git a/innobase/row/row0mysql.c b/innobase/row/row0mysql.c index 2b2b2d83002..b13ba056d85 100644 --- a/innobase/row/row0mysql.c +++ b/innobase/row/row0mysql.c @@ -105,20 +105,6 @@ row_mysql_delay_if_needed(void) } } -/*********************************************************************** -Reads a MySQL format variable-length field (like VARCHAR) length and -returns pointer to the field data. */ - -byte* -row_mysql_read_var_ref_noninline( -/*=============================*/ - /* out: field + 2 */ - ulint* len, /* out: variable-length field length */ - byte* field) /* in: field */ -{ - return(row_mysql_read_var_ref(len, field)); -} - /*********************************************************************** Frees the blob heap in prebuilt when no longer needed. */ @@ -132,6 +118,61 @@ row_mysql_prebuilt_free_blob_heap( prebuilt->blob_heap = NULL; } +/*********************************************************************** +Stores a >= 5.0.3 format true VARCHAR length to dest, in the MySQL row +format. */ + +byte* +row_mysql_store_true_var_len( +/*=========================*/ + /* out: pointer to the data, we skip the 1 or 2 bytes + at the start that are used to store the len */ + byte* dest, /* in: where to store */ + ulint len, /* in: length, must fit in two bytes */ + ulint lenlen) /* in: storage length of len: either 1 or 2 bytes */ +{ + if (lenlen == 2) { + ut_a(len < 256 * 256); + + mach_write_to_2_little_endian(dest, len); + + return(dest + 2); + } + + ut_a(lenlen == 1); + ut_a(len < 256); + + mach_write_to_1(dest, len); + + return(dest + 1); +} + +/*********************************************************************** +Reads a >= 5.0.3 format true VARCHAR length, in the MySQL row format, and +returns a pointer to the data. */ + +byte* +row_mysql_read_true_varchar( +/*========================*/ + /* out: pointer to the data, we skip the 1 or 2 bytes + at the start that are used to store the len */ + ulint* len, /* out: variable-length field length */ + byte* field, /* in: field in the MySQL format */ + ulint lenlen) /* in: storage length of len: either 1 or 2 bytes */ +{ + if (lenlen == 2) { + *len = mach_read_from_2_little_endian(field); + + return(field + 2); + } + + ut_a(lenlen == 1); + + *len = mach_read_from_1(field); + + return(field + 1); +} + /*********************************************************************** Stores a reference to a BLOB in the MySQL format. */ @@ -191,15 +232,177 @@ row_mysql_read_blob_ref( } /****************************************************************** -Convert a row in the MySQL format to a row in the Innobase format. */ +Stores a non-SQL-NULL field given in the MySQL format in the InnoDB format. +The counterpart of this function is row_sel_field_store_in_mysql_format() in +row0sel.c. 
*/ + +byte* +row_mysql_store_col_in_innobase_format( +/*===================================*/ + /* out: up to which byte we used + buf in the conversion */ + dfield_t* dfield, /* in/out: dfield where dtype + information must be already set when + this function is called! */ + byte* buf, /* in/out: buffer for a converted + integer value; this must be at least + col_len long then! */ + ibool row_format_col, /* TRUE if the mysql_data is from + a MySQL row, FALSE if from a MySQL + key value; + in MySQL, a true VARCHAR storage + format differs in a row and in a + key value: in a key value the length + is always stored in 2 bytes! */ + byte* mysql_data, /* in: MySQL column value, not + SQL NULL; NOTE that dfield may also + get a pointer to mysql_data, + therefore do not discard this as long + as dfield is used! */ + ulint col_len, /* in: MySQL column length; NOTE that + this is the storage length of the + column in the MySQL format row, not + necessarily the length of the actual + payload data; if the column is a true + VARCHAR then this is irrelevant */ + ibool comp) /* in: TRUE = compact format */ +{ + byte* ptr = mysql_data; + dtype_t* dtype; + ulint type; + ulint lenlen; + + dtype = dfield_get_type(dfield); + + type = dtype->mtype; + + if (type == DATA_INT) { + /* Store integer data in Innobase in a big-endian format, + sign bit negated if the data is a signed integer. In MySQL, + integers are stored in a little-endian format. */ + + ptr = buf + col_len; + + for (;;) { + ptr--; + *ptr = *mysql_data; + if (ptr == buf) { + break; + } + mysql_data++; + } + + if (!(dtype->prtype & DATA_UNSIGNED)) { + + *ptr = (byte) (*ptr ^ 128); + } + + buf += col_len; + } else if ((type == DATA_VARCHAR + || type == DATA_VARMYSQL + || type == DATA_BINARY)) { + + if (dtype_get_mysql_type(dtype) == DATA_MYSQL_TRUE_VARCHAR) { + /* The length of the actual data is stored to 1 or 2 + bytes at the start of the field */ + + if (row_format_col) { + if (dtype->prtype & DATA_LONG_TRUE_VARCHAR) { + lenlen = 2; + } else { + lenlen = 1; + } + } else { + /* In a MySQL key value, lenlen is always 2 */ + lenlen = 2; + } + + ptr = row_mysql_read_true_varchar(&col_len, mysql_data, + lenlen); + } else { + /* Remove trailing spaces from old style VARCHAR + columns. */ + + /* Handle UCS2 strings differently. */ + ulint mbminlen = dtype_get_mbminlen(dtype); + + ptr = mysql_data; + + if (mbminlen == 2) { + /* space=0x0020 */ + /* Trim "half-chars", just in case. */ + col_len &= ~1; + + while (col_len >= 2 && ptr[col_len - 2] == 0x00 + && ptr[col_len - 1] == 0x20) { + col_len -= 2; + } + } else { + ut_a(mbminlen == 1); + /* space=0x20 */ + while (col_len > 0 + && ptr[col_len - 1] == 0x20) { + col_len--; + } + } + } + } else if (comp && type == DATA_MYSQL + && dtype_get_mbminlen(dtype) == 1 + && dtype_get_mbmaxlen(dtype) > 1) { + /* In some cases we strip trailing spaces from UTF-8 and other + multibyte charsets, from FIXED-length CHAR columns, to save + space. UTF-8 would otherwise normally use 3 * the string length + bytes to store a latin1 string! */ + + /* We assume that this CHAR field is encoded in a + variable-length character set where spaces have + 1:1 correspondence to 0x20 bytes, such as UTF-8. + + Consider a CHAR(n) field, a field of n characters. + It will contain between n * mbminlen and n * mbmaxlen bytes. + We will try to truncate it to n bytes by stripping + space padding. If the field contains single-byte + characters only, it will be truncated to n characters. 
+ Consider a CHAR(5) field containing the string ".a " + where "." denotes a 3-byte character represented by + the bytes "$%&". After our stripping, the string will + be stored as "$%&a " (5 bytes). The string ".abc " + will be stored as "$%&abc" (6 bytes). + + The space padding will be restored in row0sel.c, function + row_sel_field_store_in_mysql_format(). */ + + ulint n_chars; + + ut_a(!(dtype_get_len(dtype) % dtype_get_mbmaxlen(dtype))); + + n_chars = dtype_get_len(dtype) / dtype_get_mbmaxlen(dtype); + + /* Strip space padding. */ + while (col_len > n_chars && ptr[col_len - 1] == 0x20) { + col_len--; + } + } else if (type == DATA_BLOB && row_format_col) { + + ptr = row_mysql_read_blob_ref(&col_len, mysql_data, col_len); + } + + dfield_set_data(dfield, ptr, col_len); + + return(buf); +} + +/****************************************************************** +Convert a row in the MySQL format to a row in the Innobase format. Note that +the function to convert a MySQL format key value to an InnoDB dtuple is +row_sel_convert_mysql_key_to_innobase() in row0sel.c. */ static void row_mysql_convert_row_to_innobase( /*==============================*/ dtuple_t* row, /* in/out: Innobase row where the field type information is already - copied there, or will be copied - later */ + copied there! */ row_prebuilt_t* prebuilt, /* in: prebuilt struct where template must be of type ROW_MYSQL_WHOLE_ROW */ byte* mysql_rec) /* in: row in the MySQL format; @@ -236,10 +439,10 @@ row_mysql_convert_row_to_innobase( row_mysql_store_col_in_innobase_format(dfield, prebuilt->ins_upd_rec_buff + templ->mysql_col_offset, + TRUE, /* MySQL row format data */ mysql_rec + templ->mysql_col_offset, templ->mysql_col_len, - templ->type, prebuilt->table->comp, - templ->is_unsigned); + prebuilt->table->comp); next_column: ; } @@ -594,7 +797,8 @@ static dtuple_t* row_get_prebuilt_insert_row( /*========================*/ - /* out: prebuilt dtuple */ + /* out: prebuilt dtuple; the column + type information is also set in it */ row_prebuilt_t* prebuilt) /* in: prebuilt struct in MySQL handle */ { @@ -784,6 +988,7 @@ row_unlock_tables_for_mysql( lock_release_tables_off_kernel(trx); mutex_exit(&kernel_mutex); } + /************************************************************************* Sets a table lock on the table mentioned in prebuilt. */ @@ -962,10 +1167,13 @@ run_again: if (err != DB_SUCCESS) { que_thr_stop_for_mysql(thr); - thr->lock_state= QUE_THR_LOCK_ROW; + +/* TODO: what is this? */ thr->lock_state= QUE_THR_LOCK_ROW; + was_lock_wait = row_mysql_handle_errors(&err, trx, thr, &savept); - thr->lock_state= QUE_THR_LOCK_NOLOCK; + thr->lock_state= QUE_THR_LOCK_NOLOCK; + if (was_lock_wait) { goto run_again; } diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c index 54dfbe997ce..a09e09342e0 100644 --- a/innobase/row/row0sel.c +++ b/innobase/row/row0sel.c @@ -2119,10 +2119,10 @@ row_sel_convert_mysql_key_to_innobase( + 256 * key_ptr[data_offset + 1]; data_field_len = data_offset + 2 + field->prefix_len; data_offset += 2; - - type = DATA_CHAR; /* now that we know the length, we - store the column value like it would - be a fixed char field */ + + /* now that we know the length, we store the column + value like it would be a fixed char field */ + } else if (field->prefix_len > 0) { /* Looks like MySQL pads unused end bytes in the prefix with space. 
Therefore, also in UTF-8, it is ok @@ -2146,11 +2146,12 @@ row_sel_convert_mysql_key_to_innobase( if (!is_null) { row_mysql_store_col_in_innobase_format( - dfield, buf, key_ptr + data_offset, - data_len, type, - index->table->comp, - dfield_get_type(dfield)->prtype - & DATA_UNSIGNED); + dfield, + buf, + FALSE, /* MySQL key value format col */ + key_ptr + data_offset, + data_len, + index->table->comp); buf += data_len; } @@ -2225,7 +2226,7 @@ row_sel_store_row_id_to_prebuilt( dict_index_name_print(stderr, prebuilt->trx, index); fprintf(stderr, "\n" "InnoDB: Field number %lu, record:\n", - (ulong) dict_index_get_sys_col_pos(index, DATA_ROW_ID)); + (ulong) dict_index_get_sys_col_pos(index, DATA_ROW_ID)); rec_print_new(stderr, index_rec, offsets); putc('\n', stderr); ut_error; @@ -2235,8 +2236,9 @@ row_sel_store_row_id_to_prebuilt( } /****************************************************************** -Stores a non-SQL-NULL field in the MySQL format. */ -UNIV_INLINE +Stores a non-SQL-NULL field in the MySQL format. The counterpart of this +function is row_mysql_store_col_in_innobase_format() in row0mysql.c. */ +static void row_sel_field_store_in_mysql_format( /*================================*/ @@ -2251,6 +2253,8 @@ row_sel_field_store_in_mysql_format( ulint len) /* in: length of the data */ { byte* ptr; + byte* field_end; + byte* pad_ptr; ut_ad(len != UNIV_SQL_NULL); @@ -2274,25 +2278,66 @@ row_sel_field_store_in_mysql_format( } ut_ad(templ->mysql_col_len == len); - } else if (templ->type == DATA_VARCHAR || templ->type == DATA_VARMYSQL - || templ->type == DATA_BINARY) { - /* Store the length of the data to the first two bytes of - dest; does not do anything yet because MySQL has - no real vars! */ + } else if (templ->type == DATA_VARCHAR + || templ->type == DATA_VARMYSQL + || templ->type == DATA_BINARY) { + + field_end = dest + templ->mysql_col_len; + + if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) { + /* This is a >= 5.0.3 type true VARCHAR. Store the + length of the data to the first byte or the first + two bytes of dest. */ - dest = row_mysql_store_var_len(dest, len); + dest = row_mysql_store_true_var_len(dest, len, + templ->mysql_length_bytes); + } + + /* Copy the actual data */ ut_memcpy(dest, data, len); -#if 0 - /* No real var implemented in MySQL yet! */ - ut_ad(templ->mysql_col_len >= len + 2); -#endif + /* Pad with trailing spaces. We pad with spaces also the + unused end of a >= 5.0.3 true VARCHAR column, just in case + MySQL expects its contents to be deterministic. */ + + pad_ptr = dest + len; + + ut_ad(templ->mbminlen <= templ->mbmaxlen); + + /* We handle UCS2 charset strings differently. */ + if (templ->mbminlen == 2) { + /* A space char is two bytes, 0x0020 in UCS2 */ + + if (len & 1) { + /* A 0x20 has been stripped from the column. + Pad it back. 
*/ + + if (pad_ptr < field_end) { + *pad_ptr = 0x20; + pad_ptr++; + } + } + + /* Pad the rest of the string with 0x0020 */ + + while (pad_ptr < field_end) { + *pad_ptr = 0x00; + pad_ptr++; + *pad_ptr = 0x20; + pad_ptr++; + } + } else { + ut_ad(templ->mbminlen == 1); + /* space=0x20 */ + + memset(pad_ptr, 0x20, field_end - pad_ptr); + } } else if (templ->type == DATA_BLOB) { /* Store a pointer to the BLOB buffer to dest: the BLOB was already copied to the buffer in row_sel_store_mysql_rec */ - row_mysql_store_blob_ref(dest, templ->mysql_col_len, - data, len); + row_mysql_store_blob_ref(dest, templ->mysql_col_len, data, + len); } else if (templ->type == DATA_MYSQL) { memcpy(dest, data, len); @@ -2306,9 +2351,10 @@ row_sel_field_store_in_mysql_format( ut_a(len * templ->mbmaxlen >= templ->mysql_col_len); if (templ->mbminlen != templ->mbmaxlen) { - /* Pad with spaces. This undoes the stripping + /* Pad with spaces. This undoes the stripping done in row0mysql.ic, function row_mysql_store_col_in_innobase_format(). */ + memset(dest + len, 0x20, templ->mysql_col_len - len); } } else { @@ -2320,6 +2366,7 @@ row_sel_field_store_in_mysql_format( || templ->type == DATA_DOUBLE || templ->type == DATA_DECIMAL); ut_ad(templ->mysql_col_len == len); + memcpy(dest, data, len); } } @@ -2436,40 +2483,6 @@ row_sel_store_mysql_rec( mysql_rec + templ->mysql_col_offset, templ, data, len); - if (templ->type == DATA_VARCHAR - || templ->type == DATA_VARMYSQL - || templ->type == DATA_BINARY) { - /* Pad with trailing spaces */ - data = mysql_rec + templ->mysql_col_offset; - - ut_ad(templ->mbminlen <= templ->mbmaxlen); - /* Handle UCS2 strings differently. */ - if (templ->mbminlen == 2) { - /* space=0x0020 */ - ulint col_len = templ->mysql_col_len; - - ut_a(!(col_len & 1)); - if (len & 1) { - /* A 0x20 has been stripped - from the column. - Pad it back. */ - goto pad_0x20; - } - /* Pad the rest of the string - with 0x0020 */ - while (len < col_len) { - data[len++] = 0x00; - pad_0x20: - data[len++] = 0x20; - } - } else { - ut_ad(templ->mbminlen == 1); - /* space=0x20 */ - memset(data + len, 0x20, - templ->mysql_col_len - len); - } - } - /* Cleanup */ if (extern_field_heap) { mem_heap_free(extern_field_heap); diff --git a/innobase/trx/trx0trx.c b/innobase/trx/trx0trx.c index 614058e6860..643f7e164e5 100644 --- a/innobase/trx/trx0trx.c +++ b/innobase/trx/trx0trx.c @@ -1958,7 +1958,7 @@ trx_recover_for_mysql( ut_print_timestamp(stderr); fprintf(stderr, -" InnoDB: %d transactions in prepare state after recovery\n", +" InnoDB: %d transactions in prepared state after recovery\n", count); return (count); diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 73d5ac9e94e..7132ab00bb9 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -1074,6 +1074,8 @@ innobase_init(void) DBUG_ENTER("innobase_init"); + ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR); + os_innodb_umask = (ulint)my_umask; /* First calculate the default path for innodb_data_home_dir etc., @@ -2244,7 +2246,9 @@ innobase_mysql_cmp( } /****************************************************************** -Converts a MySQL type to an InnoDB type. */ +Converts a MySQL type to an InnoDB type. Note that this function returns +the 'mtype' of InnoDB. InnoDB differentiates between MySQL's old <= 4.1 +VARCHAR and the new true VARCHAR in >= 5.0.3 by the 'prtype'. 
*/ inline ulint get_innobase_type_from_mysql_type( @@ -2259,8 +2263,9 @@ get_innobase_type_from_mysql_type( switch (field->type()) { /* NOTE that we only allow string types in DATA_MYSQL and DATA_VARMYSQL */ - case MYSQL_TYPE_VAR_STRING: - case MYSQL_TYPE_VARCHAR: if (field->binary()) { + case MYSQL_TYPE_VAR_STRING: /* old <= 4.1 VARCHAR */ + case MYSQL_TYPE_VARCHAR: /* new >= 5.0.3 true VARCHAR */ + if (field->binary()) { return(DATA_BINARY); } else if (strcmp( field->charset()->name, @@ -2313,6 +2318,35 @@ get_innobase_type_from_mysql_type( return(0); } +/*********************************************************************** +Writes an unsigned integer value < 64k to 2 bytes, in the little-endian +storage format. */ +inline +void +innobase_write_to_2_little_endian( +/*==============================*/ + byte* buf, /* in: where to store */ + ulint val) /* in: value to write, must be < 64k */ +{ + ut_a(val < 256 * 256); + + buf[0] = (byte)(val & 0xFF); + buf[1] = (byte)(val / 256); +} + +/*********************************************************************** +Reads an unsigned integer value < 64k from 2 bytes, in the little-endian +storage format. */ +inline +uint +innobase_read_from_2_little_endian( +/*===============================*/ + /* out: value */ + const mysql_byte* buf) /* in: from where to read */ +{ + return((ulint)(buf[0]) + 256 * ((ulint)(buf[1]))); +} + /*********************************************************************** Stores a key value for a row to a buffer. */ @@ -2352,9 +2386,14 @@ ha_innobase::store_key_val_for_row( 3. In a column prefix field, prefix_len next bytes are reserved for data. In a normal field the max field length next bytes are reserved for data. For a VARCHAR(n) the max field length is n. If the stored - value is the SQL NULL then these data bytes are set to 0. */ + value is the SQL NULL then these data bytes are set to 0. - /* We have to zero-fill the buffer so that MySQL is able to use a + 4. We always use a 2 byte length for a true >= 5.0.3 VARCHAR. Note that + in the MySQL row format, the length is stored in 1 or 2 bytes, + depending on the maximum allowed length. But in the MySQL key value + format, the length always takes 2 bytes. + + We have to zero-fill the buffer so that MySQL is able to use a simple memcmp to compare two key values to determine if they are equal. MySQL does this to compare contents of two 'ref' values. */ @@ -2377,7 +2416,43 @@ ha_innobase::store_key_val_for_row( field = key_part->field; mysql_type = field->type(); - if (mysql_type == FIELD_TYPE_TINY_BLOB + if (mysql_type == MYSQL_TYPE_VARCHAR) { + /* >= 5.0.3 true VARCHAR */ + ulint lenlen; + ulint len; + byte* data; + + if (is_null) { + buff += key_part->length + 2; + + continue; + } + + lenlen = (ulint) + (((Field_varstring*)field)->length_bytes); + + data = row_mysql_read_true_varchar(&len, + (byte*) (record + + (ulint)get_field_offset(table, field)), + lenlen); + + /* The length in a key value is always stored in 2 + bytes */ + + row_mysql_store_true_var_len((byte*)buff, len, 2); + buff += 2; + + memcpy(buff, data, len); + + /* Note that we always reserve the maximum possible + length of the true VARCHAR in the key value, though + only len first bytes after the 2 length bytes contain + actual data. The rest of the space was reset to zero + in the bzero() call above. 
*/ + + buff += key_part->length; + + } else if (mysql_type == FIELD_TYPE_TINY_BLOB || mysql_type == FIELD_TYPE_MEDIUM_BLOB || mysql_type == FIELD_TYPE_BLOB || mysql_type == FIELD_TYPE_LONG_BLOB) { @@ -2385,9 +2460,9 @@ ha_innobase::store_key_val_for_row( ut_a(key_part->key_part_flag & HA_PART_KEY_SEG); if (is_null) { - buff += key_part->length + 2; + buff += key_part->length + 2; - continue; + continue; } blob_data = row_mysql_read_blob_ref(&blob_len, @@ -2404,12 +2479,15 @@ ha_innobase::store_key_val_for_row( /* MySQL reserves 2 bytes for the length and the storage of the number is little-endian */ - ut_a(blob_len < 256); - *((byte*)buff) = (byte)blob_len; + innobase_write_to_2_little_endian( + (byte*)buff, (ulint)blob_len); buff += 2; memcpy(buff, blob_data, blob_len); + /* Note that we always reserve the maximum possible + length of the BLOB prefix in the key value. */ + buff += key_part->length; } else { if (is_null) { @@ -2573,6 +2651,13 @@ build_template( templ->mysql_col_len = (ulint) field->pack_length(); templ->type = get_innobase_type_from_mysql_type(field); + templ->mysql_type = (ulint)field->type(); + + if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) { + templ->mysql_length_bytes = (ulint) + (((Field_varstring*)field)->length_bytes); + } + templ->charset = dtype_get_charset_coll_noninline( index->table->cols[i].type.prtype); templ->mbminlen = index->table->cols[i].type.mbminlen; @@ -2810,54 +2895,6 @@ func_exit: DBUG_RETURN(error); } -/****************************************************************** -Converts field data for storage in an InnoDB update vector. */ -inline -mysql_byte* -innobase_convert_and_store_changed_col( -/*===================================*/ - /* out: pointer to the end of the converted - data in the buffer */ - upd_field_t* ufield, /* in/out: field in the update vector */ - mysql_byte* buf, /* in: buffer we can use in conversion */ - mysql_byte* data, /* in: column data to store */ - ulint len, /* in: data len */ - ulint col_type,/* in: data type in InnoDB type numbers */ - ulint is_unsigned)/* in: != 0 if an unsigned integer type */ -{ - uint i; - - if (len == UNIV_SQL_NULL) { - data = NULL; - } else if (col_type == DATA_VARCHAR || col_type == DATA_BINARY - || col_type == DATA_VARMYSQL) { - /* Remove trailing spaces */ - while (len > 0 && data[len - 1] == ' ') { - len--; - } - } else if (col_type == DATA_INT) { - /* Store integer data in InnoDB in a big-endian - format, sign bit negated, if signed */ - - for (i = 0; i < len; i++) { - buf[len - 1 - i] = data[i]; - } - - if (!is_unsigned) { - buf[0] = buf[0] ^ 128; - } - - data = buf; - - buf += len; - } - - ufield->new_val.data = data; - ufield->new_val.len = len; - - return(buf); -} - /************************************************************************** Checks which fields have changed in a row and stores information of them to an update vector. 
*/ @@ -2878,9 +2915,11 @@ calc_row_difference( { mysql_byte* original_upd_buff = upd_buff; Field* field; + enum_field_types field_mysql_type; uint n_fields; ulint o_len; ulint n_len; + ulint col_pack_len; byte* o_ptr; byte* n_ptr; byte* buf; @@ -2888,6 +2927,7 @@ calc_row_difference( ulint col_type; ulint is_unsigned; ulint n_changed = 0; + dfield_t dfield; uint i; n_fields = table->s->fields; @@ -2907,9 +2947,13 @@ calc_row_difference( o_ptr = (byte*) old_row + get_field_offset(table, field); n_ptr = (byte*) new_row + get_field_offset(table, field); - o_len = field->pack_length(); - n_len = field->pack_length(); + + col_pack_len = field->pack_length(); + o_len = col_pack_len; + n_len = col_pack_len; + field_mysql_type = field->type(); + col_type = get_innobase_type_from_mysql_type(field); is_unsigned = (ulint) (field->flags & UNSIGNED_FLAG); @@ -2918,14 +2962,29 @@ calc_row_difference( case DATA_BLOB: o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len); n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len); + break; + case DATA_VARCHAR: case DATA_BINARY: case DATA_VARMYSQL: - o_ptr = row_mysql_read_var_ref_noninline(&o_len, - o_ptr); - n_ptr = row_mysql_read_var_ref_noninline(&n_len, - n_ptr); + if (field_mysql_type == MYSQL_TYPE_VARCHAR) { + /* This is a >= 5.0.3 type true VARCHAR where + the real payload data length is stored in + 1 or 2 bytes */ + + o_ptr = row_mysql_read_true_varchar( + &o_len, o_ptr, + (ulint) + (((Field_varstring*)field)->length_bytes)); + + n_ptr = row_mysql_read_true_varchar( + &n_len, n_ptr, + (ulint) + (((Field_varstring*)field)->length_bytes)); + } + + break; default: ; } @@ -2947,12 +3006,29 @@ calc_row_difference( /* The field has changed */ ufield = uvect->fields + n_changed; + + /* Let us use a dummy dfield to make the conversion + from the MySQL column format to the InnoDB format */ + + dfield.type = (prebuilt->table->cols + i)->type; + + if (n_len != UNIV_SQL_NULL) { + buf = row_mysql_store_col_in_innobase_format( + &dfield, + (byte*)buf, + TRUE, + n_ptr, + col_pack_len, + prebuilt->table->comp); + ufield->new_val.data = + dfield_get_data(&dfield); + ufield->new_val.len = + dfield_get_len(&dfield); + } else { + ufield->new_val.data = NULL; + ufield->new_val.len = UNIV_SQL_NULL; + } - buf = (byte*) - innobase_convert_and_store_changed_col(ufield, - (mysql_byte*)buf, - (mysql_byte*)n_ptr, n_len, col_type, - is_unsigned); ufield->exp = NULL; ufield->field_no = (prebuilt->table->cols + i)->clust_pos; @@ -3701,7 +3777,7 @@ ha_innobase::rnd_pos( } if (error) { - DBUG_PRINT("error",("Got error: %ld",error)); + DBUG_PRINT("error", ("Got error: %ld", error)); DBUG_RETURN(error); } @@ -3709,10 +3785,11 @@ ha_innobase::rnd_pos( for the table, and it is == ref_length */ error = index_read(buf, pos, ref_length, HA_READ_KEY_EXACT); - if (error) - { - DBUG_PRINT("error",("Got error: %ld",error)); + + if (error) { + DBUG_PRINT("error", ("Got error: %ld", error)); } + change_active_index(keynr); DBUG_RETURN(error); @@ -3752,12 +3829,11 @@ ha_innobase::position( ref_length, record); } - /* Since we do not store len to the buffer 'ref', we must assume - that len is always fixed for this table. The following assertion - checks this. */ + /* We assume that the 'ref' value len is always fixed for the same + table. 
*/ if (len != ref_length) { - fprintf(stderr, + fprintf(stderr, "InnoDB: Error: stored ref len is %lu, but table ref len is %lu\n", (ulong)len, (ulong)ref_length); } @@ -3788,9 +3864,11 @@ create_table_def( ulint n_cols; int error; ulint col_type; + ulint col_len; ulint nulls_allowed; ulint unsigned_type; ulint binary_type; + ulint long_true_varchar; ulint charset_no; ulint i; @@ -3837,17 +3915,40 @@ create_table_def( charset_no = (ulint)field->charset()->number; - ut_a(charset_no < 256); /* in ut0type.h we assume that - the number fits in one byte */ + ut_a(charset_no < 256); /* in data0type.h we assume + that the number fits in one + byte */ } - dict_mem_table_add_col(table, (char*) field->field_name, - col_type, dtype_form_prtype( - (ulint)field->type() - | nulls_allowed | unsigned_type - | binary_type, - + charset_no), - field->pack_length(), 0); + ut_a(field->type() < 256); /* we assume in dtype_form_prtype() + that this fits in one byte */ + col_len = field->pack_length(); + + /* The MySQL pack length contains 1 or 2 bytes length field + for a true VARCHAR. Let us subtract that, so that the InnoDB + column length in the InnoDB data dictionary is the real + maximum byte length of the actual data. */ + + long_true_varchar = 0; + + if (field->type() == MYSQL_TYPE_VARCHAR) { + col_len -= ((Field_varstring*)field)->length_bytes; + + if (((Field_varstring*)field)->length_bytes == 2) { + long_true_varchar = DATA_LONG_TRUE_VARCHAR; + } + } + + dict_mem_table_add_col(table, + (char*) field->field_name, + col_type, + dtype_form_prtype( + (ulint)field->type() + | nulls_allowed | unsigned_type + | binary_type | long_true_varchar, + charset_no), + col_len, + 0); } error = row_create_table_for_mysql(table, trx); @@ -6125,54 +6226,79 @@ ha_innobase::get_auto_increment() return((ulonglong) nr); } +/*********************************************************************** +Compares two 'refs'. A 'ref' is the (internal) primary key value of the row. +If there is no explicitly declared non-null unique key or a primary key, then +InnoDB internally uses the row id as the primary key. */ int ha_innobase::cmp_ref( - const mysql_byte *ref1, - const mysql_byte *ref2) +/*=================*/ + /* out: < 0 if ref1 < ref2, 0 if equal, else + > 0 */ + const mysql_byte* ref1, /* in: an (internal) primary key value in the + MySQL key value format */ + const mysql_byte* ref2) /* in: an (internal) primary key value in the + MySQL key value format */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; enum_field_types mysql_type; - Field* field; - int result; + Field* field; + KEY_PART_INFO* key_part; + KEY_PART_INFO* key_part_end; + uint len1; + uint len2; + int result; - if (prebuilt->clust_index_was_generated) - return memcmp(ref1, ref2, DATA_ROW_ID_LEN); + if (prebuilt->clust_index_was_generated) { + /* The 'ref' is an InnoDB row id */ + + return(memcmp(ref1, ref2, DATA_ROW_ID_LEN)); + } + + /* Do a type-aware comparison of primary key fields. PK fields + are always NOT NULL, so no checks for NULL are performed. */ + + key_part = table->key_info[table->s->primary_key].key_part; + + key_part_end = key_part + + table->key_info[table->s->primary_key].key_parts; - /* Do type-aware comparison of Primary Key members. 
PK members - are always NOT NULL, so no checks for NULL are performed */ - KEY_PART_INFO *key_part= - table->key_info[table->s->primary_key].key_part; - KEY_PART_INFO *key_part_end= - key_part + table->key_info[table->s->primary_key].key_parts; for (; key_part != key_part_end; ++key_part) { field = key_part->field; mysql_type = field->type(); + if (mysql_type == FIELD_TYPE_TINY_BLOB || mysql_type == FIELD_TYPE_MEDIUM_BLOB || mysql_type == FIELD_TYPE_BLOB || mysql_type == FIELD_TYPE_LONG_BLOB) { - ut_a(!ref1[1]); - ut_a(!ref2[1]); - byte len1= *ref1; - byte len2= *ref2; + /* In the MySQL key value format, a column prefix of + a BLOB is preceded by a 2-byte length field */ + + len1 = innobase_read_from_2_little_endian(ref1); + len2 = innobase_read_from_2_little_endian(ref2); + ref1 += 2; ref2 += 2; - result = - ((Field_blob*)field)->cmp((const char*)ref1, len1, + result = ((Field_blob*)field)->cmp( + (const char*)ref1, len1, (const char*)ref2, len2); } else { - result = - field->cmp((const char*)ref1, (const char*)ref2); + result = field->cmp((const char*)ref1, + (const char*)ref2); + } + + if (result) { + + return(result); } - if (result) - return result; ref1 += key_part->length; ref2 += key_part->length; } - return 0; + + return(0); } char* diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index 1c8063b9373..e1ed3a486cf 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB && Innobase Oy +/* Copyright (C) 2000-2005 MySQL AB && Innobase Oy This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -40,9 +40,10 @@ my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name, /* The class defining a handle to an Innodb table */ class ha_innobase: public handler { - void* innobase_prebuilt; /* (row_prebuilt_t*) prebuilt - struct in Innodb, used to save - CPU */ + void* innobase_prebuilt;/* (row_prebuilt_t*) prebuilt + struct in InnoDB, used to save + CPU time with prebuilt data + structures*/ THD* user_thd; /* the thread handle of the user currently using the handle; this is set in external_lock function */ @@ -83,12 +84,12 @@ class ha_innobase: public handler public: ha_innobase(TABLE *table): handler(table), int_table_flags(HA_REC_NOT_IN_SEQ | - HA_NULL_IN_KEY | HA_FAST_KEY_READ | + HA_NULL_IN_KEY | + HA_FAST_KEY_READ | HA_CAN_INDEX_BLOBS | HA_CAN_SQL_HANDLER | HA_NOT_EXACT_COUNT | HA_PRIMARY_KEY_IN_READ_INDEX | - HA_NO_VARCHAR | HA_TABLE_SCAN_ON_INDEX), last_dup_key((uint) -1), start_of_scan(0), @@ -108,7 +109,10 @@ class ha_innobase: public handler ulong table_flags() const { return int_table_flags; } ulong index_flags(uint idx, uint part, bool all_parts) const { - return (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE | + return (HA_READ_NEXT | + HA_READ_PREV | + HA_READ_ORDER | + HA_READ_RANGE | HA_KEYREAD_ONLY); } uint max_supported_keys() const { return MAX_KEY; } @@ -163,7 +167,8 @@ class ha_innobase: public handler int start_stmt(THD *thd); void position(byte *record); - ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); + ha_rows records_in_range(uint inx, key_range *min_key, key_range + *max_key); ha_rows estimate_rows_upper_bound(); int create(const char *name, register TABLE *form,