From b8ab82a6294228c65f1ac2a3b3efa82a5abc1dc8 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 1 Oct 2006 16:36:26 +0500 Subject: [PATCH 01/57] bug #21790 (UNKNOWN ERROR message in geometry) We issued UNKNOWN ERROR initially in this place and forgot to fix it when we implemented informative error message for this mysql-test/r/gis-rtree.result: test result mysql-test/t/gis-rtree.test: test case sql/handler.cc: let's issue informative error message here --- mysql-test/r/gis-rtree.result | 8 ++++++++ mysql-test/t/gis-rtree.test | 12 ++++++++++++ sql/handler.cc | 4 ++-- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/gis-rtree.result b/mysql-test/r/gis-rtree.result index b283d64395d..f872838f6f3 100644 --- a/mysql-test/r/gis-rtree.result +++ b/mysql-test/r/gis-rtree.result @@ -862,3 +862,11 @@ CHECK TABLE t1 EXTENDED; Table Op Msg_type Msg_text test.t1 check status OK DROP TABLE t1; +CREATE TABLE t1(foo GEOMETRY NOT NULL, SPATIAL INDEX(foo) ); +INSERT INTO t1(foo) VALUES (NULL); +ERROR 23000: Column 'foo' cannot be null +INSERT INTO t1() VALUES (); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +INSERT INTO t1(foo) VALUES (''); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +DROP TABLE t1; diff --git a/mysql-test/t/gis-rtree.test b/mysql-test/t/gis-rtree.test index 163f2806ad2..091811b05b3 100644 --- a/mysql-test/t/gis-rtree.test +++ b/mysql-test/t/gis-rtree.test @@ -232,3 +232,15 @@ CHECK TABLE t1 EXTENDED; DROP TABLE t1; # End of 4.1 tests + +# +# bug #21790 (UNKNOWN ERROR on NULLs in RTree) +# +CREATE TABLE t1(foo GEOMETRY NOT NULL, SPATIAL INDEX(foo) ); +--error 1048 +INSERT INTO t1(foo) VALUES (NULL); +--error 1416 +INSERT INTO t1() VALUES (); +--error 1416 +INSERT INTO t1(foo) VALUES (''); +DROP TABLE t1; diff --git a/sql/handler.cc b/sql/handler.cc index 4accc746664..3516b55feb5 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -1788,8 +1788,8 @@ void handler::print_error(int error, myf errflag) break; } case HA_ERR_NULL_IN_SPATIAL: - textno= ER_UNKNOWN_ERROR; - break; + my_error(ER_CANT_CREATE_GEOMETRY_OBJECT, MYF(0)); + DBUG_VOID_RETURN; case HA_ERR_FOUND_DUPP_UNIQUE: textno=ER_DUP_UNIQUE; break; From e94087c5ddc3f5b61ce69cfd51240d1a16999597 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 16 Oct 2006 19:57:33 +0300 Subject: [PATCH 02/57] Changed several char* to LEX_STRING*. BUILD/SETUP.sh: Added check for CCACHE_DISABLE. If set, do not use ccache at all. BUILD/compile-pentium-gcov: Moved CCACHE_DISABLE up before going into SETUP.sh. Added debug_extra_flags to extra_flags. mysql-test/r/create.result: Added tests for incorrect database names. mysql-test/r/ctype_create.result: Added tests for incorrect alter database names. mysql-test/r/events.result: Added tests for incorrect database names. mysql-test/r/grant.result: Output changed to capital letters. mysql-test/t/alter_table.test: Removed extra empty line mysql-test/t/create.test: Added tests for incorrect database names. mysql-test/t/ctype_create.test: Added tests for incorrect name handling mysql-test/t/events.test: Added tests for incorrect database names. sql/item_timefunc.cc: Added dummy case to avoid compiler warning. sql/mysql_priv.h: Changed argument from char pointer to LEX_STRING pointer. sql/mysqld.cc: Added a missing component from struct. sql/sql_class.h: Added function LEX_STRING_make that sets the string and length. sql/sql_db.cc: Changed several char pointers to lex_strings. 
sql/sql_lex.cc: name is now LEX_STRING sql/sql_lex.h: Changed name to LEX_STRING. sql/sql_parse.cc: Changed several char pointers to lex_strings. db_length needed a trick, because in old client protocol there was an extra char zero added to the string. check_db_name() now takes LEX_STRING pointer as an argument. Changed remove_escape() to take LEX_STRING pointer as an argument. Removed COM_CREATE_DB and COM_DROP_DB. These are obsolete. sql/sql_table.cc: char* -> LEX_STRING* sql/sql_yacc.yy: Changed char* -> LEX_STRING* sql/table.cc: check_db_name() now takes LEX_STRING* as argument instead of char*. Optimized code a bit. tests/mysql_client_test.c: Added test for (short) status. After defining out (ifdef) COM_DROP_DB and COM_CREATE_DB in mysqld.cc mysql_client_test needed to be informed that failing in recognizing these commands is not fatal error anymore. --- BUILD/SETUP.sh | 2 +- BUILD/compile-pentium-gcov | 19 +- mysql-test/r/create.result | 16 ++ mysql-test/r/ctype_create.result | 4 + mysql-test/r/events.result | 6 + mysql-test/r/grant.result | 4 +- mysql-test/t/alter_table.test | 1 - mysql-test/t/create.test | 21 ++ mysql-test/t/ctype_create.test | 5 + mysql-test/t/events.test | 11 + sql/item_timefunc.cc | 5 + sql/mysql_priv.h | 2 +- sql/mysqld.cc | 3 +- sql/sql_class.h | 9 +- sql/sql_db.cc | 46 ++-- sql/sql_lex.cc | 3 +- sql/sql_lex.h | 3 +- sql/sql_parse.cc | 356 +++++++++++++++++++------------ sql/sql_table.cc | 2 +- sql/sql_yacc.yy | 38 ++-- sql/table.cc | 42 ++-- tests/mysql_client_test.c | 34 ++- 22 files changed, 411 insertions(+), 221 deletions(-) diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh index 02d160158b3..ced9c7ec2df 100755 --- a/BUILD/SETUP.sh +++ b/BUILD/SETUP.sh @@ -183,7 +183,7 @@ fi # (http://samba.org/ccache) is installed, use it. # We use 'grep' and hope 'grep' will work as expected # (returns 0 if finds lines) -if ccache -V > /dev/null 2>&1 +if ccache -V > /dev/null 2>&1 && test "$CCACHE_GCOV_VERSION_ENABLED" == "1" then if ! (echo "$CC" | grep "ccache" > /dev/null) then diff --git a/BUILD/compile-pentium-gcov b/BUILD/compile-pentium-gcov index ca37f78e283..5633efaddf0 100755 --- a/BUILD/compile-pentium-gcov +++ b/BUILD/compile-pentium-gcov @@ -1,12 +1,21 @@ #! /bin/sh +# Need to disable ccache, or we loose the gcov-needed compiler output files. + +CCACHE_GCOV_VERSION_ENABLED=0 +if ccache -V > /dev/null 2>&1 +then + CCACHE_VER=`ccache -V | head -1 | sed s/"ccache version "//` + if test "$CCACHE_VER" == "2.4-gcov" + then + CCACHE_GCOV_VERSION_ENABLED=1 + fi +fi +export CCACHE_GCOV_VERSION_ENABLED + path=`dirname $0` . "$path/SETUP.sh" -# Need to disable ccache, or we loose the gcov-needed compiler output files. -CCACHE_DISABLE=1 -export CCACHE_DISABLE - # GCC4 needs -fprofile-arcs -ftest-coverage on the linker command line (as well # as on the compiler command line), and this requires setting LDFLAGS for BDB. export LDFLAGS="-fprofile-arcs -ftest-coverage" @@ -14,7 +23,7 @@ export LDFLAGS="-fprofile-arcs -ftest-coverage" # The -fprofile-arcs and -ftest-coverage options cause GCC to instrument the # code with profiling information used by gcov. # the -DDISABLE_TAO_ASM is needed to avoid build failures in Yassl. 
-extra_flags="$pentium_cflags -fprofile-arcs -ftest-coverage -DDISABLE_TAO_ASM -DHAVE_MUTEX_THREAD_ONLY" +extra_flags="$pentium_cflags -fprofile-arcs -ftest-coverage -DDISABLE_TAO_ASM -DHAVE_MUTEX_THREAD_ONLY $debug_extra_flags" extra_configs="$pentium_configs $debug_configs --disable-shared $static_link" extra_configs="$extra_configs $max_configs" diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index 9ecaaa66cc3..2ed6d561b96 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -818,3 +818,19 @@ SELECT * from t2; a b 1 1 drop table t1,t2; +CREATE DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +ERROR 42000: Incorrect database name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' +DROP DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +ERROR 42000: Incorrect database name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' +RENAME DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa TO a; +ERROR 42000: Unknown database 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' +RENAME DATABASE mysqltest TO aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +ERROR 42000: Incorrect database name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' +create database mysqltest; +RENAME DATABASE mysqltest TO aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +ERROR 42000: Incorrect database name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' +drop database mysqltest; +USE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +ERROR 42000: Incorrect database name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' +SHOW CREATE DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +ERROR 42000: Incorrect database name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' diff --git a/mysql-test/r/ctype_create.result b/mysql-test/r/ctype_create.result index 8a81991ea78..35461fce45a 100644 --- a/mysql-test/r/ctype_create.result +++ b/mysql-test/r/ctype_create.result @@ -72,3 +72,7 @@ mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET latin2 */ drop database mysqltest2; ALTER DATABASE DEFAULT CHARACTER SET latin2; ERROR 3D000: No database selected +ALTER DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa DEFAULT CHARACTER SET latin2; +ERROR 42000: Incorrect database name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' +ALTER DATABASE `` DEFAULT CHARACTER SET latin2; +ERROR 42000: Incorrect database name '' diff --git a/mysql-test/r/events.result b/mysql-test/r/events.result index e115e077535..4fae7651311 100644 --- a/mysql-test/r/events.result +++ b/mysql-test/r/events.result @@ -386,4 +386,10 @@ create trigger t1_ai after insert on t1 for each row show create event e1; ERROR 0A000: Not allowed to return a result set from a trigger 
drop table t1; drop event e1; +SHOW EVENTS FROM aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +ERROR 42000: Incorrect database name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' +SHOW EVENTS FROM ``; +ERROR 42000: Incorrect database name '' +SHOW EVENTS FROM `events\\test`; +Db Name Definer Type Execute at Interval value Interval field Starts Ends Status drop database events_test; diff --git a/mysql-test/r/grant.result b/mysql-test/r/grant.result index b60d238c951..3ac48e4a01f 100644 --- a/mysql-test/r/grant.result +++ b/mysql-test/r/grant.result @@ -911,7 +911,7 @@ ERROR 42000: SELECT command denied to user 'mysqltest_1'@'localhost' for table ' SHOW CREATE TABLE mysqltest2.t_nn; Table Create Table t_nn CREATE TABLE `t_nn` ( - `c1` int(11) default NULL + `c1` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 SHOW CREATE VIEW mysqltest2.t_nn; ERROR HY000: 'mysqltest2.t_nn' is not VIEW @@ -930,7 +930,7 @@ v_nn CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER SHOW CREATE TABLE mysqltest2.t_nn; Table Create Table t_nn CREATE TABLE `t_nn` ( - `c1` int(11) default NULL + `c1` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 SHOW CREATE VIEW mysqltest2.t_nn; ERROR HY000: 'mysqltest2.t_nn' is not VIEW diff --git a/mysql-test/t/alter_table.test b/mysql-test/t/alter_table.test index 168d011a2ac..78bbd23adf1 100644 --- a/mysql-test/t/alter_table.test +++ b/mysql-test/t/alter_table.test @@ -535,4 +535,3 @@ INSERT INTO `@0023sql1` VALUES (2); SHOW CREATE TABLE `#sql2`; SHOW CREATE TABLE `@0023sql1`; DROP TABLE `#sql2`, `@0023sql1`; - diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test index 140cdccc218..442120fae4a 100644 --- a/mysql-test/t/create.test +++ b/mysql-test/t/create.test @@ -706,3 +706,24 @@ TRUNCATE table t2; INSERT INTO t2 select * from t1; SELECT * from t2; drop table t1,t2; + +# +# Test incorrect database names +# + +--error 1102 +CREATE DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +--error 1102 +DROP DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +--error 1049 +RENAME DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa TO a; +--error 1102 +RENAME DATABASE mysqltest TO aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +create database mysqltest; +--error 1102 +RENAME DATABASE mysqltest TO aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +drop database mysqltest; +--error 1102 +USE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +--error 1102 +SHOW CREATE DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; diff --git a/mysql-test/t/ctype_create.test b/mysql-test/t/ctype_create.test index e88004bbb8c..060c09a0459 100644 --- a/mysql-test/t/ctype_create.test +++ b/mysql-test/t/ctype_create.test @@ -100,3 +100,8 @@ drop database mysqltest2; ALTER DATABASE DEFAULT CHARACTER SET latin2; # End of 4.1 tests + +--error 1102 +ALTER DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa DEFAULT CHARACTER SET latin2; +--error 1102 +ALTER DATABASE `` DEFAULT CHARACTER SET 
latin2; diff --git a/mysql-test/t/events.test b/mysql-test/t/events.test index aac13a55dd3..b98f0f54130 100644 --- a/mysql-test/t/events.test +++ b/mysql-test/t/events.test @@ -378,4 +378,15 @@ drop event e1; ##show processlist; ##select count(*) from mysql.event; +# +# Test wrong syntax +# + +--error 1102 +SHOW EVENTS FROM aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +--error 1102 +SHOW EVENTS FROM ``; + +SHOW EVENTS FROM `events\\test`; + drop database events_test; diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index f603cafd89d..c4f0fed88f7 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1249,6 +1249,9 @@ bool get_interval_value(Item *args,interval_type int_type, interval->second= array[0]; interval->second_part= array[1]; break; + case INTERVAL_LAST: /* purecov: begin deadcode */ + DBUG_ASSERT(0); + break; /* purecov: end */ } return 0; } @@ -2088,6 +2091,7 @@ void Item_extract::fix_length_and_dec() case INTERVAL_HOUR_MICROSECOND: max_length=13; date_value=0; break; case INTERVAL_MINUTE_MICROSECOND: max_length=11; date_value=0; break; case INTERVAL_SECOND_MICROSECOND: max_length=9; date_value=0; break; + case INTERVAL_LAST: DBUG_ASSERT(0); break; /* purecov: deadcode */ } } @@ -2157,6 +2161,7 @@ longlong Item_extract::val_int() ltime.second_part)*neg; case INTERVAL_SECOND_MICROSECOND: return ((longlong)ltime.second*1000000L+ ltime.second_part)*neg; + case INTERVAL_LAST: DBUG_ASSERT(0); break; /* purecov: deadcode */ } return 0; // Impossible } diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 7de6f6d04e0..466a37c7d77 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1828,7 +1828,7 @@ int create_frm(THD *thd, const char *name, const char *db, const char *table, HA_CREATE_INFO *create_info, uint keys); void update_create_info_from_table(HA_CREATE_INFO *info, TABLE *form); int rename_file_ext(const char * from,const char * to,const char * ext); -bool check_db_name(char *db); +bool check_db_name(LEX_STRING *db); bool check_column_name(const char *name); bool check_table_name(const char *name, uint length); char *get_field(MEM_ROOT *mem, Field *field); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 419ea27b941..1f95939b895 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5310,7 +5310,8 @@ master-ssl", (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"merge", OPT_MERGE, "Enable Merge storage engine. 
Disable with \ --skip-merge.", - (gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0}, + (gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, + 0}, {"myisam-recover", OPT_MYISAM_RECOVER, "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.", (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0, diff --git a/sql/sql_class.h b/sql/sql_class.h index 6b46c9676f7..e577774a16f 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -420,6 +420,12 @@ public: { return strdup_root(mem_root,str); } inline char *strmake(const char *str, uint size) { return strmake_root(mem_root,str,size); } + inline bool LEX_STRING_make(LEX_STRING *lex_str, const char *str, uint size) + { + return ((lex_str->str= + strmake_root(mem_root, str, (lex_str->length= size)))) == 0; + } + inline char *memdup(const char *str, uint size) { return memdup_root(mem_root,str,size); } inline char *memdup_w_gap(const char *str, uint size, uint gap) @@ -1617,8 +1623,7 @@ public: return TRUE; } *p_db= strmake(db, db_length); - if (p_db_length) - *p_db_length= db_length; + *p_db_length= db_length; return FALSE; } }; diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 372a350566f..ac3ab0196bb 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -1297,8 +1297,8 @@ err: bool mysql_change_db(THD *thd, const char *name, bool no_access_check) { - int path_length, db_length; - char *db_name; + int path_length; + LEX_STRING db_name; bool system_db= 0; #ifndef NO_EMBEDDED_ACCESS_CHECKS ulong db_access; @@ -1318,25 +1318,26 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check) /* Called from SP to restore the original database, which was NULL */ DBUG_ASSERT(no_access_check); system_db= 1; - db_name= NULL; - db_length= 0; + db_name.str= NULL; + db_name.length= 0; goto end; } /* Now we need to make a copy because check_db_name requires a non-constant argument. TODO: fix check_db_name. 
*/ - if ((db_name= my_strdup(name, MYF(MY_WME))) == NULL) + if ((db_name.str= my_strdup(name, MYF(MY_WME))) == NULL) DBUG_RETURN(1); /* the error is set */ - db_length= strlen(db_name); - if (check_db_name(db_name)) + db_name.length= strlen(db_name.str); + if (check_db_name(&db_name)) { - my_error(ER_WRONG_DB_NAME, MYF(0), db_name); - my_free(db_name, MYF(0)); + my_error(ER_WRONG_DB_NAME, MYF(0), db_name.str); + my_free(db_name.str, MYF(0)); DBUG_RETURN(1); } - DBUG_PRINT("info",("Use database: %s", db_name)); - if (!my_strcasecmp(system_charset_info, db_name, information_schema_name.str)) + DBUG_PRINT("info",("Use database: %s", db_name.str)); + if (!my_strcasecmp(system_charset_info, db_name.str, + information_schema_name.str)) { system_db= 1; #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -1351,34 +1352,35 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check) if (test_all_bits(sctx->master_access, DB_ACLS)) db_access=DB_ACLS; else - db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, db_name, 0) | + db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, + db_name.str, 0) | sctx->master_access); if (!(db_access & DB_ACLS) && (!grant_option || - check_grant_db(thd,db_name))) + check_grant_db(thd, db_name.str))) { my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), sctx->priv_user, sctx->priv_host, - db_name); + db_name.str); general_log_print(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR), - sctx->priv_user, sctx->priv_host, db_name); - my_free(db_name,MYF(0)); + sctx->priv_user, sctx->priv_host, db_name.str); + my_free(db_name.str, MYF(0)); DBUG_RETURN(1); } } #endif - if (check_db_dir_existence(db_name)) + if (check_db_dir_existence(db_name.str)) { - my_error(ER_BAD_DB_ERROR, MYF(0), db_name); - my_free(db_name, MYF(0)); + my_error(ER_BAD_DB_ERROR, MYF(0), db_name.str); + my_free(db_name.str, MYF(0)); DBUG_RETURN(1); } end: x_free(thd->db); - DBUG_ASSERT(db_name == NULL || db_name[0] != '\0'); - thd->reset_db(db_name, db_length); // THD::~THD will free this + DBUG_ASSERT(db_name.str == NULL || db_name.str[0] != '\0'); + thd->reset_db(db_name.str, db_name.length); // THD::~THD will free this #ifndef NO_EMBEDDED_ACCESS_CHECKS if (!no_access_check) sctx->db_access= db_access; @@ -1392,7 +1394,7 @@ end: { HA_CREATE_INFO create; - load_db_opt_by_name(thd, db_name, &create); + load_db_opt_by_name(thd, db_name.str, &create); thd->db_charset= create.default_table_charset ? 
create.default_table_charset : diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index c5064df931b..99b99c1407c 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -175,7 +175,8 @@ void lex_start(THD *thd, const uchar *buf, uint length) lex->escape_used= lex->et_compile_phase= FALSE; lex->reset_query_tables_list(FALSE); - lex->name= 0; + lex->name.str= 0; + lex->name.length= 0; lex->et= NULL; lex->nest_level=0 ; diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 9f6df9861e2..eae8bd1b81d 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -879,7 +879,8 @@ typedef struct st_lex : public Query_tables_list /* The values of tok_start/tok_end as they were one call of MYSQLlex before */ const uchar *tok_start_prev, *tok_end_prev; - char *length,*dec,*change,*name; + char *length,*dec,*change; + LEX_STRING name; Table_ident *like_name; char *help_arg; char *backup_dir; /* For RESTORE/BACKUP */ diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 9217c147143..4be8c656bb5 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -68,6 +68,7 @@ static void decrease_user_connections(USER_CONN *uc); #endif /* NO_EMBEDDED_ACCESS_CHECKS */ static bool check_multi_update_lock(THD *thd); static void remove_escape(char *name); +static void remove_escape(LEX_STRING *str); static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables); const char *any_db="*any*"; // Special symbol for check_access @@ -1056,11 +1057,14 @@ static int check_connection(THD *thd) Old clients send null-terminated string as password; new clients send the size (1 byte) + string (not null-terminated). Hence in case of empty password both send '\0'. + + This strlen() can't be easily deleted without changing protocol. */ uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? *passwd++ : strlen(passwd); db= thd->client_capabilities & CLIENT_CONNECT_WITH_DB ? db + passwd_len + 1 : 0; + /* strlen() can't be easily deleted without changing protocol */ uint db_len= db ? strlen(db) : 0; if (passwd + passwd_len + db_len > (char *)net->read_pos + pkt_len) @@ -1315,28 +1319,31 @@ pthread_handler_t handle_bootstrap(void *arg) thd->init_for_queries(); while (fgets(buff, thd->net.max_packet, file)) { - ulong length= (ulong) strlen(buff); - while (buff[length-1] != '\n' && !feof(file)) - { - /* - We got only a part of the current string. Will try to increase - net buffer then read the rest of the current string. - */ - if (net_realloc(&(thd->net), 2 * thd->net.max_packet)) - { - net_send_error(thd, ER_NET_PACKET_TOO_LARGE, NullS); - thd->fatal_error(); - break; - } - buff= (char*) thd->net.buff; - fgets(buff + length, thd->net.max_packet - length, file); - length+= (ulong) strlen(buff + length); - } - if (thd->is_fatal_error) - break; + /* strlen() can't be deleted because fgets() doesn't return length */ + ulong length= (ulong) strlen(buff); + while (buff[length-1] != '\n' && !feof(file)) + { + /* + We got only a part of the current string. Will try to increase + net buffer then read the rest of the current string. 
+ */ + /* purecov: begin tested */ + if (net_realloc(&(thd->net), 2 * thd->net.max_packet)) + { + net_send_error(thd, ER_NET_PACKET_TOO_LARGE, NullS); + thd->fatal_error(); + break; + } + buff= (char*) thd->net.buff; + fgets(buff + length, thd->net.max_packet - length, file); + length+= (ulong) strlen(buff + length); + /* purecov: end */ + } + if (thd->is_fatal_error) + break; /* purecov: inspected */ while (length && (my_isspace(thd->charset(), buff[length-1]) || - buff[length-1] == ';')) + buff[length-1] == ';')) length--; buff[length]=0; thd->query_length=length; @@ -1421,24 +1428,30 @@ void cleanup_items(Item *item) */ static -int mysql_table_dump(THD* thd, char* db, char* tbl_name) +int mysql_table_dump(THD *thd, LEX_STRING *db, char *tbl_name) { TABLE* table; TABLE_LIST* table_list; int error = 0; DBUG_ENTER("mysql_table_dump"); - db = (db && db[0]) ? db : thd->db; + if (db->length == 0) + { + db->str= thd->db; /* purecov: inspected */ + db->length= thd->db_length; /* purecov: inspected */ + } if (!(table_list = (TABLE_LIST*) thd->calloc(sizeof(TABLE_LIST)))) DBUG_RETURN(1); // out of memory - table_list->db= db; + table_list->db= db->str; table_list->table_name= table_list->alias= tbl_name; table_list->lock_type= TL_READ_NO_INSERT; table_list->prev_global= &table_list; // can be removed after merge with 4.1 - if (!db || check_db_name(db)) + if (check_db_name(db)) { - my_error(ER_WRONG_DB_NAME ,MYF(0), db ? db : "NULL"); + /* purecov: begin inspected */ + my_error(ER_WRONG_DB_NAME ,MYF(0), db->str ? db->str : "NULL"); goto err; + /* purecov: end */ } if (lower_case_table_names) my_casedn_str(files_charset_info, tbl_name); @@ -1668,7 +1681,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, statistic_increment(thd->status_var.com_stat[SQLCOM_CHANGE_DB], &LOCK_status); thd->convert_string(&tmp, system_charset_info, - packet, strlen(packet), thd->charset()); + packet, packet_length-1, thd->charset()); if (!mysql_change_db(thd, tmp.str, FALSE)) { general_log_print(thd, command, "%s",thd->db); @@ -1686,7 +1699,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, #endif case COM_TABLE_DUMP: { - char *db, *tbl_name; + char *tbl_name; + LEX_STRING db; uint db_len= *(uchar*) packet; if (db_len >= packet_length || db_len > NAME_LEN) { @@ -1702,25 +1716,27 @@ bool dispatch_command(enum enum_server_command command, THD *thd, statistic_increment(thd->status_var.com_other, &LOCK_status); thd->enable_slow_log= opt_log_slow_admin_statements; - db= thd->alloc(db_len + tbl_len + 2); - if (!db) + db.str= thd->alloc(db_len + tbl_len + 2); + db.length= db_len; + if (!db.str) { my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); break; } - tbl_name= strmake(db, packet + 1, db_len)+1; + tbl_name= strmake(db.str, packet + 1, db_len)+1; strmake(tbl_name, packet + db_len + 2, tbl_len); - mysql_table_dump(thd, db, tbl_name); + mysql_table_dump(thd, &db, tbl_name); break; } case COM_CHANGE_USER: { + statistic_increment(thd->status_var.com_other, &LOCK_status); + char *user= (char*) packet, *packet_end= packet+ packet_length; + char *passwd= strend(user)+1; + thd->change_user(); thd->clear_error(); // if errors from rollback - statistic_increment(thd->status_var.com_other, &LOCK_status); - char *user= (char*) packet; - char *passwd= strend(user)+1; /* Old clients send null-terminated string ('\0' for empty string) for password. 
New clients send the size (1 byte) + string (not null @@ -1728,8 +1744,13 @@ bool dispatch_command(enum enum_server_command command, THD *thd, */ char db_buff[NAME_LEN+1]; // buffer to store db in utf8 char *db= passwd; - uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? - *passwd++ : strlen(passwd); + char *save_db; + uint passwd_len= (thd->client_capabilities & CLIENT_SECURE_CONNECTION ? + *passwd++ : strlen(passwd)); + uint dummy_errors, save_db_length, db_length, res; + Security_context save_security_ctx= *thd->security_ctx; + USER_CONN *save_user_connect; + db+= passwd_len + 1; #ifndef EMBEDDED_LIBRARY /* Small check for incoming packet */ @@ -1740,17 +1761,22 @@ bool dispatch_command(enum enum_server_command command, THD *thd, } #endif /* Convert database name to utf8 */ - uint dummy_errors; + /* + Handle problem with old bug in client protocol where db had an extra + \0 + */ + db_length= (packet_end - db); + if (db_length > 0 && db[db_length-1] == 0) + db_length--; db_buff[copy_and_convert(db_buff, sizeof(db_buff)-1, - system_charset_info, db, strlen(db), + system_charset_info, db, db_length, thd->charset(), &dummy_errors)]= 0; db= db_buff; /* Save user and privileges */ - uint save_db_length= thd->db_length; - char *save_db= thd->db; - Security_context save_security_ctx= *thd->security_ctx; - USER_CONN *save_user_connect= thd->user_connect; + save_db_length= thd->db_length; + save_db= thd->db; + save_user_connect= thd->user_connect; if (!(thd->security_ctx->user= my_strdup(user, MYF(0)))) { @@ -1761,7 +1787,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, /* Clear variables that are allocated */ thd->user_connect= 0; - int res= check_user(thd, COM_CHANGE_USER, passwd, passwd_len, db, FALSE); + res= check_user(thd, COM_CHANGE_USER, passwd, passwd_len, db, FALSE); if (res) { @@ -1871,29 +1897,31 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; #else { - char *fields, *pend; + char *fields, *packet_end= packet + packet_length - 1, *arg_end; /* Locked closure of all tables */ TABLE_LIST *locked_tables= NULL; TABLE_LIST table_list; LEX_STRING conv_name; /* Saved variable value */ my_bool old_innodb_table_locks= thd->variables.innodb_table_locks; - + uint dummy; /* used as fields initializator */ lex_start(thd, 0, 0); - statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_FIELDS], &LOCK_status); bzero((char*) &table_list,sizeof(table_list)); - if (thd->copy_db_to(&table_list.db, 0)) + if (thd->copy_db_to(&table_list.db, &dummy)) break; - pend= strend(packet); + /* + We have name + wildcard in packet, separated by endzero + */ + arg_end= strend(packet); thd->convert_string(&conv_name, system_charset_info, - packet, (uint) (pend-packet), thd->charset()); + packet, (uint) (arg_end - packet), thd->charset()); table_list.alias= table_list.table_name= conv_name.str; - packet= pend+1; + packet= arg_end + 1; if (!my_strcasecmp(system_charset_info, table_list.db, information_schema_name.str)) @@ -1903,7 +1931,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, table_list.schema_table= schema_table; } - thd->query_length= strlen(packet); // for simplicity: don't optimize + thd->query_length= (uint) (packet_end - packet); // Don't count end \0 if (!(thd->query=fields=thd->memdup(packet,thd->query_length+1))) break; general_log_print(thd, command, "%s %s", table_list.table_name, fields); @@ -1940,24 +1968,27 @@ bool dispatch_command(enum enum_server_command command, THD *thd, error=TRUE; // End server break; 
+#ifdef REMOVED case COM_CREATE_DB: // QQ: To be removed { - char *db=thd->strdup(packet), *alias; + LEX_STRING db, alias; HA_CREATE_INFO create_info; statistic_increment(thd->status_var.com_stat[SQLCOM_CREATE_DB], &LOCK_status); - // null test to handle EOM - if (!db || !(alias= thd->strdup(db)) || check_db_name(db)) + if (thd->LEX_STRING_make(&db, packet, packet_length -1) || + thd->LEX_STRING_make(&alias, db.str, db.length) || + check_db_name(&db)) { - my_error(ER_WRONG_DB_NAME, MYF(0), db ? db : "NULL"); + my_error(ER_WRONG_DB_NAME, MYF(0), db.str ? db.str : "NULL"); break; } - if (check_access(thd,CREATE_ACL,db,0,1,0,is_schema_db(db))) + if (check_access(thd, CREATE_ACL, db.str , 0, 1, 0, + is_schema_db(db.str))) break; general_log_print(thd, command, packet); bzero(&create_info, sizeof(create_info)); - mysql_create_db(thd, (lower_case_table_names == 2 ? alias : db), + mysql_create_db(thd, (lower_case_table_names == 2 ? alias.str : db.str), &create_info, 0); break; } @@ -1965,14 +1996,15 @@ bool dispatch_command(enum enum_server_command command, THD *thd, { statistic_increment(thd->status_var.com_stat[SQLCOM_DROP_DB], &LOCK_status); - char *db=thd->strdup(packet); - /* null test to handle EOM */ - if (!db || check_db_name(db)) + LEX_STRING db; + + if (thd->LEX_STRING_make(&db, packet, packet_length - 1) || + check_db_name(&db)) { - my_error(ER_WRONG_DB_NAME, MYF(0), db ? db : "NULL"); + my_error(ER_WRONG_DB_NAME, MYF(0), db.str ? db.str : "NULL"); break; } - if (check_access(thd,DROP_ACL,db,0,1,0,is_schema_db(db))) + if (check_access(thd, DROP_ACL, db.str, 0, 1, 0, is_schema_db(db.str))) break; if (thd->locked_tables || thd->active_transaction()) { @@ -1980,10 +2012,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd, ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); break; } - general_log_print(thd, command, db); - mysql_rm_db(thd, db, 0, 0); + general_log_print(thd, command, db.str); + mysql_rm_db(thd, db.str, 0, 0); break; } +#endif #ifndef EMBEDDED_LIBRARY case COM_BINLOG_DUMP: { @@ -2065,37 +2098,47 @@ bool dispatch_command(enum enum_server_command command, THD *thd, #endif case COM_STATISTICS: { + STATUS_VAR current_global_status_var; + ulong uptime; + uint length; +#ifndef EMBEDDED_LIBRARY + char buff[250]; + uint buff_len= sizeof(buff); +#else + char *buff= thd->net.last_error; + uint buff_len= sizeof(thd->net.last_error); +#endif + general_log_print(thd, command, NullS); statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS], &LOCK_status); -#ifndef EMBEDDED_LIBRARY - char buff[200]; -#else - char *buff= thd->net.last_error; -#endif - - STATUS_VAR current_global_status_var; calc_sum_of_all_status(¤t_global_status_var); - - ulong uptime = (ulong) (thd->start_time - start_time); - sprintf((char*) buff, - "Uptime: %lu Threads: %d Questions: %lu Slow queries: %lu Opens: %lu Flush tables: %lu Open tables: %u Queries per second avg: %.3f", - uptime, - (int) thread_count, (ulong) thd->query_id, - current_global_status_var.long_query_count, - current_global_status_var.opened_tables, refresh_version, - cached_open_tables(), - (uptime ? 
(ulonglong2double(thd->query_id) / (double) uptime) : - (double) 0)); + uptime= (ulong) (thd->start_time - start_time); + length= my_snprintf((char*) buff, buff_len - 1, + "Uptime: %lu Threads: %d Questions: %lu " + "Slow queries: %lu Opens: %lu Flush tables: %lu " + "Open tables: %u Queries per second avg: %.3f", + uptime, + (int) thread_count, (ulong) thd->query_id, + current_global_status_var.long_query_count, + current_global_status_var.opened_tables, + refresh_version, + cached_open_tables(), + (uptime ? (ulonglong2double(thd->query_id) / + (double) uptime) : (double) 0)); #ifdef SAFEMALLOC if (sf_malloc_cur_memory) // Using SAFEMALLOC - sprintf(strend(buff), " Memory in use: %ldK Max memory used: %ldK", - (sf_malloc_cur_memory+1023L)/1024L, - (sf_malloc_max_memory+1023L)/1024L); + { + char *end= buff + length; + length+= my_snprintf(end, buff_len - length - 1, + end," Memory in use: %ldK Max memory used: %ldK", + (sf_malloc_cur_memory+1023L)/1024L, + (sf_malloc_max_memory+1023L)/1024L); + } #endif #ifndef EMBEDDED_LIBRARY - VOID(my_net_write(net, buff,(uint) strlen(buff))); - VOID(net_flush(net)); + VOID(my_net_write(net, buff, length)); + VOID(net_flush(net)); #endif break; } @@ -2292,27 +2335,29 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, DBUG_RETURN(1); #else { - char *db; + LEX_STRING db; + uint dummy; if (lex->select_lex.db == NULL && - thd->copy_db_to(&lex->select_lex.db, 0)) + thd->copy_db_to(&lex->select_lex.db, &dummy)) { DBUG_RETURN(1); } - db= lex->select_lex.db; - remove_escape(db); // Fix escaped '_' - if (check_db_name(db)) + db.str= lex->select_lex.db; + db.length= strlen(db.str); + remove_escape(&db); // Fix escaped '_' + if (check_db_name(&db)) { - my_error(ER_WRONG_DB_NAME, MYF(0), db); + my_error(ER_WRONG_DB_NAME, MYF(0), db.str); DBUG_RETURN(1); } - if (check_access(thd, SELECT_ACL, db, &thd->col_access, 0, 0, - is_schema_db(db))) + if (check_access(thd, SELECT_ACL, db.str, &thd->col_access, 0, 0, + is_schema_db(db.str))) DBUG_RETURN(1); /* purecov: inspected */ - if (!thd->col_access && check_grant_db(thd,db)) + if (!thd->col_access && check_grant_db(thd, db.str)) { my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), thd->security_ctx->priv_user, thd->security_ctx->priv_host, - db); + db.str); DBUG_RETURN(1); } break; @@ -2854,11 +2899,6 @@ mysql_execute_command(THD *thd) if (check_grant(thd, CREATE_ACL, all_tables, 0, 1, 0)) goto error; } - if (strlen(first_table->table_name) > NAME_LEN) - { - my_error(ER_WRONG_TABLE_NAME, MYF(0), first_table->table_name); - break; - } pthread_mutex_lock(&LOCK_active_mi); /* fetch_master_table will send the error to the client on failure. 
@@ -3093,11 +3133,6 @@ end_with_restore_list: if (lex->alter_info.flags & ALTER_DROP_PARTITION) priv_needed|= DROP_ACL; - if (lex->name && (!lex->name[0] || strlen(lex->name) > NAME_LEN)) - { - my_error(ER_WRONG_TABLE_NAME, MYF(0), lex->name); - goto error; - } /* Must be set in the parser */ DBUG_ASSERT(select_lex->db); if (check_access(thd, priv_needed, first_table->db, @@ -3113,11 +3148,11 @@ end_with_restore_list: { if (check_grant(thd, priv_needed, all_tables, 0, UINT_MAX, 0)) goto error; - if (lex->name && !test_all_bits(priv,INSERT_ACL | CREATE_ACL)) + if (lex->name.str && !test_all_bits(priv,INSERT_ACL | CREATE_ACL)) { // Rename of table TABLE_LIST tmp_table; bzero((char*) &tmp_table,sizeof(tmp_table)); - tmp_table.table_name=lex->name; + tmp_table.table_name= lex->name.str; tmp_table.db=select_lex->db; tmp_table.grant.privilege=priv; if (check_grant(thd, INSERT_ACL | CREATE_ACL, &tmp_table, 0, @@ -3145,7 +3180,7 @@ end_with_restore_list: } thd->enable_slow_log= opt_log_slow_admin_statements; - res= mysql_alter_table(thd, select_lex->db, lex->name, + res= mysql_alter_table(thd, select_lex->db, lex->name.str, &lex->create_info, first_table, lex->create_list, lex->key_list, @@ -3740,9 +3775,10 @@ end_with_restore_list: break; } char *alias; - if (!(alias=thd->strdup(lex->name)) || check_db_name(lex->name)) + if (!(alias=thd->strmake(lex->name.str, lex->name.length)) || + check_db_name(&lex->name)) { - my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); + my_error(ER_WRONG_DB_NAME, MYF(0), lex->name.str); break; } /* @@ -3754,17 +3790,18 @@ end_with_restore_list: */ #ifdef HAVE_REPLICATION if (thd->slave_thread && - (!rpl_filter->db_ok(lex->name) || - !rpl_filter->db_ok_with_wild_table(lex->name))) + (!rpl_filter->db_ok(lex->name.str) || + !rpl_filter->db_ok_with_wild_table(lex->name.str))) { my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0)); break; } #endif - if (check_access(thd,CREATE_ACL,lex->name,0,1,0,is_schema_db(lex->name))) + if (check_access(thd,CREATE_ACL,lex->name.str, 0, 1, 0, + is_schema_db(lex->name.str))) break; - res= mysql_create_db(thd,(lower_case_table_names == 2 ? alias : lex->name), - &lex->create_info, 0); + res= mysql_create_db(thd,(lower_case_table_names == 2 ? 
alias : + lex->name.str), &lex->create_info, 0); break; } case SQLCOM_DROP_DB: @@ -3774,9 +3811,9 @@ end_with_restore_list: res= -1; break; } - if (check_db_name(lex->name)) + if (check_db_name(&lex->name)) { - my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); + my_error(ER_WRONG_DB_NAME, MYF(0), lex->name.str); break; } /* @@ -3788,14 +3825,15 @@ end_with_restore_list: */ #ifdef HAVE_REPLICATION if (thd->slave_thread && - (!rpl_filter->db_ok(lex->name) || - !rpl_filter->db_ok_with_wild_table(lex->name))) + (!rpl_filter->db_ok(lex->name.str) || + !rpl_filter->db_ok_with_wild_table(lex->name.str))) { my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0)); break; } #endif - if (check_access(thd,DROP_ACL,lex->name,0,1,0,is_schema_db(lex->name))) + if (check_access(thd,DROP_ACL,lex->name.str,0,1,0, + is_schema_db(lex->name.str))) break; if (thd->locked_tables || thd->active_transaction()) { @@ -3803,7 +3841,7 @@ end_with_restore_list: ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); goto error; } - res= mysql_rm_db(thd, lex->name, lex->drop_if_exists, 0); + res= mysql_rm_db(thd, lex->name.str, lex->drop_if_exists, 0); break; } case SQLCOM_RENAME_DB: @@ -3829,6 +3867,11 @@ end_with_restore_list: break; } #endif + if (check_db_name(newdb)) + { + my_error(ER_WRONG_DB_NAME, MYF(0), newdb->str); + break; + } if (check_access(thd,ALTER_ACL,olddb->str,0,1,0,is_schema_db(olddb->str)) || check_access(thd,DROP_ACL,olddb->str,0,1,0,is_schema_db(olddb->str)) || check_access(thd,CREATE_ACL,newdb->str,0,1,0,is_schema_db(newdb->str))) @@ -3850,11 +3893,10 @@ end_with_restore_list: } case SQLCOM_ALTER_DB: { - char *db= lex->name; - DBUG_ASSERT(db); /* Must be set in the parser */ - if (!strip_sp(db) || check_db_name(db)) + LEX_STRING *db= &lex->name; + if (check_db_name(db)) { - my_error(ER_WRONG_DB_NAME, MYF(0), db); + my_error(ER_WRONG_DB_NAME, MYF(0), db->str); break; } /* @@ -3866,14 +3908,14 @@ end_with_restore_list: */ #ifdef HAVE_REPLICATION if (thd->slave_thread && - (!rpl_filter->db_ok(db) || - !rpl_filter->db_ok_with_wild_table(db))) + (!rpl_filter->db_ok(db->str) || + !rpl_filter->db_ok_with_wild_table(db->str))) { my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0)); break; } #endif - if (check_access(thd, ALTER_ACL, db, 0, 1, 0, is_schema_db(db))) + if (check_access(thd, ALTER_ACL, db->str, 0, 1, 0, is_schema_db(db->str))) break; if (thd->locked_tables || thd->active_transaction()) { @@ -3881,17 +3923,17 @@ end_with_restore_list: ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); goto error; } - res= mysql_alter_db(thd, db, &lex->create_info); + res= mysql_alter_db(thd, db->str, &lex->create_info); break; } case SQLCOM_SHOW_CREATE_DB: { - if (!strip_sp(lex->name) || check_db_name(lex->name)) + if (check_db_name(&lex->name)) { - my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); + my_error(ER_WRONG_DB_NAME, MYF(0), lex->name.str); break; } - res=mysqld_show_create_db(thd,lex->name,&lex->create_info); + res= mysqld_show_create_db(thd, lex->name.str, &lex->create_info); break; } case SQLCOM_CREATE_EVENT: @@ -6276,9 +6318,38 @@ add_proc_to_list(THD* thd, Item *item) /* Fix escaping of _, % and \ in database and table names (for ODBC) */ +static void remove_escape(LEX_STRING *str) +{ + if (!str->length) // For empty DB names + return; + char *to, *end; + char *name= str->str; + + for (to= name, end= name + str->length; name != end; ) + { +#ifdef USE_MB + int l; + if (use_mb(system_charset_info) && + (l= my_ismbchar(system_charset_info, name, end))) + { + while (l--) + *to++ = *name++; + 
continue; + } +#endif + if (*name == '\\' && name + 1 != end) + name++; // Skip '\\' + *to++= *name++; + } + *to= 0; // Add end \0 for compability + str->length= (uint) (to - str->str); +} + +/* To be removed after next iteration of LEX_STRING replacements */ + static void remove_escape(char *name) { - if (!*name) // For empty DB names + if (!*name) // For empty DB names return; char *to; #ifdef USE_MB @@ -6291,19 +6362,20 @@ static void remove_escape(char *name) if (use_mb(system_charset_info) && (l = my_ismbchar(system_charset_info, name, strend))) { - while (l--) - *to++ = *name++; - name--; - continue; + while (l--) + *to++ = *name++; + name--; + continue; } #endif if (*name == '\\' && name[1]) - name++; // Skip '\\' + name++; // Skip '\\' /* purecov: inspected */ *to++= *name; } *to=0; } + /**************************************************************************** ** save order by and tables in own lists ****************************************************************************/ @@ -6371,7 +6443,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, } if (table->is_derived_table() == FALSE && table->db.str && - check_db_name(table->db.str)) + check_db_name(&table->db)) { my_error(ER_WRONG_DB_NAME, MYF(0), table->db.str); DBUG_RETURN(0); @@ -7659,7 +7731,7 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables, #ifdef NOT_NECESSARY_TO_CHECK_CREATE_TABLE_EXIST_WHEN_PREPARING_STATEMENT /* This code throws an ill error for CREATE TABLE t1 SELECT * FROM t1 */ /* - Only do the check for PS, becasue we on execute we have to check that + Only do the check for PS, because we on execute we have to check that against the opened tables to ensure we don't use a table that is part of the view (which can only be done after the table has been opened). */ diff --git a/sql/sql_table.cc b/sql/sql_table.cc index f0f69676ed2..3b221c67854 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -4579,7 +4579,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, my_error(ER_WRONG_TABLE_NAME, MYF(0), src_table); DBUG_RETURN(TRUE); } - if (!src_db || check_db_name(src_db)) + if (!src_db || check_db_name(&table_ident->db)) { my_error(ER_WRONG_DB_NAME, MYF(0), src_db ? 
src_db : "NULL"); DBUG_RETURN(-1); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 8b60eefe4ea..3655ee99087 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -724,7 +724,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text UNDERSCORE_CHARSET IDENT_sys TEXT_STRING_sys TEXT_STRING_literal NCHAR_STRING opt_component key_cache_name - sp_opt_label BIN_NUM label_ident TEXT_STRING_filesystem + sp_opt_label BIN_NUM label_ident TEXT_STRING_filesystem ident_or_empty %type opt_table_alias @@ -734,7 +734,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type remember_name remember_end opt_ident opt_db text_or_password - opt_constraint constraint ident_or_empty + opt_constraint constraint %type text_string opt_gconcat_separator @@ -1213,7 +1213,8 @@ create: lex->create_info.options=$2 | $4; lex->create_info.db_type= lex->thd->variables.table_type; lex->create_info.default_table_charset= NULL; - lex->name= 0; + lex->name.str= 0; + lex->name.length= 0; lex->like_name= 0; } create2 @@ -1253,7 +1254,7 @@ create: { LEX *lex=Lex; lex->sql_command=SQLCOM_CREATE_DB; - lex->name=$4.str; + lex->name= $4; lex->create_info.options=$3; } | CREATE EVENT_SYM opt_if_not_exists sp_name @@ -1578,7 +1579,7 @@ clear_privileges: sp_name: ident '.' ident { - if (!$1.str || check_db_name($1.str)) + if (!$1.str || check_db_name(&$1)) { my_error(ER_WRONG_DB_NAME, MYF(0), $1.str); YYABORT; @@ -3163,7 +3164,7 @@ size_number: uint text_shift_number= 0; longlong prefix_number; char *start_ptr= $1.str; - uint str_len= strlen(start_ptr); + uint str_len= $1.length; char *end_ptr= start_ptr + str_len; int error; prefix_number= my_strtoll10(start_ptr, &end_ptr, &error); @@ -4674,7 +4675,8 @@ alter: { THD *thd= YYTHD; LEX *lex= thd->lex; - lex->name= 0; + lex->name.str= 0; + lex->name.length= 0; lex->sql_command= SQLCOM_ALTER_TABLE; lex->duplicates= DUP_ERROR; if (!lex->select_lex.add_table_to_list(thd, $4, NULL, @@ -4684,7 +4686,6 @@ alter: lex->key_list.empty(); lex->col_list.empty(); lex->select_lex.init_order(); - lex->name= 0; lex->like_name= 0; lex->select_lex.db= ((TABLE_LIST*) lex->select_lex.table_list.first)->db; @@ -4709,7 +4710,8 @@ alter: THD *thd= Lex->thd; lex->sql_command=SQLCOM_ALTER_DB; lex->name= $3; - if (lex->name == NULL && thd->copy_db_to(&lex->name, NULL)) + if (lex->name.str == NULL && + thd->copy_db_to(&lex->name.str, &lex->name.length)) YYABORT; } | ALTER PROCEDURE sp_name @@ -4867,8 +4869,8 @@ opt_ev_sql_stmt: /* empty*/ { $$= 0;} ident_or_empty: - /* empty */ { $$= 0; } - | ident { $$= $1.str; }; + /* empty */ { $$.str= 0; $$.length= 0; } + | ident { $$= $1; }; alter_commands: | DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; } @@ -5146,19 +5148,20 @@ alter_list_item: { LEX *lex=Lex; THD *thd= lex->thd; + uint dummy; lex->select_lex.db=$3->db.str; if (lex->select_lex.db == NULL && - thd->copy_db_to(&lex->select_lex.db, NULL)) + thd->copy_db_to(&lex->select_lex.db, &dummy)) { YYABORT; } if (check_table_name($3->table.str,$3->table.length) || - $3->db.str && check_db_name($3->db.str)) + $3->db.str && check_db_name(&$3->db)) { my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str); YYABORT; } - lex->name= $3->table.str; + lex->name= $3->table; lex->alter_info.flags|= ALTER_RENAME; } | CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate @@ -7705,7 +7708,7 @@ drop: LEX *lex=Lex; lex->sql_command= SQLCOM_DROP_DB; lex->drop_if_exists=$3; - lex->name=$4.str; + 
lex->name= $4; } | DROP FUNCTION_SYM if_exists sp_name { @@ -8378,7 +8381,7 @@ show_param: { Lex->sql_command=SQLCOM_SHOW_CREATE_DB; Lex->create_info.options=$3; - Lex->name=$4.str; + Lex->name= $4; } | CREATE TABLE_SYM table_ident { @@ -10368,7 +10371,8 @@ grant_ident: { LEX *lex= Lex; THD *thd= lex->thd; - if (thd->copy_db_to(&lex->current_select->db, NULL)) + uint dummy; + if (thd->copy_db_to(&lex->current_select->db, &dummy)) YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; diff --git a/sql/table.cc b/sql/table.cc index dec4901807d..71a6d09730f 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2234,7 +2234,7 @@ char *get_field(MEM_ROOT *mem, Field *field) SYNPOSIS check_db_name() - name Name of database + org_name Name of database and length NOTES If lower_case_table_names is set then database is converted to lower case @@ -2244,35 +2244,35 @@ char *get_field(MEM_ROOT *mem, Field *field) 1 error */ -bool check_db_name(char *name) +bool check_db_name(LEX_STRING *org_name) { - char *start=name; - /* Used to catch empty names and names with end space */ - bool last_char_is_space= TRUE; + char *name= org_name->str; + + if (!org_name->length || org_name->length > NAME_LEN) + return 1; if (lower_case_table_names && name != any_db) my_casedn_str(files_charset_info, name); - while (*name) - { #if defined(USE_MB) && defined(USE_MB_IDENT) - last_char_is_space= my_isspace(system_charset_info, *name); - if (use_mb(system_charset_info)) + if (use_mb(system_charset_info)) + { + bool last_char_is_space= TRUE; + char *end= name + org_name->length; + while (name < end) { - int len=my_ismbchar(system_charset_info, name, - name+system_charset_info->mbmaxlen); - if (len) - { - name += len; - continue; - } + int len; + last_char_is_space= my_isspace(system_charset_info, *name); + len= my_ismbchar(system_charset_info, name, end); + if (!len) + len= 1; + name+= len; } -#else - last_char_is_space= *name==' '; -#endif - name++; + return last_char_is_space; } - return last_char_is_space || (uint) (name - start) > NAME_LEN; + else +#endif + return org_name->str[org_name->length - 1] != ' '; /* purecov: inspected */ } diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 02170f6aacb..669a6b74ba6 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -33,6 +33,7 @@ #include #include #include +#include #define VER "2.1" #define MAX_TEST_QUERY_LENGTH 300 /* MAX QUERY BUFFER LENGTH */ @@ -11990,13 +11991,21 @@ static void test_bug6081() rc= simple_command(mysql, COM_DROP_DB, current_db, (ulong)strlen(current_db), 0); - myquery(rc); + if (rc == 0 && mysql_errno(mysql) != ER_UNKNOWN_COM_ERROR) + { + myerror(NULL); /* purecov: inspected */ + die(__FILE__, __LINE__, "COM_DROP_DB failed"); /* purecov: inspected */ + } rc= simple_command(mysql, COM_DROP_DB, current_db, (ulong)strlen(current_db), 0); myquery_r(rc); rc= simple_command(mysql, COM_CREATE_DB, current_db, (ulong)strlen(current_db), 0); - myquery(rc); + if (rc == 0 && mysql_errno(mysql) != ER_UNKNOWN_COM_ERROR) + { + myerror(NULL); /* purecov: inspected */ + die(__FILE__, __LINE__, "COM_CREATE_DB failed"); /* purecov: inspected */ + } rc= simple_command(mysql, COM_CREATE_DB, current_db, (ulong)strlen(current_db), 0); myquery_r(rc); @@ -15297,7 +15306,7 @@ static void test_bug15752() MYSQL mysql_local; int rc, i; const int ITERATION_COUNT= 100; - char *query= "CALL p1()"; + const char *query= "CALL p1()"; myheader("test_bug15752"); @@ -15392,6 +15401,24 @@ static void test_bug21206() DBUG_VOID_RETURN; 
} +/* + Ensure we execute the status code while testing +*/ + +static void test_status() +{ + const char *status; + DBUG_ENTER("test_status"); + myheader("test_status"); + + if (!(status= mysql_stat(mysql))) + { + myerror("mysql_stat failed"); /* purecov: inspected */ + die(__FILE__, __LINE__, "mysql_stat failed"); /* purecov: inspected */ + } + DBUG_VOID_RETURN; +} + /* Read and parse arguments and MySQL options from my.cnf @@ -15669,6 +15696,7 @@ static struct my_tests_st my_tests[]= { { "test_mysql_insert_id", test_mysql_insert_id }, { "test_bug19671", test_bug19671}, { "test_bug21206", test_bug21206}, + { "test_status", test_status}, { 0, 0 } }; From d649efbb07af288b6b58b4bd01a04d34c7a7f467 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 19 Oct 2006 23:05:53 -0700 Subject: [PATCH 03/57] Fixed bug #23478. If elements a not top-level IN subquery were accessed by an index and the subquery result set included a NULL value then the quantified predicate that contained the subquery was evaluated to NULL when it should return a non-null value. mysql-test/r/subselect.result: Added a test case for bug #23478. mysql-test/t/subselect.test: Added a test case for bug #23478. --- mysql-test/r/subselect.result | 15 +++++++++++++++ mysql-test/t/subselect.test | 17 +++++++++++++++++ sql/item_subselect.cc | 3 +++ 3 files changed, 35 insertions(+) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index ad847b5f156..28fbfc86657 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -2982,3 +2982,18 @@ field1 field2 1 1 1 3 DROP TABLE t1, t2; +CREATE TABLE t1(a int, INDEX (a)); +INSERT INTO t1 VALUES (1), (3), (5), (7); +INSERT INTO t1 VALUES (NULL); +CREATE TABLE t2(a int); +INSERT INTO t2 VALUES (1),(2),(3); +EXPLAIN SELECT a, a IN (SELECT a FROM t1) FROM t2; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 3 +2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 2 Using index +SELECT a, a IN (SELECT a FROM t1) FROM t2; +a a IN (SELECT a FROM t1) +1 1 +2 NULL +3 1 +DROP TABLE t1,t2; diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index 6defa8b16a5..ac035c72d18 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -1948,4 +1948,21 @@ SELECT field1, field2 DROP TABLE t1, t2; +# +# Bug #23478: not top-level IN subquery returning a non-empty result set +# with possible NULL values by index access from the outer query +# + +CREATE TABLE t1(a int, INDEX (a)); +INSERT INTO t1 VALUES (1), (3), (5), (7); +INSERT INTO t1 VALUES (NULL); + +CREATE TABLE t2(a int); +INSERT INTO t2 VALUES (1),(2),(3); + +EXPLAIN SELECT a, a IN (SELECT a FROM t1) FROM t2; +SELECT a, a IN (SELECT a FROM t1) FROM t2; + +DROP TABLE t1,t2; + # End of 4.1 tests diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index f3be0663af8..1ab81d1862d 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -610,6 +610,7 @@ double Item_in_subselect::val() */ DBUG_ASSERT(0); DBUG_ASSERT(fixed == 1); + null_value= 0; if (exec()) { reset(); @@ -625,6 +626,7 @@ double Item_in_subselect::val() longlong Item_in_subselect::val_int() { DBUG_ASSERT(fixed == 1); + null_value= 0; if (exec()) { reset(); @@ -645,6 +647,7 @@ String *Item_in_subselect::val_str(String *str) */ DBUG_ASSERT(0); DBUG_ASSERT(fixed == 1); + null_value= 0; if (exec()) { reset(); From c3f2224f2e735e480058e1a5975f08e19d3c52a0 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 20 Oct 2006 12:41:27 -0700 Subject: [PATCH 04/57] 
Adjustments after the merge for bug 23478. --- sql/item_subselect.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 78dae0aaa1e..7015f450aa7 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -729,6 +729,7 @@ String *Item_in_subselect::val_str(String *str) bool Item_in_subselect::val_bool() { DBUG_ASSERT(fixed == 1); + null_value= 0; if (exec()) { reset(); @@ -747,6 +748,7 @@ my_decimal *Item_in_subselect::val_decimal(my_decimal *decimal_value) method should not be used */ DBUG_ASSERT(0); + null_value= 0; DBUG_ASSERT(fixed == 1); if (exec()) { From 4ddb48c619c5fb703a846f5dd2659a4854a8c0d7 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 23 Oct 2006 15:02:51 +0500 Subject: [PATCH 05/57] WL#3475 (Threads for the embedded server in mysqltest) Necessary code added to mysqltest.c. Disabled tests are available now. client/mysqltest.c: do_send_query function implemented, so now 'send' command will be run in separate thread for the embedded server. Mutex and condition added to the 'connection' struct for syncronisation purposes. Yes it'd be easier if we had pthread_join() command libmysql/libmysql.c: this isn't actually needed and causes problems in embedded server mysql-test/t/bdb-deadlock.test: test is available for the embedded server now mysql-test/t/flush.test: test is available for the embedded server now mysql-test/t/flush_block_commit.test: test is available for the embedded server now mysql-test/t/innodb-deadlock.test: test is available for the embedded server now mysql-test/t/innodb-lock.test: test is available for the embedded server now mysql-test/t/lock_multi.test: test is available for the embedded server now mysql-test/t/rename.test: test is available for the embedded server now mysql-test/t/show_check.test: test is available for the embedded server now mysql-test/t/status.test: test is available for the embedded server now --- client/mysqltest.c | 102 +++++++++++++++++++++++---- libmysql/libmysql.c | 7 -- mysql-test/t/bdb-deadlock.test | 8 --- mysql-test/t/flush.test | 8 --- mysql-test/t/flush_block_commit.test | 3 - mysql-test/t/innodb-deadlock.test | 2 - mysql-test/t/innodb-lock.test | 2 - mysql-test/t/lock_multi.test | 8 --- mysql-test/t/rename.test | 4 -- mysql-test/t/show_check.test | 3 +- mysql-test/t/status.test | 7 -- 11 files changed, 90 insertions(+), 64 deletions(-) diff --git a/client/mysqltest.c b/client/mysqltest.c index ad0f9f857bb..6889ae1a84c 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -23,6 +23,7 @@ * Matt Wagner * Monty * Jani + * Holyfoot **/ /********************************************************************** @@ -215,6 +216,12 @@ struct connection { MYSQL mysql; char *name; + + const char *cur_query; + int cur_query_len; + pthread_mutex_t mutex; + pthread_cond_t cond; + int query_done; }; typedef struct @@ -461,6 +468,57 @@ static void replace_dynstr_append_mem(DYNAMIC_STRING *ds, const char *val, int len); static int handle_no_error(struct st_query *q); +#ifdef EMBEDDED_LIBRARY +/* + send_one_query executes query in separate thread what is + necessary in embedded library to run 'send' in proper way. + This implementation doesn't handle errors returned + by mysql_send_query. It's technically possible, though + i don't see where it is needed. 
+*/ +pthread_handler_decl(send_one_query, arg) +{ + struct connection *cn= (struct connection*)arg; + + mysql_thread_init(); + VOID(mysql_send_query(&cn->mysql, cn->cur_query, cn->cur_query_len)); + + mysql_thread_end(); + pthread_mutex_lock(&cn->mutex); + cn->query_done= 1; + VOID(pthread_cond_signal(&cn->cond)); + pthread_mutex_unlock(&cn->mutex); + pthread_exit(0); + return 0; +} + +static int do_send_query(struct connection *cn, const char *q, int q_len, + int flags) +{ + pthread_t tid; + + if (flags & QUERY_REAP) + return mysql_send_query(&cn->mysql, q, q_len); + + if (pthread_mutex_init(&cn->mutex, NULL) || + pthread_cond_init(&cn->cond, NULL)) + die("Error in the thread library"); + + cn->cur_query= q; + cn->cur_query_len= q_len; + cn->query_done= 0; + if (pthread_create(&tid, NULL, send_one_query, (void*)cn)) + die("Cannot start new thread for query"); + + return 0; +} + +#else /*EMBEDDED_LIBRARY*/ + +#define do_send_query(cn,q,q_len,flags) mysql_send_query(&cn->mysql, q, q_len) + +#endif /*EMBEDDED_LIBRARY*/ + static void do_eval(DYNAMIC_STRING* query_eval, const char *query) { const char *p; @@ -1849,7 +1907,7 @@ int close_connection(struct st_query *q) #ifndef EMBEDDED_LIBRARY if (q->type == Q_DIRTY_CLOSE) { - if (con->mysql.net.vio) + while (con->mysql.net.vio) { vio_delete(con->mysql.net.vio); con->mysql.net.vio = 0; @@ -2767,15 +2825,17 @@ static void append_result(DYNAMIC_STRING *ds, MYSQL_RES *res) * the result will be read - for regular query, both bits must be on */ -static int run_query_normal(MYSQL *mysql, struct st_query *q, int flags); -static int run_query_stmt (MYSQL *mysql, struct st_query *q, int flags); +static int run_query_normal(struct connection *cn, struct st_query *q, + int flags); +static int run_query_stmt (struct connection *cn, struct st_query *q, + int flags); static void run_query_stmt_handle_warnings(MYSQL *mysql, DYNAMIC_STRING *ds); static int run_query_stmt_handle_error(char *query, struct st_query *q, MYSQL_STMT *stmt, DYNAMIC_STRING *ds); static void run_query_display_metadata(MYSQL_FIELD *field, uint num_fields, DYNAMIC_STRING *ds); -static int run_query(MYSQL *mysql, struct st_query *q, int flags) +static int run_query(struct connection *cn, struct st_query *q, int flags) { /* @@ -2791,13 +2851,15 @@ static int run_query(MYSQL *mysql, struct st_query *q, int flags) if (ps_protocol_enabled && disable_info && (flags & QUERY_SEND) && (flags & QUERY_REAP) && ps_match_re(q->query)) - return run_query_stmt(mysql, q, flags); - return run_query_normal(mysql, q, flags); + return run_query_stmt(cn, q, flags); + return run_query_normal(cn, q, flags); } -static int run_query_normal(MYSQL* mysql, struct st_query* q, int flags) +static int run_query_normal(struct connection *cn, struct st_query* q, + int flags) { + MYSQL *mysql= &cn->mysql; MYSQL_RES* res= 0; uint i; int error= 0, err= 0, counter= 0; @@ -2833,11 +2895,24 @@ static int run_query_normal(MYSQL* mysql, struct st_query* q, int flags) if (flags & QUERY_SEND) { - got_error_on_send= mysql_send_query(mysql, query, query_len); + got_error_on_send= do_send_query(cn, query, query_len, flags); if (got_error_on_send && q->expected_errno[0].type == ERR_EMPTY) die("unable to send query '%s' (mysql_errno=%d , errno=%d)", query, mysql_errno(mysql), errno); } +#ifdef EMBEDDED_LIBRARY + /* + Here we handle 'reap' command, so we need to check if the + query's thread was finished and probably wait + */ + else if (flags & QUERY_REAP) + { + pthread_mutex_lock(&cn->mutex); + if (!cn->query_done) + 
pthread_cond_wait(&cn->cond, &cn->mutex); + pthread_mutex_unlock(&cn->mutex); + } +#endif /*EMBEDDED_LIBRARY*/ do { @@ -3038,8 +3113,9 @@ end: complete SEND+REAP */ -static int run_query_stmt(MYSQL *mysql, struct st_query *q, int flags) +static int run_query_stmt(struct connection *cn, struct st_query *q, int flags) { + MYSQL *mysql= &cn->mysql; int error= 0; /* Function return code if "goto end;" */ int err; /* Temporary storage of return code from calls */ int query_len, got_error_on_execute; @@ -3095,7 +3171,7 @@ static int run_query_stmt(MYSQL *mysql, struct st_query *q, int flags) C API. */ if ((err= mysql_stmt_prepare(stmt, query, query_len)) == CR_NO_PREPARE_STMT) - return run_query_normal(mysql, q, flags); + return run_query_normal(cn, q, flags); if (err != 0) { @@ -3922,7 +3998,7 @@ int main(int argc, char **argv) q->require_file=require_file; save_file[0]=0; } - error|= run_query(&cur_con->mysql, q, QUERY_REAP|QUERY_SEND); + error|= run_query(cur_con, q, QUERY_REAP|QUERY_SEND); display_result_vertically= old_display_result_vertically; q->last_argument= q->end; query_executed= 1; @@ -3949,7 +4025,7 @@ int main(int argc, char **argv) q->require_file=require_file; save_file[0]=0; } - error |= run_query(&cur_con->mysql, q, flags); + error |= run_query(cur_con, q, flags); query_executed= 1; q->last_argument= q->end; break; @@ -3970,7 +4046,7 @@ int main(int argc, char **argv) query and read the result some time later when reap instruction is given on this connection. */ - error |= run_query(&cur_con->mysql, q, QUERY_SEND); + error |= run_query(cur_con, q, QUERY_SEND); query_executed= 1; q->last_argument= q->end; break; diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index 91c0b6b8864..5577ecdb556 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -4395,13 +4395,6 @@ int STDCALL mysql_stmt_store_result(MYSQL_STMT *stmt) set_stmt_error(stmt, CR_COMMANDS_OUT_OF_SYNC, unknown_sqlstate); DBUG_RETURN(1); } - if (result->data) - { - free_root(&result->alloc, MYF(MY_KEEP_PREALLOC)); - result->data= NULL; - result->rows= 0; - stmt->data_cursor= NULL; - } if (stmt->update_max_length && !stmt->bind_result_done) { diff --git a/mysql-test/t/bdb-deadlock.test b/mysql-test/t/bdb-deadlock.test index 88243cfc860..b48648e0fd0 100644 --- a/mysql-test/t/bdb-deadlock.test +++ b/mysql-test/t/bdb-deadlock.test @@ -1,11 +1,3 @@ -# This test doesn't work with the embedded version as this code -# assumes that one query is running while we are doing queries on -# a second connection. -# This would work if mysqltest run would be threaded and handle each -# connection in a separate thread. -# - --- source include/not_embedded.inc -- source include/have_bdb.inc connect (con1,localhost,root,,); diff --git a/mysql-test/t/flush.test b/mysql-test/t/flush.test index aedf8e85b65..8fe62ecac01 100644 --- a/mysql-test/t/flush.test +++ b/mysql-test/t/flush.test @@ -1,11 +1,3 @@ -# This test doesn't work with the embedded version as this code -# assumes that one query is running while we are doing queries on -# a second connection. -# This would work if mysqltest run would be threaded and handle each -# connection in a separate thread. 
-# --- source include/not_embedded.inc - connect (con1,localhost,root,,); connect (con2,localhost,root,,); connection con1; diff --git a/mysql-test/t/flush_block_commit.test b/mysql-test/t/flush_block_commit.test index 1e7ecd2548c..0c1d2b82df6 100644 --- a/mysql-test/t/flush_block_commit.test +++ b/mysql-test/t/flush_block_commit.test @@ -3,9 +3,6 @@ # We verify that we did not introduce a deadlock. # This is intended to mimick how mysqldump and innobackup work. -# This test doesn't work with the embedded server --- source include/not_embedded.inc - # And it requires InnoDB -- source include/have_innodb.inc diff --git a/mysql-test/t/innodb-deadlock.test b/mysql-test/t/innodb-deadlock.test index 41741942963..81acfba5c93 100644 --- a/mysql-test/t/innodb-deadlock.test +++ b/mysql-test/t/innodb-deadlock.test @@ -1,6 +1,4 @@ -- source include/have_innodb.inc -# Can't test this with embedded server --- source include/not_embedded.inc connect (con1,localhost,root,,); connect (con2,localhost,root,,); diff --git a/mysql-test/t/innodb-lock.test b/mysql-test/t/innodb-lock.test index 55a712fef9b..eacf7e562be 100644 --- a/mysql-test/t/innodb-lock.test +++ b/mysql-test/t/innodb-lock.test @@ -1,6 +1,4 @@ -- source include/have_innodb.inc -# Can't test this with embedded server --- source include/not_embedded.inc # # Check and select innodb lock type diff --git a/mysql-test/t/lock_multi.test b/mysql-test/t/lock_multi.test index 2e40aeaccb7..32e7f4234c4 100644 --- a/mysql-test/t/lock_multi.test +++ b/mysql-test/t/lock_multi.test @@ -1,11 +1,3 @@ -# This test doesn't work with the embedded version as this code -# assumes that one query is running while we are doing queries on -# a second connection. -# This would work if mysqltest run would be threaded and handle each -# connection in a separate thread. -# --- source include/not_embedded.inc - --disable_warnings drop table if exists t1,t2; --enable_warnings diff --git a/mysql-test/t/rename.test b/mysql-test/t/rename.test index 5caecef176e..ad9921d2cf0 100644 --- a/mysql-test/t/rename.test +++ b/mysql-test/t/rename.test @@ -2,10 +2,6 @@ # Test of rename table # -# Test requires concurrent connections, which can't be tested on embedded -# server --- source include/not_embedded.inc - --disable_warnings drop table if exists t0,t1,t2,t3,t4; # Clear up from other tests (to ensure that SHOW TABLES below is right) diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test index d70903adbc4..8be676d9a35 100644 --- a/mysql-test/t/show_check.test +++ b/mysql-test/t/show_check.test @@ -1,5 +1,4 @@ -# Requires use of multiple simultaneous connections, not supported with -# embedded server testing +# Uses GRANT commands that usually disabled in embedded server -- source include/not_embedded.inc # diff --git a/mysql-test/t/status.test b/mysql-test/t/status.test index 7fea51c9327..df8da26df57 100644 --- a/mysql-test/t/status.test +++ b/mysql-test/t/status.test @@ -1,10 +1,3 @@ -# This test doesn't work with the embedded version as this code -# assumes that one query is running while we are doing queries on -# a second connection. -# This would work if mysqltest run would be threaded and handle each -# connection in a separate thread. 
-# ---source include/not_embedded.inc # PS causes different statistics --disable_ps_protocol From 7e2336925fa6dd1bab052f9b3bc5594f2d22f25b Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 24 Oct 2006 12:35:32 +0500 Subject: [PATCH 06/57] merging fix client/mysqltest.c: wrong 'while' was added instead of 'if' --- client/mysqltest.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/mysqltest.c b/client/mysqltest.c index 6889ae1a84c..3294612f7cc 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -1907,7 +1907,7 @@ int close_connection(struct st_query *q) #ifndef EMBEDDED_LIBRARY if (q->type == Q_DIRTY_CLOSE) { - while (con->mysql.net.vio) + if (con->mysql.net.vio) { vio_delete(con->mysql.net.vio); con->mysql.net.vio = 0; @@ -2908,7 +2908,7 @@ static int run_query_normal(struct connection *cn, struct st_query* q, else if (flags & QUERY_REAP) { pthread_mutex_lock(&cn->mutex); - if (!cn->query_done) + while (!cn->query_done) pthread_cond_wait(&cn->cond, &cn->mutex); pthread_mutex_unlock(&cn->mutex); } From 430a4ad32ca7a1b51e6b5c3ac30371bd9aa9f8e5 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 24 Oct 2006 15:28:16 +0500 Subject: [PATCH 07/57] WL#3475 merging client/mysqltest.c: merging fixes --- client/mysqltest.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/client/mysqltest.c b/client/mysqltest.c index f04df7db863..261a2ae2870 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -496,7 +496,7 @@ static void handle_no_error(struct st_query *q); by mysql_send_query. It's technically possible, though i don't see where it is needed. */ -pthread_handler_decl(send_one_query, arg) +pthread_handler_t send_one_query(void *arg) { struct connection *cn= (struct connection*)arg; @@ -2097,7 +2097,7 @@ int close_connection(struct st_query *q) #ifndef EMBEDDED_LIBRARY if (q->type == Q_DIRTY_CLOSE) { - while (con->mysql.net.vio) + if (con->mysql.net.vio) { vio_delete(con->mysql.net.vio); con->mysql.net.vio = 0; @@ -3684,7 +3684,7 @@ static void run_query_normal(struct connection *cn, struct st_query *command, else if (flags & QUERY_REAP) { pthread_mutex_lock(&cn->mutex); - if (!cn->query_done) + while (!cn->query_done) pthread_cond_wait(&cn->cond, &cn->mutex); pthread_mutex_unlock(&cn->mutex); } @@ -3939,11 +3939,10 @@ static void handle_no_error(struct st_query *q) error - function will not return */ -static void run_query_stmt(struct connection *cn, struct st_query *command, +static void run_query_stmt(MYSQL *mysql, struct st_query *command, char *query, int query_len, DYNAMIC_STRING *ds, DYNAMIC_STRING *ds_warnings) { - MYSQL *mysql= &cn->mysql; MYSQL_RES *res= NULL; /* Note that here 'res' is meta data result set */ MYSQL_STMT *stmt; DYNAMIC_STRING ds_prepare_warnings; @@ -4175,8 +4174,10 @@ static int util_query(MYSQL* org_mysql, const char* query){ */ -static void run_query(MYSQL *mysql, struct st_query *command, int flags) +static void run_query(struct connection *cn, struct st_query *command, + int flags) { + MYSQL *mysql= &cn->mysql; DYNAMIC_STRING *ds; DYNAMIC_STRING ds_result; DYNAMIC_STRING ds_warnings; @@ -4329,7 +4330,7 @@ static void run_query(MYSQL *mysql, struct st_query *command, int flags) match_re(&ps_re, query)) run_query_stmt(mysql, command, query, query_len, ds, &ds_warnings); else - run_query_normal(mysql, command, flags, query, query_len, + run_query_normal(cn, command, flags, query, query_len, ds, &ds_warnings); if (sp_created) From 6c4aa883ce15b6b5193d34caf72025548da56175 Mon Sep 17 00:00:00 2001 From: 
unknown Date: Tue, 24 Oct 2006 17:19:02 +0500 Subject: [PATCH 08/57] Bug #23427 (incompatible ABI change) the incompatibility was caused by current_stmt member added to the MYSQL structure. It's possible to move it to THD structure instead which saves ABI include/mysql.h: member moved to the THD structure libmysqld/lib_sql.cc: now we use THD member here sql/sql_class.h: current_stmt member added for the embedded server --- include/mysql.h | 6 ------ libmysqld/lib_sql.cc | 10 +++++----- sql/sql_class.h | 6 ++++++ 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/include/mysql.h b/include/mysql.h index 143f6752c46..89e861864df 100644 --- a/include/mysql.h +++ b/include/mysql.h @@ -270,12 +270,6 @@ typedef struct st_mysql from mysql_stmt_close if close had to cancel result set of this object. */ my_bool *unbuffered_fetch_owner; - /* - In embedded server it points to the statement that is processed - in the current query. We store some results directly in statement - fields then. - */ - struct st_mysql_stmt *current_stmt; } MYSQL; typedef struct st_mysql_res { diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 1a3e10f08a8..64bc37fb40d 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -94,7 +94,7 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command, mysql->affected_rows= ~(my_ulonglong) 0; mysql->field_count= 0; net->last_errno= 0; - mysql->current_stmt= stmt; + thd->current_stmt= stmt; thd->store_globals(); // Fix if more than one connect /* @@ -644,8 +644,8 @@ bool Protocol::send_fields(List *list, uint flag) DBUG_RETURN(0); field_count= list->elements; - field_alloc= mysql->current_stmt ? &mysql->current_stmt->mem_root : - &mysql->field_alloc; + field_alloc= thd->current_stmt ? &thd->current_stmt->mem_root : + &mysql->field_alloc; if (!(client_field= mysql->fields= (MYSQL_FIELD *)alloc_root(field_alloc, sizeof(MYSQL_FIELD) * field_count))) @@ -751,8 +751,8 @@ bool Protocol_prep::write() { MYSQL *mysql= thd->mysql; - if (mysql->current_stmt) - data= &mysql->current_stmt->result; + if (thd->current_stmt) + data= &thd->current_stmt->result; else { if (!(data= (MYSQL_DATA*) my_malloc(sizeof(MYSQL_DATA), diff --git a/sql/sql_class.h b/sql/sql_class.h index cc90de2a6ea..ed161de55de 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -686,6 +686,12 @@ public: char *extra_data; ulong extra_length; String query_rest; + /* + In embedded server it points to the statement that is processed + in the current query. We store some results directly in statement + fields then. + */ + struct st_mysql_stmt *current_stmt; #endif NET net; // client connection descriptor MEM_ROOT warn_root; // For warnings and errors From f51d0812a78ebd53be143fec274c532b94a65c12 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 24 Oct 2006 15:26:41 +0300 Subject: [PATCH 09/57] Bug #21809: Error 1356 while selecting from view with grouping though underlying select OK. The SQL parser was using Item::name to transfer user defined function attributes to the user defined function (udf). It was not distinguishing between user defined function call arguments and stored procedure call arguments. Setting Item::name was causing Item_ref::print() method to print the argument as quoted identifiers and caused views that reference aggregate functions as udf call arguments (and rely on Item::print() for the text of the view to store) to throw an undefined identifier error. 
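For illustration only, the printing problem and the shape of the fix reduce to the minimal standalone C++ sketch below; the class and member names are simplified stand-ins rather than the real Item hierarchy, but they show why a reference that prints its alias as a quoted identifier corrupts the stored view text, while a reference that delegates print() to the underlying aggregate keeps the text re-parsable.

// Illustrative sketch, not MySQL source: simplified stand-ins for the Item classes.
#include <iostream>
#include <string>

struct Item {                        // stand-in for the Item base class
  std::string name;                  // alias assigned during parsing
  virtual void print(std::string *out) const { *out += name; }
  virtual ~Item() = default;
};

struct Item_sum_min : Item {         // stand-in for an aggregate such as MIN(b)
  void print(std::string *out) const override { *out += "min(`b`)"; }
};

// Naive reference: prints its alias as a quoted identifier, so the stored
// view text contains `MIN(b)`, an identifier that cannot be resolved when
// the view is re-parsed (the undefined-identifier error described above).
struct Item_ref : Item {
  Item **ref;
  explicit Item_ref(Item **r) : ref(r) { name = "MIN(b)"; }
  void print(std::string *out) const override { *out += "`" + name + "`"; }
};

// Fixed reference (analogous in spirit to the Item_aggregate_ref added by
// this patch): delegate printing to the referenced aggregate itself.
struct Item_aggregate_ref : Item_ref {
  using Item_ref::Item_ref;
  void print(std::string *out) const override {
    if (ref && *ref) (*ref)->print(out);
    else Item_ref::print(out);
  }
};

int main() {
  Item_sum_min agg;
  Item *slot = &agg;
  Item_ref naive(&slot);
  Item_aggregate_ref fixed(&slot);
  std::string bad, good;
  naive.print(&bad);   // "`MIN(b)`"  -> view text no longer re-parsable
  fixed.print(&good);  // "min(`b`)"  -> aggregate call survives in view text
  std::cout << bad << "\n" << good << "\n";
  return 0;
}
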
Overloaded Item_ref::print to print aggregate functions as such when printing the references to aggregate functions taken out of context by split_sum_func2() Fixed the parser to properly detect using AS clause in stored procedure arguments as an error. Fixed printing the arguments of udf call to print properly the udf attribute. mysql-test/r/udf.result: Bug #21809: Error 1356 while selecting from view with grouping though underlying select OK. - test cases mysql-test/t/udf.test: Bug #21809: Error 1356 while selecting from view with grouping though underlying select OK. - test cases sql/item.cc: Bug #21809: Error 1356 while selecting from view with grouping though underlying select OK. - Don't print the refs to SUM functions as refs. sql/item_func.cc: Bug #21809: Error 1356 while selecting from view with grouping though underlying select OK. - print the aliases in the udf calls sql/item_func.h: Bug #21809: Error 1356 while selecting from view with grouping though underlying select OK. - print the aliases in the udf calls sql/sql_lex.cc: Bug #21809: Error 1356 while selecting from view with grouping though underlying select OK. - disable aliases for arguments in stored routine calls sql/sql_lex.h: Bug #21809: Error 1356 while selecting from view with grouping though underlying select OK. - disable aliases for arguments in stored routine calls sql/sql_yacc.yy: Bug #21809: Error 1356 while selecting from view with grouping though underlying select OK. - disable aliases for arguments in stored routine calls - fix bison duplicate symbol warnings --- mysql-test/r/udf.result | 79 +++++++++++++++++++++++++++++++++++++++++ mysql-test/t/udf.test | 44 +++++++++++++++++++++++ sql/item.cc | 26 ++++++++++++-- sql/item_func.cc | 14 ++++++++ sql/item_func.h | 1 + sql/sql_lex.cc | 2 ++ sql/sql_lex.h | 2 ++ sql/sql_yacc.yy | 41 +++++++++++++++------ 8 files changed, 196 insertions(+), 13 deletions(-) diff --git a/mysql-test/r/udf.result b/mysql-test/r/udf.result index 8e37cca6aa9..396f1efa1b7 100644 --- a/mysql-test/r/udf.result +++ b/mysql-test/r/udf.result @@ -105,6 +105,85 @@ explain select myfunc_int(f1) from t1 order by 1; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort drop table t1; +CREATE TABLE t1(a INT, b INT); +INSERT INTO t1 values (1,1),(2,2); +CREATE FUNCTION fn(a int) RETURNS int DETERMINISTIC +BEGIN +RETURN a; +END +|| +CREATE VIEW v1 AS SELECT a, fn(MIN(b)) as c FROM t1 GROUP BY a; +SELECT myfunc_int(a AS attr_name) FROM t1; +myfunc_int(a AS attr_name) +1 +2 +EXPLAIN EXTENDED SELECT myfunc_int(a AS attr_name) FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 +Warnings: +Note 1003 select myfunc_int(`test`.`t1`.`a` AS `attr_name`) AS `myfunc_int(a AS attr_name)` from `test`.`t1` +EXPLAIN EXTENDED SELECT myfunc_int(a) FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 +Warnings: +Note 1003 select myfunc_int(`test`.`t1`.`a` AS `a`) AS `myfunc_int(a)` from `test`.`t1` +SELECT a,c FROM v1; +a c +1 1 +2 2 +SELECT a, fn(MIN(b) xx) as c FROM t1 GROUP BY a; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'xx) as c FROM t1 GROUP BY a' at line 1 +SELECT myfunc_int(fn(MIN(b) xx)) as c FROM t1 GROUP BY a; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds 
to your MySQL server version for the right syntax to use near 'xx)) as c FROM t1 GROUP BY a' at line 1 +SELECT myfunc_int(test.fn(MIN(b) xx)) as c FROM t1 GROUP BY a; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'xx)) as c FROM t1 GROUP BY a' at line 1 +SELECT myfunc_int(fn(MIN(b)) xx) as c FROM t1 GROUP BY a; +c +1 +2 +SELECT myfunc_int(test.fn(MIN(b)) xx) as c FROM t1 GROUP BY a; +c +1 +2 +EXPLAIN EXTENDED SELECT myfunc_int(MIN(b) xx) as c FROM t1 GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +Warnings: +Note 1003 select myfunc_int(min(`test`.`t1`.`b`) AS `xx`) AS `c` from `test`.`t1` group by `test`.`t1`.`a` +EXPLAIN EXTENDED SELECT test.fn(MIN(b)) as c FROM t1 GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +Warnings: +Note 1003 select `test`.`fn`(min(`test`.`t1`.`b`)) AS `c` from `test`.`t1` group by `test`.`t1`.`a` +EXPLAIN EXTENDED SELECT myfunc_int(fn(MIN(b))) as c FROM t1 GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +Warnings: +Note 1003 select myfunc_int(`test`.`fn`(min(`test`.`t1`.`b`)) AS `fn(MIN(b))`) AS `c` from `test`.`t1` group by `test`.`t1`.`a` +EXPLAIN EXTENDED SELECT myfunc_int(test.fn(MIN(b))) as c FROM t1 GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +Warnings: +Note 1003 select myfunc_int(`test`.`fn`(min(`test`.`t1`.`b`)) AS `test.fn(MIN(b))`) AS `c` from `test`.`t1` group by `test`.`t1`.`a` +SELECT myfunc_int(MIN(b) xx) as c FROM t1 GROUP BY a; +c +1 +2 +SELECT test.fn(MIN(b)) as c FROM t1 GROUP BY a; +c +1 +2 +SELECT myfunc_int(fn(MIN(b))) as c FROM t1 GROUP BY a; +c +1 +2 +SELECT myfunc_int(test.fn(MIN(b))) as c FROM t1 GROUP BY a; +c +1 +2 +DROP VIEW v1; +DROP TABLE t1; +DROP FUNCTION fn; End of 5.0 tests. DROP FUNCTION metaphon; DROP FUNCTION myfunc_double; diff --git a/mysql-test/t/udf.test b/mysql-test/t/udf.test index 96e559f5c05..37358a292be 100644 --- a/mysql-test/t/udf.test +++ b/mysql-test/t/udf.test @@ -127,6 +127,50 @@ create table t1(f1 int); insert into t1 values(1),(2); explain select myfunc_int(f1) from t1 order by 1; drop table t1; + +# +# Bug #21809: Error 1356 while selecting from view with grouping though +# underlying select OK. 
+# +CREATE TABLE t1(a INT, b INT); INSERT INTO t1 values (1,1),(2,2); + +DELIMITER ||; +CREATE FUNCTION fn(a int) RETURNS int DETERMINISTIC +BEGIN + RETURN a; +END +|| +DELIMITER ;|| + +CREATE VIEW v1 AS SELECT a, fn(MIN(b)) as c FROM t1 GROUP BY a; + +SELECT myfunc_int(a AS attr_name) FROM t1; +EXPLAIN EXTENDED SELECT myfunc_int(a AS attr_name) FROM t1; +EXPLAIN EXTENDED SELECT myfunc_int(a) FROM t1; +SELECT a,c FROM v1; + +--error ER_PARSE_ERROR +SELECT a, fn(MIN(b) xx) as c FROM t1 GROUP BY a; +--error ER_PARSE_ERROR +SELECT myfunc_int(fn(MIN(b) xx)) as c FROM t1 GROUP BY a; +--error ER_PARSE_ERROR +SELECT myfunc_int(test.fn(MIN(b) xx)) as c FROM t1 GROUP BY a; + +SELECT myfunc_int(fn(MIN(b)) xx) as c FROM t1 GROUP BY a; +SELECT myfunc_int(test.fn(MIN(b)) xx) as c FROM t1 GROUP BY a; + +EXPLAIN EXTENDED SELECT myfunc_int(MIN(b) xx) as c FROM t1 GROUP BY a; +EXPLAIN EXTENDED SELECT test.fn(MIN(b)) as c FROM t1 GROUP BY a; +EXPLAIN EXTENDED SELECT myfunc_int(fn(MIN(b))) as c FROM t1 GROUP BY a; +EXPLAIN EXTENDED SELECT myfunc_int(test.fn(MIN(b))) as c FROM t1 GROUP BY a; +SELECT myfunc_int(MIN(b) xx) as c FROM t1 GROUP BY a; +SELECT test.fn(MIN(b)) as c FROM t1 GROUP BY a; +SELECT myfunc_int(fn(MIN(b))) as c FROM t1 GROUP BY a; +SELECT myfunc_int(test.fn(MIN(b))) as c FROM t1 GROUP BY a; +DROP VIEW v1; +DROP TABLE t1; +DROP FUNCTION fn; + --echo End of 5.0 tests. # diff --git a/sql/item.cc b/sql/item.cc index a0eef7a19e9..78becb129d1 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1147,6 +1147,28 @@ void Item_name_const::print(String *str) } +/* + need a special class to adjust printing : references to aggregate functions + must not be printed as refs because the aggregate functions that are added to + the front of select list are not printed as well. 
+*/ +class Item_aggregate_ref : public Item_ref +{ +public: + Item_aggregate_ref(Name_resolution_context *context_arg, Item **item, + const char *table_name_arg, const char *field_name_arg) + :Item_ref(context_arg, item, table_name_arg, field_name_arg) {} + + void print (String *str) + { + if (ref) + (*ref)->print(str); + else + Item_ident::print(str); + } +}; + + /* Move SUM items out from item tree and replace with reference @@ -1200,8 +1222,8 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, Item *new_item, *real_itm= real_item(); ref_pointer_array[el]= real_itm; - if (!(new_item= new Item_ref(&thd->lex->current_select->context, - ref_pointer_array + el, 0, name))) + if (!(new_item= new Item_aggregate_ref(&thd->lex->current_select->context, + ref_pointer_array + el, 0, name))) return; // fatal_error is set fields.push_front(real_itm); thd->change_item_tree(ref, new_item); diff --git a/sql/item_func.cc b/sql/item_func.cc index 2e594c74031..823435dd1e5 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2869,6 +2869,20 @@ void Item_udf_func::cleanup() } +void Item_udf_func::print(String *str) +{ + str->append(func_name()); + str->append('('); + for (uint i=0 ; i < arg_count ; i++) + { + if (i != 0) + str->append(','); + args[i]->print_item_w_name(str); + } + str->append(')'); +} + + double Item_func_udf_float::val_real() { DBUG_ASSERT(fixed == 1); diff --git a/sql/item_func.h b/sql/item_func.h index 177daf0311f..6bbbf2caabd 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -951,6 +951,7 @@ public: Item_result result_type () const { return udf.result_type(); } table_map not_null_tables() const { return 0; } bool is_expensive() { return 1; } + void print(String *str); }; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 788276ac654..a65d36cb07c 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -163,6 +163,7 @@ void lex_start(THD *thd, uchar *buf,uint length) lex->select_lex.ftfunc_list= &lex->select_lex.ftfunc_list_alloc; lex->select_lex.group_list.empty(); lex->select_lex.order_list.empty(); + lex->select_lex.udf_list.empty(); lex->current_select= &lex->select_lex; lex->yacc_yyss=lex->yacc_yyvs=0; lex->ignore_space=test(thd->variables.sql_mode & MODE_IGNORE_SPACE); @@ -1166,6 +1167,7 @@ void st_select_lex::init_select() braces= 0; when_list.empty(); expr_list.empty(); + udf_list.empty(); interval_list.empty(); use_index.empty(); ftfunc_list_alloc.empty(); diff --git a/sql/sql_lex.h b/sql/sql_lex.h index fdf14c691e9..d7d480dabc3 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -580,6 +580,8 @@ public: /* exclude this select from check of unique_table() */ bool exclude_from_table_unique_test; + List udf_list; /* udf function calls stack */ + void init_query(); void init_select(); st_select_lex_unit* master_unit(); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index cb105d05332..e5c11f6d437 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -650,11 +650,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token UNIX_TIMESTAMP %token UNKNOWN_SYM %token UNLOCK_SYM -%token UNLOCK_SYM %token UNSIGNED %token UNTIL_SYM -%token UNTIL_SYM -%token UPDATE_SYM %token UPDATE_SYM %token UPGRADE_SYM %token USAGE @@ -764,7 +761,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type expr_list udf_expr_list udf_expr_list2 when_list - ident_list ident_list_arg + ident_list ident_list_arg opt_expr_list %type option_type opt_var_type opt_var_ident_type @@ -4724,7 +4721,7 @@ simple_expr: { $$= new Item_func_trim($5,$3); } | TRUNCATE_SYM '(' 
expr ',' expr ')' { $$= new Item_func_round($3,$5,1); } - | ident '.' ident '(' udf_expr_list ')' + | ident '.' ident '(' opt_expr_list ')' { LEX *lex= Lex; sp_name *name= new sp_name($1, $3); @@ -4741,27 +4738,27 @@ simple_expr: { #ifdef HAVE_DLOPEN udf_func *udf= 0; + LEX *lex= Lex; if (using_udf_functions && (udf= find_udf($1.str, $1.length)) && udf->type == UDFTYPE_AGGREGATE) { - LEX *lex= Lex; if (lex->current_select->inc_in_sum_expr()) { yyerror(ER(ER_SYNTAX_ERROR)); YYABORT; } } - $$= udf; + lex->current_select->udf_list.push_front(udf); #endif } udf_expr_list ')' { #ifdef HAVE_DLOPEN - udf_func *udf= $3; - SELECT_LEX *sel= Select; + udf_func *udf; + LEX *lex= Lex; - if (udf) + if (NULL != (udf= lex->current_select->udf_list.pop())) { if (udf->type == UDFTYPE_AGGREGATE) Select->in_sum_expr--; @@ -4988,12 +4985,29 @@ udf_expr_list3: udf_expr: remember_name expr remember_end select_alias { + udf_func *udf= Select->udf_list.head(); + /* + Use Item::name as a storage for the attribute value of user + defined function argument. It is safe to use Item::name + because the syntax will not allow having an explicit name here. + See WL#1017 re. udf attributes. + */ if ($4.str) { + if (!udf) + { + /* + Disallow using AS to specify explicit names for the arguments + of stored routine calls + */ + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + $2->is_autogenerated_name= FALSE; $2->set_name($4.str, $4.length, system_charset_info); } - else + else if (udf) $2->set_name($1, (uint) ($3 - $1), YYTHD->charset()); $$= $2; } @@ -5114,6 +5128,11 @@ cast_type: | DECIMAL_SYM float_options { $$=ITEM_CAST_DECIMAL; Lex->charset= NULL; } ; +opt_expr_list: + /* empty */ { $$= NULL; } + | expr_list { $$= $1;} + ; + expr_list: { Select->expr_list.push_front(new List); } expr_list2 From 01a0be193d8344e1b20c366b30a7ca75a0a9bd44 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 25 Oct 2006 20:14:39 +0500 Subject: [PATCH 10/57] bug #19491 (CAST do DATETIME wrong result) mysql-test/r/type_datetime.result: result fixed mysql-test/r/type_newdecimal.result: result fixed mysql-test/t/type_datetime.test: testcase mysql-test/t/type_newdecimal.test: testcase sql/field.cc: Field_new_decimal::store_time implemented sql/field.h: Field_new_decimal::store_time added sql/item.cc: auxiliary methods implemented to operate with datatimes sql/item.h: auxiliary methods declared in Item to operate with datatimes sql/item_timefunc.cc: Item_date::save_in_field old implementation removed sql/item_timefunc.h: my_decimal and save_in_field methods implemented for datetime items sql/my_decimal.cc: date2my_decimal implemented sql/my_decimal.h: date2my_decimal declared --- mysql-test/r/type_datetime.result | 12 +++ mysql-test/r/type_newdecimal.result | 8 ++ mysql-test/t/type_datetime.test | 10 ++ mysql-test/t/type_newdecimal.test | 8 ++ sql/field.cc | 7 ++ sql/field.h | 1 + sql/item.cc | 47 +++++++++ sql/item.h | 5 + sql/item_timefunc.cc | 11 -- sql/item_timefunc.h | 151 +++++++++++++++++++++------- sql/my_decimal.cc | 19 ++++ sql/my_decimal.h | 7 +- 12 files changed, 236 insertions(+), 50 deletions(-) diff --git a/mysql-test/r/type_datetime.result b/mysql-test/r/type_datetime.result index 49e4827cb97..7fc1c4f398d 100644 --- a/mysql-test/r/type_datetime.result +++ b/mysql-test/r/type_datetime.result @@ -179,3 +179,15 @@ a 2006-06-06 15:55:55 DROP PREPARE s; DROP TABLE t1; +SELECT CAST(CAST('2006-08-10' AS DATE) AS DECIMAL(20,6)); +CAST(CAST('2006-08-10' AS DATE) AS DECIMAL(20,6)) +20060810.000000 +SELECT CAST(CAST('2006-08-10 10:11:12' AS 
DATETIME) AS DECIMAL(20,6)); +CAST(CAST('2006-08-10 10:11:12' AS DATETIME) AS DECIMAL(20,6)) +20060810101112.000000 +SELECT CAST(CAST('2006-08-10 10:11:12' AS DATETIME) + INTERVAL 14 MICROSECOND AS DECIMAL(20,6)); +CAST(CAST('2006-08-10 10:11:12' AS DATETIME) + INTERVAL 14 MICROSECOND AS DECIMAL(20,6)) +20060810101112.000014 +SELECT CAST(CAST('10:11:12.098700' AS TIME) AS DECIMAL(20,6)); +CAST(CAST('10:11:12.098700' AS TIME) AS DECIMAL(20,6)) +101112.098700 diff --git a/mysql-test/r/type_newdecimal.result b/mysql-test/r/type_newdecimal.result index 33f1ece0390..f24014da285 100644 --- a/mysql-test/r/type_newdecimal.result +++ b/mysql-test/r/type_newdecimal.result @@ -1412,3 +1412,11 @@ i2 count(distinct j) 1.0 2 2.0 2 drop table t1; +create table t1(f1 decimal(20,6)); +insert into t1 values (CAST('10:11:12' AS date) + interval 14 microsecond); +insert into t1 values (CAST('10:11:12' AS time)); +select * from t1; +f1 +20101112000000.000014 +20101112.000000 +drop table t1; diff --git a/mysql-test/t/type_datetime.test b/mysql-test/t/type_datetime.test index cdf73bf6c89..3ad6bdc53e4 100644 --- a/mysql-test/t/type_datetime.test +++ b/mysql-test/t/type_datetime.test @@ -125,3 +125,13 @@ PREPARE s FROM 'SELECT a FROM t1 WHERE a=(SELECT MAX(a) FROM t1) AND (a="2006060 EXECUTE s; DROP PREPARE s; DROP TABLE t1; + + +# +# Bug 19491 (CAST DATE AS DECIMAL returns incorrect result +# +SELECT CAST(CAST('2006-08-10' AS DATE) AS DECIMAL(20,6)); +SELECT CAST(CAST('2006-08-10 10:11:12' AS DATETIME) AS DECIMAL(20,6)); +SELECT CAST(CAST('2006-08-10 10:11:12' AS DATETIME) + INTERVAL 14 MICROSECOND AS DECIMAL(20,6)); +SELECT CAST(CAST('10:11:12.098700' AS TIME) AS DECIMAL(20,6)); + diff --git a/mysql-test/t/type_newdecimal.test b/mysql-test/t/type_newdecimal.test index de1ebd74d17..12da41dcfd5 100644 --- a/mysql-test/t/type_newdecimal.test +++ b/mysql-test/t/type_newdecimal.test @@ -1108,3 +1108,11 @@ insert into t1 values (1,1), (1,2), (2,3), (2,4); select i, count(distinct j) from t1 group by i; select i+0.0 as i2, count(distinct j) from t1 group by i2; drop table t1; + + +create table t1(f1 decimal(20,6)); +insert into t1 values (CAST('10:11:12' AS date) + interval 14 microsecond); +insert into t1 values (CAST('10:11:12' AS time)); +select * from t1; +drop table t1; + diff --git a/sql/field.cc b/sql/field.cc index 4fea6a085bb..9858e873aa2 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -2413,6 +2413,13 @@ int Field_new_decimal::store_decimal(const my_decimal *decimal_value) } +int Field_new_decimal::store_time(TIME *ltime, timestamp_type t_type) +{ + my_decimal decimal_value; + return store_value(date2my_decimal(ltime, &decimal_value)); +} + + double Field_new_decimal::val_real(void) { double dbl; diff --git a/sql/field.h b/sql/field.h index 65e747e9d2f..08d4a2c7a53 100644 --- a/sql/field.h +++ b/sql/field.h @@ -489,6 +489,7 @@ public: int store(const char *to, uint length, CHARSET_INFO *charset); int store(double nr); int store(longlong nr, bool unsigned_val); + int store_time(TIME *ltime, timestamp_type t_type); int store_decimal(const my_decimal *); double val_real(void); longlong val_int(void); diff --git a/sql/item.cc b/sql/item.cc index 96b20d0f0bb..1e70788d9f4 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -272,6 +272,34 @@ my_decimal *Item::val_decimal_from_string(my_decimal *decimal_value) } +my_decimal *Item::val_decimal_from_date(my_decimal *decimal_value) +{ + DBUG_ASSERT(fixed == 1); + TIME ltime; + longlong date; + if (get_date(<ime, TIME_FUZZY_DATE)) + { + my_decimal_set_zero(decimal_value); + 
return 0; + } + return date2my_decimal(<ime, decimal_value); +} + + +my_decimal *Item::val_decimal_from_time(my_decimal *decimal_value) +{ + DBUG_ASSERT(fixed == 1); + TIME ltime; + longlong date; + if (get_time(<ime)) + { + my_decimal_set_zero(decimal_value); + return 0; + } + return date2my_decimal(<ime, decimal_value); +} + + double Item::val_real_from_decimal() { /* Note that fix_fields may not be called for Item_avg_field items */ @@ -295,6 +323,25 @@ longlong Item::val_int_from_decimal() return result; } +int Item::save_time_in_field(Field *field) +{ + TIME ltime; + if (get_time(<ime)) + return set_field_to_null(field); + field->set_notnull(); + return field->store_time(<ime, MYSQL_TIMESTAMP_TIME); +} + + +int Item::save_date_in_field(Field *field) +{ + TIME ltime; + if (get_date(<ime, TIME_FUZZY_DATE)) + return set_field_to_null(field); + field->set_notnull(); + return field->store_time(<ime, MYSQL_TIMESTAMP_DATETIME); +} + Item::Item(): rsize(0), name(0), orig_name(0), name_length(0), fixed(0), diff --git a/sql/item.h b/sql/item.h index e3df0fdf389..a820f78719a 100644 --- a/sql/item.h +++ b/sql/item.h @@ -605,9 +605,14 @@ public: my_decimal *val_decimal_from_real(my_decimal *decimal_value); my_decimal *val_decimal_from_int(my_decimal *decimal_value); my_decimal *val_decimal_from_string(my_decimal *decimal_value); + my_decimal *val_decimal_from_date(my_decimal *decimal_value); + my_decimal *val_decimal_from_time(my_decimal *decimal_value); longlong val_int_from_decimal(); double val_real_from_decimal(); + int save_time_in_field(Field *field); + int save_date_in_field(Field *field); + virtual Field *get_tmp_table_field() { return 0; } /* This is also used to create fields in CREATE ... SELECT: */ virtual Field *tmp_table_field(TABLE *t_arg) { return 0; } diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 30230005f6e..24a0d12ee9a 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1294,17 +1294,6 @@ String *Item_date::val_str(String *str) } -int Item_date::save_in_field(Field *field, bool no_conversions) -{ - TIME ltime; - if (get_date(<ime, TIME_FUZZY_DATE)) - return set_field_to_null(field); - field->set_notnull(); - field->store_time(<ime, MYSQL_TIMESTAMP_DATE); - return 0; -} - - longlong Item_date::val_int() { DBUG_ASSERT(fixed == 1); diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index d5d3efeeab4..29978cf60a3 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -339,12 +339,20 @@ public: decimals=0; max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } - int save_in_field(Field *to, bool no_conversions); Field *tmp_table_field(TABLE *t_arg) { return (new Field_date(maybe_null, name, t_arg, &my_charset_bin)); } bool result_as_longlong() { return TRUE; } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } }; @@ -361,21 +369,57 @@ public: return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin)); } bool result_as_longlong() { return TRUE; } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } +}; + + +class Item_str_timefunc :public Item_str_func +{ +public: + Item_str_timefunc() :Item_str_func() {} + Item_str_timefunc(Item *a) :Item_str_func(a) {} + 
Item_str_timefunc(Item *a,Item *b) :Item_str_func(a,b) {} + Item_str_timefunc(Item *a, Item *b, Item *c) :Item_str_func(a, b ,c) {} + enum_field_types field_type() const { return MYSQL_TYPE_TIME; } + void fix_length_and_dec() + { + decimals=0; + max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + } + Field *tmp_table_field(TABLE *t_arg) + { + return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); + } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_time(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_time_in_field(field); + } }; /* Abstract CURTIME function. Children should define what time zone is used */ -class Item_func_curtime :public Item_func +class Item_func_curtime :public Item_str_timefunc { longlong value; char buff[9*2+32]; uint buff_length; public: - Item_func_curtime() :Item_func() {} - Item_func_curtime(Item *a) :Item_func(a) {} - enum Item_result result_type () const { return STRING_RESULT; } - enum_field_types field_type() const { return MYSQL_TYPE_TIME; } + Item_func_curtime() :Item_str_timefunc() {} + Item_func_curtime(Item *a) :Item_str_timefunc(a) {} double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; } longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } String *val_str(String *str); @@ -602,10 +646,10 @@ class Item_func_convert_tz :public Item_date_func }; -class Item_func_sec_to_time :public Item_str_func +class Item_func_sec_to_time :public Item_str_timefunc { public: - Item_func_sec_to_time(Item *item) :Item_str_func(item) {} + Item_func_sec_to_time(Item *item) :Item_str_timefunc(item) {} double val_real() { DBUG_ASSERT(fixed == 1); @@ -615,17 +659,12 @@ public: String *val_str(String *); void fix_length_and_dec() { + Item_str_timefunc::fix_length_and_dec(); collation.set(&my_charset_bin); maybe_null=1; decimals= DATETIME_DEC; - max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } - enum_field_types field_type() const { return MYSQL_TYPE_TIME; } const char *func_name() const { return "sec_to_time"; } - Field *tmp_table_field(TABLE *t_arg) - { - return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); - } bool result_as_longlong() { return TRUE; } }; @@ -759,6 +798,15 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } }; @@ -777,6 +825,15 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_time(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_time_in_field(field); + } }; @@ -794,12 +851,21 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } }; -class Item_func_makedate :public Item_str_func +class Item_func_makedate :public Item_date_func { public: - Item_func_makedate(Item *a,Item *b) :Item_str_func(a,b) {} + Item_func_makedate(Item *a,Item *b) :Item_date_func(a,b) {} String *val_str(String *str); const char *func_name() const { 
return "makedate"; } enum_field_types field_type() const { return MYSQL_TYPE_DATE; } @@ -812,8 +878,16 @@ public: { return (new Field_date(maybe_null, name, t_arg, &my_charset_bin)); } - bool result_as_longlong() { return TRUE; } longlong val_int(); + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } }; @@ -845,45 +919,46 @@ public: } void print(String *str); const char *func_name() const { return "add_time"; } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + if (cached_field_type == MYSQL_TYPE_TIME) + return val_decimal_from_time(decimal_value); + if (cached_field_type == MYSQL_TYPE_DATETIME) + return val_decimal_from_date(decimal_value); + return Item_str_func::val_decimal(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + if (cached_field_type == MYSQL_TYPE_TIME) + return save_time_in_field(field); + if (cached_field_type == MYSQL_TYPE_DATETIME) + return save_date_in_field(field); + return Item_str_func::save_in_field(field, no_conversions); + } }; -class Item_func_timediff :public Item_str_func +class Item_func_timediff :public Item_str_timefunc { public: Item_func_timediff(Item *a, Item *b) - :Item_str_func(a, b) {} + :Item_str_timefunc(a, b) {} String *val_str(String *str); const char *func_name() const { return "timediff"; } - enum_field_types field_type() const { return MYSQL_TYPE_TIME; } void fix_length_and_dec() { - decimals=0; - max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + Item_str_timefunc::fix_length_and_dec(); maybe_null= 1; } - Field *tmp_table_field(TABLE *t_arg) - { - return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); - } }; -class Item_func_maketime :public Item_str_func +class Item_func_maketime :public Item_str_timefunc { public: Item_func_maketime(Item *a, Item *b, Item *c) - :Item_str_func(a, b ,c) {} + :Item_str_timefunc(a, b ,c) {} String *val_str(String *str); const char *func_name() const { return "maketime"; } - enum_field_types field_type() const { return MYSQL_TYPE_TIME; } - void fix_length_and_dec() - { - decimals=0; - max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; - } - Field *tmp_table_field(TABLE *t_arg) - { - return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); - } }; class Item_func_microsecond :public Item_int_func diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc index 1bd16940b47..f33609e0168 100644 --- a/sql/my_decimal.cc +++ b/sql/my_decimal.cc @@ -15,6 +15,8 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "mysql_priv.h" +#include + #ifndef MYSQL_CLIENT /* @@ -190,6 +192,23 @@ int str2my_decimal(uint mask, const char *from, uint length, } +my_decimal *date2my_decimal(TIME *ltime, my_decimal *dec) +{ + longlong date; + date = (ltime->year*100L + ltime->month)*100L + ltime->day; + if (ltime->time_type > MYSQL_TIMESTAMP_DATE) + date= ((date*100L + ltime->hour)*100L+ ltime->minute)*100L + ltime->second; + if (int2my_decimal(E_DEC_FATAL_ERROR, date, FALSE, dec)) + return dec; + if (ltime->second_part) + { + dec->buf[(dec->intg-1) / 9 + 1]= ltime->second_part * 1000; + dec->frac= 6; + } + return dec; +} + + #ifndef DBUG_OFF /* routines for debugging print */ diff --git a/sql/my_decimal.h b/sql/my_decimal.h index b02abacf0a3..af3edade8d6 100644 --- a/sql/my_decimal.h +++ b/sql/my_decimal.h @@ -295,7 +295,12 @@ int 
string2my_decimal(uint mask, const String *str, my_decimal *d) { return str2my_decimal(mask, str->ptr(), str->length(), str->charset(), d); } -#endif + + +my_decimal *date2my_decimal(TIME *ltime, my_decimal *dec); + + +#endif /*defined(MYSQL_SERVER) || defined(EMBEDDED_LIBRARY) */ inline int double2my_decimal(uint mask, double val, my_decimal *d) From f00e6bd75d24e311016cd72449ed678c0172f6e6 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 30 Oct 2006 09:52:50 +0400 Subject: [PATCH 11/57] Bug #8663 (cant use bigint as input to CAST) decimal->ulong conversion fixed to assign max possible ULONG if decimal is bigger Item_func_unsigned now handles DECIMAL parameter separately as we can't rely on decimal::val_int result here. mysql-test/r/type_newdecimal.result: result fixed mysql-test/t/type_newdecimal.test: testcase sql/item_func.cc: DECIMAL_RESULT should be handled separately here as it's always signed. strings/decimal.c: here we assign max possible ULONG if the decimal value is bigger --- mysql-test/r/type_newdecimal.result | 5 +++++ mysql-test/t/type_newdecimal.test | 6 ++++++ sql/item_func.cc | 9 ++++++++- strings/decimal.c | 2 +- 4 files changed, 20 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/type_newdecimal.result b/mysql-test/r/type_newdecimal.result index 33f1ece0390..cacb945df85 100644 --- a/mysql-test/r/type_newdecimal.result +++ b/mysql-test/r/type_newdecimal.result @@ -1412,3 +1412,8 @@ i2 count(distinct j) 1.0 2 2.0 2 drop table t1; +select cast(19999999999999999999 as unsigned); +cast(19999999999999999999 as unsigned) +18446744073709551615 +Warnings: +Error 1292 Truncated incorrect DECIMAL value: '' diff --git a/mysql-test/t/type_newdecimal.test b/mysql-test/t/type_newdecimal.test index de1ebd74d17..cb85f14ee47 100644 --- a/mysql-test/t/type_newdecimal.test +++ b/mysql-test/t/type_newdecimal.test @@ -1108,3 +1108,9 @@ insert into t1 values (1,1), (1,2), (2,3), (2,4); select i, count(distinct j) from t1 group by i; select i+0.0 as i2, count(distinct j) from t1 group by i2; drop table t1; + +# +# Bug #8663 (cant use bigint as input to CAST) +# +select cast(19999999999999999999 as unsigned); + diff --git a/sql/item_func.cc b/sql/item_func.cc index a294bbd7a71..38294d52a5b 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -964,7 +964,14 @@ longlong Item_func_unsigned::val_int() longlong value; int error; - if (args[0]->cast_to_int_type() != STRING_RESULT) + if (args[0]->cast_to_int_type() == DECIMAL_RESULT) + { + my_decimal tmp, *dec= args[0]->val_decimal(&tmp); + if (!(null_value= args[0]->null_value)) + my_decimal2int(E_DEC_FATAL_ERROR, dec, 1, &value); + return value; + } + else if (args[0]->cast_to_int_type() != STRING_RESULT) { value= args[0]->val_int(); null_value= args[0]->null_value; diff --git a/strings/decimal.c b/strings/decimal.c index 5a0bc0968b6..e0b06685521 100644 --- a/strings/decimal.c +++ b/strings/decimal.c @@ -1036,7 +1036,7 @@ int decimal2ulonglong(decimal_t *from, ulonglong *to) x=x*DIG_BASE + *buf++; if (unlikely(y > ((ulonglong) ULONGLONG_MAX/DIG_BASE) || x < y)) { - *to=y; + *to=ULONGLONG_MAX; return E_DEC_OVERFLOW; } } From 634d3991ab818ea4afc8d98b40de75b22e0cae54 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 31 Oct 2006 11:01:27 +0200 Subject: [PATCH 12/57] Bug #23184: SELECT causes server crash Item::val_xxx() may be called by the server several times at execute time for a single query. Calls to val_xxx() may be very expensive and sometimes (count(distinct), sum(distinct), avg(distinct)) not possible. 
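For illustration only, the caching idea reduces to the minimal standalone C++ sketch below; the names are simplified stand-ins rather than the real Item_sum classes, but the mechanism mirrors the is_evaluated flag introduced by this patch: adding a value invalidates the cache, the first value request performs the expensive walk over the collected distinct values, and later requests return the cached result.

// Illustrative sketch, not MySQL source: caching an expensive DISTINCT aggregate.
#include <iostream>
#include <set>

class SumDistinct {
  std::set<long long> tree;    // stand-in for the Unique tree of distinct values
  long long cached_sum = 0;    // cached aggregate value
  bool is_evaluated = false;   // governs validity of the cache

  void calculate() {           // expensive part: one full walk of the tree
    cached_sum = 0;
    for (long long v : tree) cached_sum += v;
    is_evaluated = true;
  }

 public:
  void add(long long v) {      // adding a row invalidates the cache
    tree.insert(v);
    is_evaluated = false;
  }
  void clear() {               // new group: reset collected values and cache
    tree.clear();
    is_evaluated = false;
  }
  long long val_int() {        // may be called several times per group
    if (!is_evaluated) calculate();
    return cached_sum;
  }
};

int main() {
  SumDistinct s;
  for (long long v : {1, 2, 2, 3}) s.add(v);
  std::cout << s.val_int() << "\n";  // 6, computed on the first call
  std::cout << s.val_int() << "\n";  // 6, served from the cache
  return 0;
}

Keeping a separate validity flag rather than a sentinel value lets the cache cover aggregates whose legitimate result could be any value, which is why the patch adds is_evaluated instead of special-casing the cached number itself.
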
To avoid that problem the results of calculation for these aggregate functions are cached so that val_xxx() methods just return the calculated value for the second and subsequent calls. mysql-test/r/func_group.result: Bug #23184: SELECT causes server crash - test case mysql-test/t/func_group.test: Bug #23184: SELECT causes server crash - test case sql/item_sum.cc: Bug #23184: SELECT causes server crash - caching of the aggregate function results so no need to recalculate at val_xxx() sql/item_sum.h: Bug #23184: SELECT causes server crash - caching of the aggregate function results so no need to recalculate at val_xxx() --- mysql-test/r/func_group.result | 26 +++++++++++++++++++ mysql-test/t/func_group.test | 25 ++++++++++++++++++ sql/item_sum.cc | 47 ++++++++++++++++++++++++---------- sql/item_sum.h | 34 ++++++++++++++++++------ 4 files changed, 111 insertions(+), 21 deletions(-) diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index c6117053a60..23517f7b603 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -1029,3 +1029,29 @@ t1 CREATE TABLE `t1` ( `stddev(0)` double(8,4) default NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; +CREATE TABLE t1 (a INT, b INT); +INSERT INTO t1 VALUES (1,1),(1,2),(1,3),(1,4),(1,5),(1,6),(1,7),(1,8); +INSERT INTO t1 SELECT a, b+8 FROM t1; +INSERT INTO t1 SELECT a, b+16 FROM t1; +INSERT INTO t1 SELECT a, b+32 FROM t1; +INSERT INTO t1 SELECT a, b+64 FROM t1; +INSERT INTO t1 SELECT a, b+128 FROM t1; +INSERT INTO t1 SELECT a, b+256 FROM t1; +INSERT INTO t1 SELECT a, b+512 FROM t1; +INSERT INTO t1 SELECT a, b+1024 FROM t1; +INSERT INTO t1 SELECT a, b+2048 FROM t1; +INSERT INTO t1 SELECT a, b+4096 FROM t1; +INSERT INTO t1 SELECT a, b+8192 FROM t1; +INSERT INTO t1 SELECT a, b+16384 FROM t1; +INSERT INTO t1 SELECT a, b+32768 FROM t1; +SELECT a,COUNT(DISTINCT b) AS cnt FROM t1 GROUP BY a HAVING cnt > 50; +a cnt +1 65536 +SELECT a,SUM(DISTINCT b) AS sumation FROM t1 GROUP BY a HAVING sumation > 50; +a sumation +1 2147516416 +SELECT a,AVG(DISTINCT b) AS average FROM t1 GROUP BY a HAVING average > 50; +a average +1 32768.5000 +DROP TABLE t1; +End of 5.0 tests diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 079d107fad8..089f5ed9911 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -700,3 +700,28 @@ create table t1 select stddev(0); show create table t1; drop table t1; +# +# Bug #23184: SELECT causes server crash +# +CREATE TABLE t1 (a INT, b INT); +INSERT INTO t1 VALUES (1,1),(1,2),(1,3),(1,4),(1,5),(1,6),(1,7),(1,8); +INSERT INTO t1 SELECT a, b+8 FROM t1; +INSERT INTO t1 SELECT a, b+16 FROM t1; +INSERT INTO t1 SELECT a, b+32 FROM t1; +INSERT INTO t1 SELECT a, b+64 FROM t1; +INSERT INTO t1 SELECT a, b+128 FROM t1; +INSERT INTO t1 SELECT a, b+256 FROM t1; +INSERT INTO t1 SELECT a, b+512 FROM t1; +INSERT INTO t1 SELECT a, b+1024 FROM t1; +INSERT INTO t1 SELECT a, b+2048 FROM t1; +INSERT INTO t1 SELECT a, b+4096 FROM t1; +INSERT INTO t1 SELECT a, b+8192 FROM t1; +INSERT INTO t1 SELECT a, b+16384 FROM t1; +INSERT INTO t1 SELECT a, b+32768 FROM t1; +SELECT a,COUNT(DISTINCT b) AS cnt FROM t1 GROUP BY a HAVING cnt > 50; +SELECT a,SUM(DISTINCT b) AS sumation FROM t1 GROUP BY a HAVING sumation > 50; +SELECT a,AVG(DISTINCT b) AS average FROM t1 GROUP BY a HAVING average > 50; + +DROP TABLE t1; + +--echo End of 5.0 tests diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 5ca1dbba94b..fccbdfa7925 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ 
-893,6 +893,7 @@ bool Item_sum_distinct::setup(THD *thd) tree= new Unique(simple_raw_key_cmp, &tree_key_length, tree_key_length, thd->variables.max_heap_table_size); + is_evaluated= FALSE; DBUG_RETURN(tree == 0); } @@ -900,6 +901,7 @@ bool Item_sum_distinct::setup(THD *thd) bool Item_sum_distinct::add() { args[0]->save_in_field(table->field[0], FALSE); + is_evaluated= FALSE; if (!table->field[0]->is_null()) { DBUG_ASSERT(tree); @@ -929,6 +931,7 @@ void Item_sum_distinct::clear() DBUG_ASSERT(tree != 0); /* we always have a tree */ null_value= 1; tree->reset(); + is_evaluated= FALSE; DBUG_VOID_RETURN; } @@ -938,6 +941,7 @@ void Item_sum_distinct::cleanup() delete tree; tree= 0; table= 0; + is_evaluated= FALSE; } Item_sum_distinct::~Item_sum_distinct() @@ -949,16 +953,20 @@ Item_sum_distinct::~Item_sum_distinct() void Item_sum_distinct::calculate_val_and_count() { - count= 0; - val.traits->set_zero(&val); - /* - We don't have a tree only if 'setup()' hasn't been called; - this is the case of sql_select.cc:return_zero_rows. - */ - if (tree) + if (!is_evaluated) { - table->field[0]->set_notnull(); - tree->walk(item_sum_distinct_walk, (void*) this); + count= 0; + val.traits->set_zero(&val); + /* + We don't have a tree only if 'setup()' hasn't been called; + this is the case of sql_select.cc:return_zero_rows. + */ + if (tree) + { + table->field[0]->set_notnull(); + tree->walk(item_sum_distinct_walk, (void*) this); + } + is_evaluated= TRUE; } } @@ -1014,9 +1022,13 @@ Item_sum_avg_distinct::fix_length_and_dec() void Item_sum_avg_distinct::calculate_val_and_count() { - Item_sum_distinct::calculate_val_and_count(); - if (count) - val.traits->div(&val, count); + if (!is_evaluated) + { + Item_sum_distinct::calculate_val_and_count(); + if (count) + val.traits->div(&val, count); + is_evaluated= TRUE; + } } @@ -2477,6 +2489,7 @@ void Item_sum_count_distinct::cleanup() */ delete tree; tree= 0; + is_evaluated= FALSE; if (table) { free_tmp_table(table->in_use, table); @@ -2498,6 +2511,7 @@ void Item_sum_count_distinct::make_unique() original= 0; force_copy_fields= 1; tree= 0; + is_evaluated= FALSE; tmp_table_param= 0; always_null= FALSE; } @@ -2617,6 +2631,7 @@ bool Item_sum_count_distinct::setup(THD *thd) but this has to be handled - otherwise someone can crash the server with a DoS attack */ + is_evaluated= FALSE; if (! tree) return TRUE; } @@ -2633,8 +2648,11 @@ Item *Item_sum_count_distinct::copy_or_same(THD* thd) void Item_sum_count_distinct::clear() { /* tree and table can be both null only if always_null */ + is_evaluated= FALSE; if (tree) + { tree->reset(); + } else if (table) { table->file->extra(HA_EXTRA_NO_CACHE); @@ -2655,6 +2673,7 @@ bool Item_sum_count_distinct::add() if ((*field)->is_real_null(0)) return 0; // Don't count NULL + is_evaluated= FALSE; if (tree) { /* @@ -2680,12 +2699,14 @@ longlong Item_sum_count_distinct::val_int() return LL(0); if (tree) { - ulonglong count; + if (is_evaluated) + return count; if (tree->elements == 0) return (longlong) tree->elements_in_tree(); // everything fits in memory count= 0; tree->walk(count_distinct_walk, (void*) &count); + is_evaluated= TRUE; return (longlong) count; } table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); diff --git a/sql/item_sum.h b/sql/item_sum.h index f1ea95214de..a2b2f7cab92 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -321,12 +321,23 @@ public: class Item_sum_num :public Item_sum { +protected: + /* + val_xxx() functions may be called several times during the execution of a + query. 
Derived classes that require extensive calculation in val_xxx() + maintain cache of aggregate value. This variable governs the validity of + that cache. + */ + bool is_evaluated; public: - Item_sum_num() :Item_sum() {} - Item_sum_num(Item *item_par) :Item_sum(item_par) {} - Item_sum_num(Item *a, Item* b) :Item_sum(a,b) {} - Item_sum_num(List &list) :Item_sum(list) {} - Item_sum_num(THD *thd, Item_sum_num *item) :Item_sum(thd, item) {} + Item_sum_num() :Item_sum(),is_evaluated(FALSE) {} + Item_sum_num(Item *item_par) + :Item_sum(item_par), is_evaluated(FALSE) {} + Item_sum_num(Item *a, Item* b) :Item_sum(a,b),is_evaluated(FALSE) {} + Item_sum_num(List &list) + :Item_sum(list), is_evaluated(FALSE) {} + Item_sum_num(THD *thd, Item_sum_num *item) + :Item_sum(thd, item),is_evaluated(item->is_evaluated) {} bool fix_fields(THD *, Item **); longlong val_int() { @@ -508,6 +519,12 @@ class Item_sum_count_distinct :public Item_sum_int to help get things set up, but we insert nothing in it */ Unique *tree; + /* + Storage for the value of count between calls to val_int() so val_int() + will not recalculate on each call. Validitiy of the value is stored in + is_evaluated. + */ + longlong count; /* Following is 0 normal object and pointer to original one for copy (to correctly free resources) @@ -525,14 +542,15 @@ class Item_sum_count_distinct :public Item_sum_int public: Item_sum_count_distinct(List &list) :Item_sum_int(list), table(0), field_lengths(0), tmp_table_param(0), - force_copy_fields(0), tree(0), original(0), always_null(FALSE) + force_copy_fields(0), tree(0), count(0), + original(0), always_null(FALSE) { quick_group= 0; } Item_sum_count_distinct(THD *thd, Item_sum_count_distinct *item) :Item_sum_int(thd, item), table(item->table), field_lengths(item->field_lengths), tmp_table_param(item->tmp_table_param), - force_copy_fields(0), tree(item->tree), original(item), - tree_key_length(item->tree_key_length), + force_copy_fields(0), tree(item->tree), count(item->count), + original(item), tree_key_length(item->tree_key_length), always_null(item->always_null) {} ~Item_sum_count_distinct(); From 48df3b96a1719141749c05e4080c57366e9d0fbe Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 31 Oct 2006 20:51:09 +0300 Subject: [PATCH 13/57] BUG#8804: wrong results for NULL IN (SELECT ...) Evaluate "NULL IN (SELECT ...)" in a special way: Disable pushed-down conditions and their "consequences": = Do full table scans instead of unique_[index_subquery] lookups. = Change appropriate "ref_or_null" accesses to full table scans in subquery's joins. Also cache value of NULL IN (SELECT ...) if the SELECT is not correlated wrt any upper select. mysql-test/r/subselect.result: BUG#8804: wrong results for NULL IN (SELECT ...): - Updated test results sql/item.h: BUG#8804: wrong results for NULL IN (SELECT ...): - Added comments sql/item_cmpfunc.cc: BUG#8804: wrong results for NULL IN (SELECT ...): Made Item_in_optimizer to: - cache the value of "NULL IN (uncorrelated select)" - Turn off pushed-down predicates when evaluating "NULL IN (SELECT ...)" sql/item_cmpfunc.h: BUG#8804: wrong results for NULL IN (SELECT ...): - Made Item_in_optimizer cache the value of "NULL IN (uncorrelated select)" - Added comments sql/item_subselect.cc: BUG#8804: wrong results for NULL IN (SELECT ...): - When needed, wrap the predicates we push into subquery into an Item_func_trig_cond so we're able to turn them off when evaluating NULL IN (SELECT ...). - Added code to evaluate NULL IN (SELECT ...) 
in a special way: = In [unique_]index_subquery, do full table scan to see if there are any rows. = For other subqueries, change ref[_or_null] to ALL if the ref[_or_null] was created from pushed-down predicate. sql/item_subselect.h: BUG#8804: wrong results for NULL IN (SELECT ...): - Added Item_subselect::is_correlated - Added comments sql/records.cc: BUG#8804: wrong results for NULL IN (SELECT ...): - Make rr_sequential() non-static sql/sql_lex.cc: BUG#8804: wrong results for NULL IN (SELECT ...): - Added st_select_lex::is_correlated and Item_subselect::is_correlated. sql/sql_lex.h: BUG#8804: wrong results for NULL IN (SELECT ...): - Added st_select_lex::is_correlated sql/sql_select.cc: BUG#8804: wrong results for NULL IN (SELECT ...): - Added KEY_FIELD::outer_ref to keep track of which ref accesses are created from predicates that were pushed down into the subquery. sql/sql_select.h: BUG#8804: wrong results for NULL IN (SELECT ...): - Added KEYUSE::outer_ref mysql-test/r/subselect3.result: New BitKeeper file ``mysql-test/r/subselect3.result'' mysql-test/t/subselect3.test: New BitKeeper file ``mysql-test/t/subselect3.test'' --- mysql-test/r/subselect.result | 10 +- mysql-test/r/subselect3.result | 153 ++++++++++++ mysql-test/t/subselect3.test | 137 +++++++++++ sql/item.h | 10 + sql/item_cmpfunc.cc | 34 ++- sql/item_cmpfunc.h | 48 +++- sql/item_subselect.cc | 414 ++++++++++++++++++++++++++++----- sql/item_subselect.h | 70 +++++- sql/records.cc | 5 +- sql/sql_lex.cc | 4 +- sql/sql_lex.h | 4 +- sql/sql_select.cc | 112 +++++++-- sql/sql_select.h | 11 +- 13 files changed, 904 insertions(+), 108 deletions(-) create mode 100644 mysql-test/r/subselect3.result create mode 100644 mysql-test/t/subselect3.test diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index 55d48030a07..82c70e19f9c 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -744,7 +744,7 @@ id select_type table type possible_keys key key_len ref rows Extra 3 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL No tables used NULL UNION RESULT ALL NULL NULL NULL NULL NULL Warnings: -Note 1003 select `test`.`t2`.`id` AS `id` from `test`.`t2` where (`test`.`t2`.`id`,(select 1 AS `1` having ((`test`.`t2`.`id`) = (1)) union select 3 AS `3` having ((`test`.`t2`.`id`) = (3)))) +Note 1003 select `test`.`t2`.`id` AS `id` from `test`.`t2` where (`test`.`t2`.`id`,(select 1 AS `1` having trigcond(((`test`.`t2`.`id`) = (1))) union select 3 AS `3` having trigcond(((`test`.`t2`.`id`) = (3))))) SELECT * FROM t2 WHERE id IN (SELECT 5 UNION SELECT 3); id SELECT * FROM t2 WHERE id IN (SELECT 5 UNION SELECT 2); @@ -907,7 +907,7 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ref_or_null a a 5 func 2 Using where; Using index 2 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 Using where Warnings: -Note 1003 select `test`.`t1`.`a` AS `a`,(`test`.`t1`.`a`,(select 1 AS `Not_used` from `test`.`t2` join `test`.`t3` where ((`test`.`t3`.`a` = `test`.`t2`.`a`) and (((`test`.`t1`.`a`) = `test`.`t2`.`a`) or isnull(`test`.`t2`.`a`))) having (`test`.`t2`.`a`))) AS `t1.a in (select t2.a from t2,t3 where t3.a=t2.a)` from `test`.`t1` +Note 1003 select `test`.`t1`.`a` AS `a`,(`test`.`t1`.`a`,(select 1 AS `Not_used` from `test`.`t2` join `test`.`t3` where ((`test`.`t3`.`a` = `test`.`t2`.`a`) and trigcond((((`test`.`t1`.`a`) = `test`.`t2`.`a`) or isnull(`test`.`t2`.`a`)))) having trigcond((`test`.`t2`.`a`)))) AS `t1.a in (select t2.a from t2,t3 where t3.a=t2.a)` from 
`test`.`t1` drop table t1,t2,t3; create table t1 (a float); select 10.5 IN (SELECT * from t1 LIMIT 1); @@ -2817,19 +2817,19 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 8 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 9 Using where Warnings: -Note 1003 select `test`.`t1`.`one` AS `one`,`test`.`t1`.`two` AS `two`,((`test`.`t1`.`one`,`test`.`t1`.`two`),(select `test`.`t2`.`one` AS `one`,`test`.`t2`.`two` AS `two` from `test`.`t2` where ((`test`.`t2`.`flag` = _latin1'0') and (((`test`.`t1`.`one`) = `test`.`t2`.`one`) or isnull(`test`.`t2`.`one`)) and (((`test`.`t1`.`two`) = `test`.`t2`.`two`) or isnull(`test`.`t2`.`two`))) having ((`test`.`t2`.`one`) and (`test`.`t2`.`two`)))) AS `test` from `test`.`t1` +Note 1003 select `test`.`t1`.`one` AS `one`,`test`.`t1`.`two` AS `two`,((`test`.`t1`.`one`,`test`.`t1`.`two`),(select `test`.`t2`.`one` AS `one`,`test`.`t2`.`two` AS `two` from `test`.`t2` where ((`test`.`t2`.`flag` = _latin1'0') and trigcond(((((`test`.`t1`.`one`) = `test`.`t2`.`one`) or isnull(`test`.`t2`.`one`)) and (((`test`.`t1`.`two`) = `test`.`t2`.`two`) or isnull(`test`.`t2`.`two`))))) having trigcond(((`test`.`t2`.`one`) and (`test`.`t2`.`two`))))) AS `test` from `test`.`t1` explain extended SELECT one,two from t1 where ROW(one,two) IN (SELECT one,two FROM t2 WHERE flag = 'N'); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 9 Using where Warnings: -Note 1003 select `test`.`t1`.`one` AS `one`,`test`.`t1`.`two` AS `two` from `test`.`t1` where ((`test`.`t1`.`one`,`test`.`t1`.`two`),(select `test`.`t2`.`one` AS `one`,`test`.`t2`.`two` AS `two` from `test`.`t2` where ((`test`.`t2`.`flag` = _latin1'N') and ((`test`.`t1`.`one`) = `test`.`t2`.`one`) and ((`test`.`t1`.`two`) = `test`.`t2`.`two`)))) +Note 1003 select `test`.`t1`.`one` AS `one`,`test`.`t1`.`two` AS `two` from `test`.`t1` where ((`test`.`t1`.`one`,`test`.`t1`.`two`),(select `test`.`t2`.`one` AS `one`,`test`.`t2`.`two` AS `two` from `test`.`t2` where ((`test`.`t2`.`flag` = _latin1'N') and trigcond((((`test`.`t1`.`one`) = `test`.`t2`.`one`) and ((`test`.`t1`.`two`) = `test`.`t2`.`two`)))))) explain extended SELECT one,two,ROW(one,two) IN (SELECT one,two FROM t2 WHERE flag = '0' group by one,two) as 'test' from t1; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 8 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort Warnings: -Note 1003 select `test`.`t1`.`one` AS `one`,`test`.`t1`.`two` AS `two`,((`test`.`t1`.`one`,`test`.`t1`.`two`),(select `test`.`t2`.`one` AS `one`,`test`.`t2`.`two` AS `two` from `test`.`t2` where (`test`.`t2`.`flag` = _latin1'0') group by `test`.`t2`.`one`,`test`.`t2`.`two` having ((((`test`.`t1`.`one`) = `test`.`t2`.`one`) or isnull(`test`.`t2`.`one`)) and (((`test`.`t1`.`two`) = `test`.`t2`.`two`) or isnull(`test`.`t2`.`two`)) and (`test`.`t2`.`one`) and (`test`.`t2`.`two`)))) AS `test` from `test`.`t1` +Note 1003 select `test`.`t1`.`one` AS `one`,`test`.`t1`.`two` AS `two`,((`test`.`t1`.`one`,`test`.`t1`.`two`),(select `test`.`t2`.`one` AS `one`,`test`.`t2`.`two` AS `two` from `test`.`t2` where (`test`.`t2`.`flag` = _latin1'0') group by `test`.`t2`.`one`,`test`.`t2`.`two` having trigcond(((((`test`.`t1`.`one`) = `test`.`t2`.`one`) or isnull(`test`.`t2`.`one`)) and (((`test`.`t1`.`two`) = `test`.`t2`.`two`) or 
isnull(`test`.`t2`.`two`)) and (`test`.`t2`.`one`) and (`test`.`t2`.`two`))))) AS `test` from `test`.`t1` DROP TABLE t1,t2; CREATE TABLE t1 (a char(5), b char(5)); INSERT INTO t1 VALUES (NULL,'aaa'), ('aaa','aaa'); diff --git a/mysql-test/r/subselect3.result b/mysql-test/r/subselect3.result new file mode 100644 index 00000000000..5ab8e448b39 --- /dev/null +++ b/mysql-test/r/subselect3.result @@ -0,0 +1,153 @@ +drop table if exists t0, t1, t2, t3, t4; +create table t1 (oref int, grp int, ie int) ; +insert into t1 (oref, grp, ie) values +(1, 1, 1), +(1, 1, 1), +(1, 2, NULL), +(2, 1, 3), +(3, 1, 4), +(3, 2, NULL); +create table t2 (oref int, a int); +insert into t2 values +(1, 1), +(2, 2), +(3, 3), +(4, NULL), +(2, NULL); +select a, oref, a in (select max(ie) +from t1 where oref=t2.oref group by grp) from t2; +a oref a in (select max(ie) +from t1 where oref=t2.oref group by grp) +1 1 1 +2 2 0 +3 3 NULL +NULL 4 0 +NULL 2 NULL +explain extended +select a, oref, a in (select max(ie) +from t1 where oref=t2.oref group by grp) from t2; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 5 +2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 6 Using where; Using temporary; Using filesort +Warnings: +Note 1276 Field or reference 't2.oref' of SELECT #2 was resolved in SELECT #1 +Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`oref` AS `oref`,(`test`.`t2`.`a`,(select max(`test`.`t1`.`ie`) AS `max(ie)` from `test`.`t1` where (`test`.`t1`.`oref` = `test`.`t2`.`oref`) group by `test`.`t1`.`grp` having trigcond(((`test`.`t2`.`a`) = (max(`test`.`t1`.`ie`)))))) AS `a in (select max(ie) +from t1 where oref=t2.oref group by grp)` from `test`.`t2` +explain extended +select a, oref from t2 +where a in (select max(ie) from t1 where oref=t2.oref group by grp); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using where +2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 6 Using where; Using temporary; Using filesort +Warnings: +Note 1276 Field or reference 't2.oref' of SELECT #2 was resolved in SELECT #1 +Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`oref` AS `oref` from `test`.`t2` where (`test`.`t2`.`a`,(select max(`test`.`t1`.`ie`) AS `max(ie)` from `test`.`t1` where (`test`.`t1`.`oref` = `test`.`t2`.`oref`) group by `test`.`t1`.`grp` having ((`test`.`t2`.`a`) = (max(`test`.`t1`.`ie`))))) +create table t3 (a int); +insert into t3 values (NULL), (NULL); +flush status; +select a in (select max(ie) from t1 where oref=4 group by grp) from t3; +a in (select max(ie) from t1 where oref=4 group by grp) +0 +0 +show status like 'Handler_read_rnd_next'; +Variable_name Value +Handler_read_rnd_next 11 +select ' ^ This must show 11' Z; +Z + ^ This must show 11 +explain extended select a in (select max(ie) from t1 where oref=4 group by grp) from t3; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t3 ALL NULL NULL NULL NULL 2 +2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 6 Using where; Using temporary; Using filesort +Warnings: +Note 1003 select (`test`.`t3`.`a`,(select max(`test`.`t1`.`ie`) AS `max(ie)` from `test`.`t1` where (`test`.`t1`.`oref` = 4) group by `test`.`t1`.`grp` having trigcond(((`test`.`t3`.`a`) = (max(`test`.`t1`.`ie`)))))) AS `a in (select max(ie) from t1 where oref=4 group by grp)` from `test`.`t3` +drop table t1, t2, t3; +create table t1 (a int, oref int, key(a)); +insert into t1 values +(1, 1), +(1, NULL), +(2, 3), +(2, NULL), +(3, NULL); +create table t2 
(a int, oref int); +insert into t2 values (1, 1), (2,2), (NULL, 3), (NULL, 4); +select oref, a, a in (select a from t1 where oref=t2.oref) Z from t2; +oref a Z +1 1 1 +2 2 0 +3 NULL NULL +4 NULL 0 +explain extended +select oref, a, a in (select a from t1 where oref=t2.oref) Z from t2; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 4 +2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 2 Using index; Using where +Warnings: +Note 1276 Field or reference 't2.oref' of SELECT #2 was resolved in SELECT #1 +Note 1003 select `test`.`t2`.`oref` AS `oref`,`test`.`t2`.`a` AS `a`,(`test`.`t2`.`a`,(((`test`.`t2`.`a`) in t1 on a checking NULL where (`test`.`t1`.`oref` = `test`.`t2`.`oref`)))) AS `Z` from `test`.`t2` +flush status; +select oref, a from t2 where a in (select a from t1 where oref=t2.oref); +oref a +1 1 +show status like '%Handler_read_rnd_next'; +Variable_name Value +Handler_read_rnd_next 5 +delete from t2; +insert into t2 values (NULL, 0),(NULL, 0), (NULL, 0), (NULL, 0); +flush status; +select oref, a, a in (select a from t1 where oref=t2.oref) Z from t2; +oref a Z +0 NULL 0 +0 NULL 0 +0 NULL 0 +0 NULL 0 +show status like '%Handler_read%'; +Variable_name Value +Handler_read_first 0 +Handler_read_key 0 +Handler_read_next 0 +Handler_read_prev 0 +Handler_read_rnd 0 +Handler_read_rnd_next 29 +select 'No key lookups, seq reads: 29= 5 reads from t2 + 4 * 6 reads from t1.' Z; +Z +No key lookups, seq reads: 29= 5 reads from t2 + 4 * 6 reads from t1. +drop table t1, t2; +create table t1 (a int, b int, primary key (a)); +insert into t1 values (1,1), (3,1),(100,1); +create table t2 (a int, b int); +insert into t2 values (1,1),(2,1),(NULL,1),(NULL,0); +select a,b, a in (select a from t1 where t1.b = t2.b) Z from t2 ; +a b Z +1 1 1 +2 1 0 +NULL 1 NULL +NULL 0 0 +drop table t1, t2; +create table t1 (a int, b int, key(a)); +insert into t1 values +(0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9); +create table t2 like t1; +insert into t2 select * from t1; +update t2 set b=1; +create table t3 (a int, oref int); +insert into t3 values (1, 1), (NULL,1), (NULL,0); +select a, oref, +t3.a in (select t1.a from t1, t2 where t1.b=t2.a and t2.b=t3.oref) Z +from t3; +a oref Z +1 1 1 +NULL 1 NULL +NULL 0 0 +explain extended +select a, oref, +t3.a in (select t1.a from t1, t2 where t1.b=t2.a and t2.b=t3.oref) Z +from t3; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t3 ALL NULL NULL NULL NULL 3 +2 DEPENDENT SUBQUERY t1 ref_or_null a a 5 func 4 Using where +2 DEPENDENT SUBQUERY t2 ref a a 5 test.t1.b 1 Using where +Warnings: +Note 1276 Field or reference 't3.oref' of SELECT #2 was resolved in SELECT #1 +Note 1003 select `test`.`t3`.`a` AS `a`,`test`.`t3`.`oref` AS `oref`,(`test`.`t3`.`a`,(select 1 AS `Not_used` from `test`.`t1` join `test`.`t2` where ((`test`.`t2`.`a` = `test`.`t1`.`b`) and (`test`.`t2`.`b` = `test`.`t3`.`oref`) and trigcond((((`test`.`t3`.`a`) = `test`.`t1`.`a`) or isnull(`test`.`t1`.`a`)))) having trigcond((`test`.`t1`.`a`)))) AS `Z` from `test`.`t3` +drop table t1, t2, t3; diff --git a/mysql-test/t/subselect3.test b/mysql-test/t/subselect3.test new file mode 100644 index 00000000000..f7fbafdd17f --- /dev/null +++ b/mysql-test/t/subselect3.test @@ -0,0 +1,137 @@ +--disable_warnings +drop table if exists t0, t1, t2, t3, t4; +--enable_warnings + +# +# 1. 
Subquery with GROUP/HAVING +# +create table t1 (oref int, grp int, ie int) ; +insert into t1 (oref, grp, ie) values + (1, 1, 1), + (1, 1, 1), + (1, 2, NULL), + + (2, 1, 3), + + (3, 1, 4), + (3, 2, NULL); + +# Ok, for +# select max(ie) from t1 where oref=PARAM group by grp +# we'll have: +# 1 -> (1, NULL) matching + NULL +# 2 -> (3) non-matching +# 3 -> (3, NULL) non-matching + NULL +# 4 -> () nothing. + +create table t2 (oref int, a int); +insert into t2 values + (1, 1), + (2, 2), + (3, 3), + (4, NULL), + (2, NULL); + +# true, false, null, false, null +select a, oref, a in (select max(ie) + from t1 where oref=t2.oref group by grp) from t2; + +# This must have a trigcond +explain extended +select a, oref, a in (select max(ie) + from t1 where oref=t2.oref group by grp) from t2; + +# This must not have a trigcond: +explain extended +select a, oref from t2 +where a in (select max(ie) from t1 where oref=t2.oref group by grp); + + +# Non-correlated subquery, 2 NULL evaluations +create table t3 (a int); +insert into t3 values (NULL), (NULL); +flush status; +select a in (select max(ie) from t1 where oref=4 group by grp) from t3; +show status like 'Handler_read_rnd_next'; +select ' ^ This must show 11' Z; + +# This must show trigcond: +explain extended select a in (select max(ie) from t1 where oref=4 group by grp) from t3; + +drop table t1, t2, t3; + +# +# 2. Subquery handled with 'index_subquery': +# +create table t1 (a int, oref int, key(a)); +insert into t1 values + (1, 1), + (1, NULL), + (2, 3), + (2, NULL), + (3, NULL); + +create table t2 (a int, oref int); +insert into t2 values (1, 1), (2,2), (NULL, 3), (NULL, 4); + +select oref, a, a in (select a from t1 where oref=t2.oref) Z from t2; + +# The next explain shows "using index" but that is just incorrect display +# (there is a bug filed about this). +explain extended +select oref, a, a in (select a from t1 where oref=t2.oref) Z from t2; + +flush status; +select oref, a from t2 where a in (select a from t1 where oref=t2.oref); +# This will only show access to t2: +show status like '%Handler_read_rnd_next'; + +# Check that repeated NULL-scans are not cached (subq. is not correlated): +delete from t2; +insert into t2 values (NULL, 0),(NULL, 0), (NULL, 0), (NULL, 0); + +flush status; +select oref, a, a in (select a from t1 where oref=t2.oref) Z from t2; +show status like '%Handler_read%'; +select 'No key lookups, seq reads: 29= 5 reads from t2 + 4 * 6 reads from t1.' Z; + +drop table t1, t2; + +# +# 3. Subquery handled with 'unique_index_subquery': +# +create table t1 (a int, b int, primary key (a)); +insert into t1 values (1,1), (3,1),(100,1); + +create table t2 (a int, b int); +insert into t2 values (1,1),(2,1),(NULL,1),(NULL,0); + +select a,b, a in (select a from t1 where t1.b = t2.b) Z from t2 ; + +drop table t1, t2; + +# +# 4. 
Subquery that is a join, with ref access +# +create table t1 (a int, b int, key(a)); +insert into t1 values + (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9); + +create table t2 like t1; +insert into t2 select * from t1; +update t2 set b=1; + +create table t3 (a int, oref int); +insert into t3 values (1, 1), (NULL,1), (NULL,0); +select a, oref, + t3.a in (select t1.a from t1, t2 where t1.b=t2.a and t2.b=t3.oref) Z +from t3; + +# This must have trigcond in WHERE and HAVING: +explain extended +select a, oref, + t3.a in (select t1.a from t1, t2 where t1.b=t2.a and t2.b=t3.oref) Z +from t3; + +drop table t1, t2, t3; + diff --git a/sql/item.h b/sql/item.h index 0cfb0b01fd8..566daa1aaee 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1959,6 +1959,16 @@ public: class Item_in_subselect; + +/* + An object of this class: + - Converts val_XXX() calls to ref->val_XXX_result() calls, like Item_ref. + - Sets owner->was_null=TRUE if it has returned a NULL value from any + val_XXX() function. This allows to inject an Item_ref_null_helper + object into subquery and then check if the subquery has produced a row + with NULL value. +*/ + class Item_ref_null_helper: public Item_ref { protected: diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 9a400d60ae6..540f67ba0ee 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -786,9 +786,41 @@ longlong Item_in_optimizer::val_int() { DBUG_ASSERT(fixed == 1); cache->store(args[0]); + if (cache->null_value) { - null_value= 1; + if (((Item_in_subselect*)args[1])->is_top_level_item()) + { + /* + We're evaluating "NULL IN (SELECT ...)". The result can be NULL or + FALSE, and we can return one instead of another. Just return NULL. + */ + null_value= 1; + } + else + { + if (!((Item_in_subselect*)args[1])->is_correlated && + result_for_null_param != UNKNOWN) + { + /* Use cached value from previous execution */ + null_value= result_for_null_param; + } + else + { + /* + We're evaluating "NULL IN (SELECT ...)". The result is: + FALSE if SELECT produces an empty set, or + NULL otherwise. + We disable the predicates we've pushed down into subselect, run the + subselect and see if it has produced any rows. + */ + ((Item_in_subselect*)args[1])->enable_pushed_conds= FALSE; + longlong tmp= args[1]->val_bool_result(); + result_for_null_param= null_value= + !((Item_in_subselect*)args[1])->engine->no_rows(); + ((Item_in_subselect*)args[1])->enable_pushed_conds= TRUE; + } + } return 0; } bool tmp= args[1]->val_bool_result(); diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index c8439cba303..acad1e51bc9 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -100,25 +100,44 @@ public: }; class Item_cache; +#define UNKNOWN ((my_bool)-1) + + +/* + Item_in_optimizer(left_expr, Item_in_subselect(...)) + + Item_in_optimizer is used to wrap an instance of Item_in_subselect. This + class does the following: + - Evaluate the left expression and store it in Item_cache_* object (to + avoid re-evaluating it many times during subquery execution) + - Shortcut the evaluation of "NULL IN (...)" to NULL in the cases where we + don't care if the result is NULL or FALSE. + + NOTE + It is not quite clear why the above listed functionality should be + placed into a separate class called 'Item_in_optimizer'. 
+*/ + class Item_in_optimizer: public Item_bool_func { protected: Item_cache *cache; bool save_cache; + /* + Stores the value of "NULL IN (SELECT ...)" for uncorrelated subqueries: + UNKNOWN - "NULL in (SELECT ...)" has not yet been evaluated + FALSE - result is FALSE + TRUE - result is NULL + */ + my_bool result_for_null_param; public: Item_in_optimizer(Item *a, Item_in_subselect *b): - Item_bool_func(a, my_reinterpret_cast(Item *)(b)), cache(0), save_cache(0) + Item_bool_func(a, my_reinterpret_cast(Item *)(b)), cache(0), + save_cache(0), result_for_null_param(UNKNOWN) {} bool fix_fields(THD *, Item **); bool fix_left(THD *thd, Item **ref); bool is_null(); - /* - Item_in_optimizer item is special boolean function. On value request - (one of val, val_int or val_str methods) it evaluate left expression - of IN by storing it value in cache item (one of Item_cache* items), - then it test cache is it NULL. If left expression (cache) is NULL then - Item_in_optimizer return NULL, else it evaluate Item_in_subselect. - */ longlong val_int(); void cleanup(); const char *func_name() const { return ""; } @@ -256,9 +275,11 @@ public: class Item_maxmin_subselect; /* + trigcond(arg) ::= param? arg : TRUE + The class Item_func_trig_cond is used for guarded predicates which are employed only for internal purposes. - A guarded predicates is an object consisting of an a regular or + A guarded predicate is an object consisting of an a regular or a guarded predicate P and a pointer to a boolean guard variable g. A guarded predicate P/g is evaluated to true if the value of the guard g is false, otherwise it is evaluated to the same value that @@ -276,6 +297,10 @@ class Item_maxmin_subselect; Objects of this class are built only for query execution after the execution plan has been already selected. That's why this class needs only val_int out of generic methods. + + Current uses of Item_func_trig_cond objects: + - To wrap selection conditions when executing outer joins + - To wrap condition that is pushed down into subquery */ class Item_func_trig_cond: public Item_bool_func @@ -1019,6 +1044,11 @@ public: /* Functions used by HAVING for rewriting IN subquery */ class Item_in_subselect; + +/* + This is like IS NOT NULL but it also remembers if it ever has + encountered a NULL. 
+*/ class Item_is_not_null_test :public Item_func_isnull { Item_in_subselect* owner; diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 7015f450aa7..489a647402e 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -37,7 +37,7 @@ inline Item * and_items(Item* cond, Item *item) Item_subselect::Item_subselect(): Item_result_field(), value_assigned(0), thd(0), substitution(0), engine(0), old_engine(0), used_tables_cache(0), have_to_be_excluded(0), - const_item_cache(1), engine_changed(0), changed(0) + const_item_cache(1), engine_changed(0), changed(0), is_correlated(FALSE) { with_subselect= 1; reset(); @@ -192,16 +192,16 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref) return res; } -bool Item_subselect::exec() +bool Item_subselect::exec(bool full_scan) { int res; - res= engine->exec(); + res= engine->exec(full_scan); if (engine_changed) { engine_changed= 0; - return exec(); + return exec(full_scan); } return (res); } @@ -441,13 +441,13 @@ bool Item_singlerow_subselect::null_inside() void Item_singlerow_subselect::bring_value() { - exec(); + exec(FALSE); } double Item_singlerow_subselect::val_real() { DBUG_ASSERT(fixed == 1); - if (!exec() && !value->null_value) + if (!exec(FALSE) && !value->null_value) { null_value= 0; return value->val_real(); @@ -462,7 +462,7 @@ double Item_singlerow_subselect::val_real() longlong Item_singlerow_subselect::val_int() { DBUG_ASSERT(fixed == 1); - if (!exec() && !value->null_value) + if (!exec(FALSE) && !value->null_value) { null_value= 0; return value->val_int(); @@ -476,7 +476,7 @@ longlong Item_singlerow_subselect::val_int() String *Item_singlerow_subselect::val_str(String *str) { - if (!exec() && !value->null_value) + if (!exec(FALSE) && !value->null_value) { null_value= 0; return value->val_str(str); @@ -491,7 +491,7 @@ String *Item_singlerow_subselect::val_str(String *str) my_decimal *Item_singlerow_subselect::val_decimal(my_decimal *decimal_value) { - if (!exec() && !value->null_value) + if (!exec(FALSE) && !value->null_value) { null_value= 0; return value->val_decimal(decimal_value); @@ -506,7 +506,7 @@ my_decimal *Item_singlerow_subselect::val_decimal(my_decimal *decimal_value) bool Item_singlerow_subselect::val_bool() { - if (!exec() && !value->null_value) + if (!exec(FALSE) && !value->null_value) { null_value= 0; return value->val_bool(); @@ -557,7 +557,8 @@ bool Item_in_subselect::test_limit(SELECT_LEX_UNIT *unit) Item_in_subselect::Item_in_subselect(Item * left_exp, st_select_lex *select_lex): - Item_exists_subselect(), optimizer(0), transformed(0), upper_item(0) + Item_exists_subselect(), optimizer(0), transformed(0), + enable_pushed_conds(TRUE), upper_item(0) { DBUG_ENTER("Item_in_subselect::Item_in_subselect"); left_expr= left_exp; @@ -602,7 +603,7 @@ void Item_exists_subselect::fix_length_and_dec() double Item_exists_subselect::val_real() { DBUG_ASSERT(fixed == 1); - if (exec()) + if (exec(FALSE)) { reset(); return 0; @@ -613,7 +614,7 @@ double Item_exists_subselect::val_real() longlong Item_exists_subselect::val_int() { DBUG_ASSERT(fixed == 1); - if (exec()) + if (exec(FALSE)) { reset(); return 0; @@ -624,7 +625,7 @@ longlong Item_exists_subselect::val_int() String *Item_exists_subselect::val_str(String *str) { DBUG_ASSERT(fixed == 1); - if (exec()) + if (exec(FALSE)) { reset(); return 0; @@ -637,7 +638,7 @@ String *Item_exists_subselect::val_str(String *str) my_decimal *Item_exists_subselect::val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); - if (exec()) + if (exec(FALSE)) { reset(); 
return 0; @@ -650,7 +651,7 @@ my_decimal *Item_exists_subselect::val_decimal(my_decimal *decimal_value) bool Item_exists_subselect::val_bool() { DBUG_ASSERT(fixed == 1); - if (exec()) + if (exec(FALSE)) { reset(); return 0; @@ -668,7 +669,7 @@ double Item_in_subselect::val_real() DBUG_ASSERT(0); DBUG_ASSERT(fixed == 1); null_value= 0; - if (exec()) + if (exec(!enable_pushed_conds)) { reset(); null_value= 1; @@ -689,7 +690,7 @@ longlong Item_in_subselect::val_int() DBUG_ASSERT(0); DBUG_ASSERT(fixed == 1); null_value= 0; - if (exec()) + if (exec(!enable_pushed_conds)) { reset(); null_value= 1; @@ -710,7 +711,7 @@ String *Item_in_subselect::val_str(String *str) DBUG_ASSERT(0); DBUG_ASSERT(fixed == 1); null_value= 0; - if (exec()) + if (exec(!enable_pushed_conds)) { reset(); null_value= 1; @@ -730,7 +731,7 @@ bool Item_in_subselect::val_bool() { DBUG_ASSERT(fixed == 1); null_value= 0; - if (exec()) + if (exec(!enable_pushed_conds)) { reset(); null_value= 1; @@ -750,7 +751,7 @@ my_decimal *Item_in_subselect::val_decimal(my_decimal *decimal_value) DBUG_ASSERT(0); null_value= 0; DBUG_ASSERT(fixed == 1); - if (exec()) + if (exec(!enable_pushed_conds)) { reset(); null_value= 1; @@ -763,7 +764,51 @@ my_decimal *Item_in_subselect::val_decimal(my_decimal *decimal_value) } -/* Rewrite a single-column IN/ALL/ANY subselect. */ +/* + Rewrite a single-column IN/ALL/ANY subselect + + SYNOPSIS + Item_in_subselect::single_value_transformer() + join + func + + DESCRIPTION + Rewrite a single-column subquery using rule-based approach. The subquery + + oe $cmp$ (SELECT sel FROM ... WHERE subq_where HAVING subq_having) + + First, try to convert the subquery to scalar-result subquery in one of + the forms: + + - oe $cmp$ (SELECT MAX(...) ) // handled by Item_singlerow_subselect + - oe $cmp$ (SELECT ...) // handled by Item_maxminsubselect + + If that fails, the subquery will be handled with class Item_in_optimizer, + Inject the predicates into subquery, i.e. convert it to: + + - If the subquery has aggregates, GROUP BY, or HAVING, convert to + + SELECT sel FROM ... HAVING subq_having AND + trigcond(oe $cmp$ ref_or_null_helper) + + the addition is wrapped into trigger only when we want to distinguish + between NULL and FALSE results. + + - Else, if we don't care if subquery result is NULL or FALSE, convert to + + SELECT 1 ... WHERE (oe $CMP$ ie) AND subq_where + + - Else convert to: + + SELECT 1 WHERE ... + WHERE subq_where AND trigcond((oe $CMP$ ie) OR ie IS NULL) + HAVING subq_having AND trigcond((ie)) + + RETURN + RES_OK - Transformed successfully (or done nothing?) + RES_REDUCE - The subquery was reduced to non-subquery + RES_ERROR - Error +*/ Item_subselect::trans_res Item_in_subselect::single_value_transformer(JOIN *join, @@ -896,8 +941,12 @@ Item_in_subselect::single_value_transformer(JOIN *join, select_lex->uncacheable|= UNCACHEABLE_DEPENDENT; /* Add the left part of a subselect to a WHERE or HAVING clause of - the right part, e.g. SELECT 1 IN (SELECT a FROM t1) => - SELECT Item_in_optimizer(1, SELECT a FROM t1 WHERE a=1) + the right part, e.g. + + SELECT 1 IN (SELECT a FROM t1) => + + SELECT Item_in_optimizer(1, SELECT a FROM t1 WHERE a=1) + HAVING is used only if the right part contains a SUM function, a GROUP BY or a HAVING clause. 
*/ @@ -912,10 +961,15 @@ Item_in_subselect::single_value_transformer(JOIN *join, ref_pointer_array, (char *)"", this->full_name())); -#ifdef CORRECT_BUT_TOO_SLOW_TO_BE_USABLE - if (!abort_on_null && left_expr->maybe_null) - item= new Item_cond_or(new Item_func_isnull(left_expr), item); -#endif + if (!abort_on_null && ((Item*)select_lex->item_list.head())->maybe_null) + { + /* + We can encounter "NULL IN (SELECT ...)". Wrap the added condition + within a trigger. + */ + item= new Item_func_trig_cond(item, &enable_pushed_conds); + } + /* AND and comparison functions can't be changed during fix_fields() we can assign select_lex->having here, and pass 0 as last @@ -944,10 +998,13 @@ Item_in_subselect::single_value_transformer(JOIN *join, select_lex->item_list.push_back(new Item_int("Not_used", (longlong) 1, 21)); select_lex->ref_pointer_array[0]= select_lex->item_list.head(); + item= func->create(expr, item); if (!abort_on_null && orig_item->maybe_null) { - having= new Item_is_not_null_test(this, having); + having= + new Item_func_trig_cond(new Item_is_not_null_test(this, having), + &enable_pushed_conds); /* Item_is_not_null_test can't be changed during fix_fields() we can assign select_lex->having here, and pass 0 as last @@ -967,12 +1024,15 @@ Item_in_subselect::single_value_transformer(JOIN *join, select_lex->having_fix_field= 0; if (tmp) DBUG_RETURN(RES_ERROR); + /* + NOTE: It is important that we add this "IS NULL" here, even when + orig_item can't be NULL. This is needed so that this predicate is + only used by ref[_or_null] analyzer (and, e.g. is not used by const + propagation). + */ item= new Item_cond_or(item, new Item_func_isnull(orig_item)); -#ifdef CORRECT_BUT_TOO_SLOW_TO_BE_USABLE - if (left_expr->maybe_null) - item= new Item_cond_or(new Item_func_isnull(left_expr), item); -#endif + item= new Item_func_trig_cond(item, &enable_pushed_conds); } item->name= (char *)in_additional_cond; /* @@ -999,13 +1059,14 @@ Item_in_subselect::single_value_transformer(JOIN *join, we can assign select_lex->having here, and pass 0 as last argument (reference) to fix_fields() */ - select_lex->having= - join->having= - func->create(expr, + Item *new_having= + func->create(expr, new Item_ref_null_helper(&select_lex->context, this, select_lex->ref_pointer_array, (char *)"", (char *)"")); + new_having= new Item_func_trig_cond(new_having, &enable_pushed_conds); + select_lex->having= join->having= new_having; select_lex->having_fix_field= 1; /* @@ -1210,6 +1271,8 @@ Item_in_subselect::row_value_transformer(JOIN *join) where_item= and_items(where_item, item); } + if (where_item) + where_item= new Item_func_trig_cond(where_item, &enable_pushed_conds); /* AND can't be changed during fix_fields() we can assign select_lex->where here, and pass 0 as last @@ -1223,6 +1286,8 @@ Item_in_subselect::row_value_transformer(JOIN *join) if (having_item) { bool res; + having_item= new Item_func_trig_cond(having_item, &enable_pushed_conds); + select_lex->having= join->having= and_items(join->having, having_item); select_lex->having->top_level_item(); /* @@ -1439,6 +1504,27 @@ bool subselect_union_engine::is_executed() const } +/* + Check if last execution of the subquery engine produced any rows + + SYNOPSIS + subselect_union_engine::no_rows() + + DESCRIPTION + Check if last execution of the subquery engine produced any rows. The + return value is undefined if last execution ended in an error. 
+ + RETURN + TRUE - Last subselect execution has produced no rows + FALSE - Otherwise +*/ + +bool subselect_union_engine::no_rows() +{ + /* Check if we got any rows when reading UNION result from temp. table: */ + return test(!unit->fake_select_lex->join->send_records); +} + void subselect_uniquesubquery_engine::cleanup() { DBUG_ENTER("subselect_uniquesubquery_engine::cleanup"); @@ -1504,6 +1590,29 @@ int subselect_uniquesubquery_engine::prepare() return 1; } + +/* + Check if last execution of the subquery engine produced any rows + + SYNOPSIS + subselect_single_select_engine::no_rows() + + DESCRIPTION + Check if last execution of the subquery engine produced any rows. The + return value is undefined if last execution ended in an error. + + RETURN + TRUE - Last subselect execution has produced no rows + FALSE - Otherwise +*/ + +bool subselect_single_select_engine::no_rows() +{ +// return test(!join->send_records); + return !item->assigned(); +} + + static Item_result set_row(List &item_list, Item *item, Item_cache **row, bool *maybe_null) { @@ -1557,7 +1666,11 @@ void subselect_uniquesubquery_engine::fix_length_and_dec(Item_cache **row) DBUG_ASSERT(0); } -int subselect_single_select_engine::exec() +int init_read_record_seq(JOIN_TAB *tab); +int join_read_always_key_or_null(JOIN_TAB *tab); +int join_read_next_same_or_null(READ_RECORD *info); + +int subselect_single_select_engine::exec(bool full_scan) { DBUG_ENTER("subselect_single_select_engine::exec"); char const *save_where= thd->where; @@ -1595,7 +1708,43 @@ int subselect_single_select_engine::exec() if (!executed) { item->reset_value_registration(); + if (full_scan) + { + /* + We should not apply optimizations based on the condition that was + pushed down into the subquery. Those optimizations are ref[_or_null] + acceses. Change them to be full table scans. + */ + for (uint i=join->const_tables ; i < join->tables ; i++) + { + JOIN_TAB *tab=join->join_tab+i; + if (tab->keyuse && tab->keyuse->outer_ref) + { + tab->read_first_record= init_read_record_seq; + tab->read_record.record= tab->table->record[0]; + tab->read_record.thd= join->thd; + tab->read_record.ref_length= tab->table->file->ref_length; + } + } + } + join->exec(); + + if (full_scan) + { + /* Enable the optimizations back */ + for (uint i=join->const_tables ; i < join->tables ; i++) + { + JOIN_TAB *tab=join->join_tab+i; + if (tab->keyuse && tab->keyuse->outer_ref) + { + tab->read_record.record= 0; + tab->read_record.ref_length= 0; + tab->read_first_record= join_read_always_key_or_null; + tab->read_record.read_record= join_read_next_same_or_null; + } + } + } executed= 1; thd->where= save_where; thd->lex->current_select= save_select; @@ -1606,29 +1755,161 @@ int subselect_single_select_engine::exec() DBUG_RETURN(0); } -int subselect_union_engine::exec() +int subselect_union_engine::exec(bool full_scan) { char const *save_where= thd->where; + /* + Ignore the full_scan parameter: the pushed down predicates are only used + for filtering, and the caller has disabled them if necessary. + */ int res= unit->exec(); thd->where= save_where; return res; } -int subselect_uniquesubquery_engine::exec() +/* + Search for at least on row satisfying select condition + + SYNOPSIS + subselect_uniquesubquery_engine::scan_table() + + DESCRIPTION + Scan the table using sequential access until we find at least one row + satisfying select condition. + + The result of this function (info about whether a row was found) is + stored in this->empty_result_set. 
+ + RETURN + FALSE - OK + TRUE - Error +*/ + +int subselect_uniquesubquery_engine::scan_table() +{ + int error; + TABLE *table= tab->table; + DBUG_ENTER("subselect_uniquesubquery_engine::scan_table"); + + empty_result_set= TRUE; + bool is_uncorrelated= !cond || !(cond->used_tables() & OUTER_REF_TABLE_BIT); + + if (table->file->inited) + table->file->ha_index_end(); + + table->file->ha_rnd_init(1); + table->file->extra_opt(HA_EXTRA_CACHE, + current_thd->variables.read_buff_size); + table->null_row= 0; + for (;;) + { + error=table->file->rnd_next(table->record[0]); + if (error && error != HA_ERR_END_OF_FILE) + { + error= report_error(table, error); + break; + } + /* No more rows */ + if (table->status) + break; + + if (!cond || cond->val_int()) + { + empty_result_set= FALSE; + break; + } + } + + table->file->ha_rnd_end(); + DBUG_RETURN(error != 0); +} + + +/* + Copy ref key and check for null parts in it + + SYNOPSIS + subselect_uniquesubquery_engine::copy_ref_key() + + DESCRIPTION + Copy ref key and check for null parts in it. + + RETURN + FALSE - ok, index lookup key without keys copied. + TRUE - an error occured while copying the key +*/ + +bool subselect_uniquesubquery_engine::copy_ref_key() +{ + DBUG_ENTER("subselect_uniquesubquery_engine::copy_ref_key"); + + for (store_key **copy= tab->ref.key_copy ; *copy ; copy++) + { + tab->ref.key_err= (*copy)->copy(); + + /* + When there is a NULL part in the key we don't need to make index + lookup for such key thus we don't need to copy whole key. + If we later should do a sequential scan return OK. Fail otherwise. + + See also the comment for the subselect_uniquesubquery_engine::exec() + function. + */ + null_keypart= (*copy)->null_key; + bool top_level= ((Item_in_subselect *) item)->is_top_level_item(); + if (null_keypart && !top_level) + break; + if ((tab->ref.key_err) & 1 || (null_keypart && top_level)) + { + tab->table->status= STATUS_NOT_FOUND; + DBUG_RETURN(1); + } + } + DBUG_RETURN(0); +} + + +/* + Execute subselect + + SYNOPSIS + subselect_uniquesubquery_engine::exec() + + DESCRIPTION + Find rows corresponding to the ref key using index access. + If some part of the lookup key is NULL, then we're evaluating + NULL IN (SELECT ... ) + This is a special case, we don't need to search for NULL in the table, + instead, the result value is + - NULL if select produces empty row set + - FALSE otherwise. + + In some cases (IN subselect is a top level item, i.e. abort_on_null==TRUE) + the caller doesn't distinguish between NULL and FALSE result and we just + return FALSE. + Otherwise we make a full table scan to see if there is at least one matching row. + + NOTE + + RETURN + FALSE - ok + TRUE - an error occured while scanning +*/ + +int subselect_uniquesubquery_engine::exec(bool full_scan) { DBUG_ENTER("subselect_uniquesubquery_engine::exec"); int error; TABLE *table= tab->table; - for (store_key **copy=tab->ref.key_copy ; *copy ; copy++) - { - if ((tab->ref.key_err= (*copy)->copy()) & 1) - { - table->status= STATUS_NOT_FOUND; - DBUG_RETURN(1); - } - } + + /* TODO: change to use of 'full_scan' here? 
*/ + if (copy_ref_key()) + DBUG_RETURN(1); + if (null_keypart) + DBUG_RETURN(scan_table()); + if (!table->file->inited) table->file->ha_index_init(tab->ref.key); error= table->file->index_read(table->record[0], @@ -1657,14 +1938,35 @@ subselect_uniquesubquery_engine::~subselect_uniquesubquery_engine() } -int subselect_indexsubquery_engine::exec() +/* + Index-lookup subselect 'engine' - run the subquery + + SYNOPSIS + subselect_uniquesubquery_engine:exec() + full_scan + + DESCRIPTION + Resolve subquery using index lookup(s). + First look for specified constant, + If not found and we need to check for NULLs, do that too. + + NULL IN (SELECT ...) is a special case. + + RETURN + 0 + 1 +*/ + +int subselect_indexsubquery_engine::exec(bool full_scan) { - DBUG_ENTER("subselect_indexsubselect_engine::exec"); + DBUG_ENTER("subselect_indexsubquery_engine::exec"); int error; bool null_finding= 0; TABLE *table= tab->table; ((Item_in_subselect *) item)->value= 0; + empty_result_set= TRUE; + null_keypart= 0; if (check_null) { @@ -1673,14 +1975,12 @@ int subselect_indexsubquery_engine::exec() ((Item_in_subselect *) item)->was_null= 0; } - for (store_key **copy=tab->ref.key_copy ; *copy ; copy++) - { - if ((tab->ref.key_err= (*copy)->copy()) & 1) - { - table->status= STATUS_NOT_FOUND; - DBUG_RETURN(1); - } - } + /* Copy the ref key and check for nulls... */ + if (copy_ref_key()) + DBUG_RETURN(1); + + if (null_keypart) + DBUG_RETURN(scan_table()); if (!table->file->inited) table->file->ha_index_init(tab->ref.key); diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 45df4f3880d..9410dbc465e 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -60,6 +60,9 @@ public: /* subquery is transformed */ bool changed; + /* TRUE <=> The underlying SELECT is correlated w.r.t some ancestor select */ + bool is_correlated; + enum trans_res {RES_OK, RES_REDUCE, RES_ERROR}; enum subs_type {UNKNOWN_SUBS, SINGLEROW_SUBS, EXISTS_SUBS, IN_SUBS, ALL_SUBS, ANY_SUBS}; @@ -92,7 +95,7 @@ public: return null_value; } bool fix_fields(THD *thd, Item **ref); - virtual bool exec(); + virtual bool exec(bool full_scan); virtual void fix_length_and_dec(); table_map used_tables() const; table_map not_null_tables() const { return 0; } @@ -215,7 +218,20 @@ public: friend class subselect_indexsubquery_engine; }; -/* IN subselect */ + +/* + IN subselect: this represents "left_exr IN (SELECT ...)" + + This class has: + - (as a descendant of Item_subselect) a "subquery execution engine" which + allows it to evaluate subqueries. (and this class participates in + execution by having was_null variable where part of execution result + is stored. + - Transformation methods (todo: more on this). + + This class is not used directly, it is "wrapped" into Item_in_optimizer + which provides some small bits of subquery evaluation. 
+*/ class Item_in_subselect :public Item_exists_subselect { @@ -231,12 +247,14 @@ protected: bool abort_on_null; bool transformed; public: + /* Used to trigger on/off conditions that were pushed down to subselect */ + bool enable_pushed_conds; Item_func_not_all *upper_item; // point on NOT/NOP before ALL/SOME subquery Item_in_subselect(Item * left_expr, st_select_lex *select_lex); Item_in_subselect() :Item_exists_subselect(), optimizer(0), abort_on_null(0), transformed(0), - upper_item(0) + enable_pushed_conds(TRUE), upper_item(0) {} subs_type substype() { return IN_SUBS; } @@ -256,6 +274,7 @@ public: my_decimal *val_decimal(my_decimal *); bool val_bool(); void top_level_item() { abort_on_null=1; } + inline bool is_top_level_item() { return abort_on_null; } bool test_limit(st_select_lex_unit *unit); void print(String *str); bool fix_fields(THD *thd, Item **ref); @@ -313,7 +332,28 @@ public: THD * get_thd() { return thd; } virtual int prepare()= 0; virtual void fix_length_and_dec(Item_cache** row)= 0; - virtual int exec()= 0; + /* + Execute the engine + + SYNOPSIS + exec() + full_scan TRUE - Pushed-down predicates are disabled, the engine + must disable made based on those predicates. + FALSE - Pushed-down predicates are in effect. + DESCRIPTION + Execute the engine. The result of execution is subquery value that is + either captured by previously set up select_result-based 'sink' or + stored somewhere by the exec() method itself. + + A required side effect: if full_scan==TRUE, subselect_engine->no_rows() + should return correct result. + + RETURN + 0 - OK + 1 - Either an execution error, or the engine was be "changed", and + caller should call exec() again for the new engine. + */ + virtual int exec(bool full_scan)= 0; virtual uint cols()= 0; /* return number of columns in select */ virtual uint8 uncacheable()= 0; /* query is uncacheable */ enum Item_result type() { return res_type; } @@ -325,6 +365,8 @@ public: virtual bool change_result(Item_subselect *si, select_subselect *result)= 0; virtual bool no_tables()= 0; virtual bool is_executed() const { return FALSE; } + /* Check if subquery produced any rows during last query execution */ + virtual bool no_rows() = 0; }; @@ -342,7 +384,7 @@ public: void cleanup(); int prepare(); void fix_length_and_dec(Item_cache** row); - int exec(); + int exec(bool full_scan); uint cols(); uint8 uncacheable(); void exclude(); @@ -351,6 +393,7 @@ public: bool change_result(Item_subselect *si, select_subselect *result); bool no_tables(); bool is_executed() const { return executed; } + bool no_rows(); }; @@ -364,7 +407,7 @@ public: void cleanup(); int prepare(); void fix_length_and_dec(Item_cache** row); - int exec(); + int exec(bool full_scan); uint cols(); uint8 uncacheable(); void exclude(); @@ -373,6 +416,7 @@ public: bool change_result(Item_subselect *si, select_subselect *result); bool no_tables(); bool is_executed() const; + bool no_rows(); }; @@ -382,6 +426,12 @@ class subselect_uniquesubquery_engine: public subselect_engine protected: st_join_table *tab; Item *cond; + /* + TRUE<=> last execution produced empty set. Valid only when left + expression is NULL. 
+ */ + bool empty_result_set; + bool null_keypart; /* TRUE <=> constructed search tuple has a NULL */ public: // constructor can assign THD because it will be called after JOIN::prepare @@ -395,7 +445,7 @@ public: void cleanup(); int prepare(); void fix_length_and_dec(Item_cache** row); - int exec(); + int exec(bool full_scan); uint cols() { return 1; } uint8 uncacheable() { return UNCACHEABLE_DEPENDENT; } void exclude(); @@ -403,11 +453,15 @@ public: void print (String *str); bool change_result(Item_subselect *si, select_subselect *result); bool no_tables(); + int scan_table(); + bool copy_ref_key(); + bool no_rows() { return empty_result_set; } }; class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine { + /* FALSE for 'ref', TRUE for 'ref-or-null'. */ bool check_null; public: @@ -418,7 +472,7 @@ public: :subselect_uniquesubquery_engine(thd, tab_arg, subs, where), check_null(chk_null) {} - int exec(); + int exec(bool full_scan); void print (String *str); }; diff --git a/sql/records.cc b/sql/records.cc index b352f9f395a..3e254fa3648 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -20,7 +20,7 @@ #include "mysql_priv.h" static int rr_quick(READ_RECORD *info); -static int rr_sequential(READ_RECORD *info); +int rr_sequential(READ_RECORD *info); static int rr_from_tempfile(READ_RECORD *info); static int rr_unpack_from_tempfile(READ_RECORD *info); static int rr_unpack_from_buffer(READ_RECORD *info); @@ -184,6 +184,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, } /* init_read_record */ + void end_read_record(READ_RECORD *info) { /* free cache if used */ if (info->cache) @@ -289,7 +290,7 @@ static int rr_index(READ_RECORD *info) } -static int rr_sequential(READ_RECORD *info) +int rr_sequential(READ_RECORD *info) { int tmp; while ((tmp=info->file->rnd_next(info->record))) diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index af81960f9ef..5eb6ea25b68 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1179,7 +1179,7 @@ void st_select_lex::init_select() select_limit= 0; /* denotes the default limit = HA_POS_ERROR */ offset_limit= 0; /* denotes the default offset = 0 */ with_sum_func= 0; - + is_correlated= 0; } /* @@ -1373,6 +1373,8 @@ void st_select_lex::mark_as_dependent(SELECT_LEX *last) SELECT_LEX_UNIT *munit= s->master_unit(); munit->uncacheable|= UNCACHEABLE_DEPENDENT; } + is_correlated= TRUE; + this->master_unit()->item->is_correlated= TRUE; } bool st_select_lex_node::set_braces(bool value) { return 1; } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index a3173b73d6d..027b012542e 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -470,7 +470,7 @@ public: void set_thd(THD *thd_arg) { thd= thd_arg; } friend void lex_start(THD *thd, uchar *buf, uint length); - friend int subselect_union_engine::exec(); + friend int subselect_union_engine::exec(bool); List *get_unit_column_types(); }; @@ -562,6 +562,8 @@ public: query processing end even if we use temporary table */ bool subquery_in_having; + /* TRUE <=> this SELECT is correlated w.r.t. some ancestor select */ + bool is_correlated; /* This variable is required to ensure proper work of subqueries and stored procedures. 
Generally, one should use the states of diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 0f0642280ce..df333e7c9ab 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -158,8 +158,8 @@ static int join_read_prev_same(READ_RECORD *info); static int join_read_prev(READ_RECORD *info); static int join_ft_read_first(JOIN_TAB *tab); static int join_ft_read_next(READ_RECORD *info); -static int join_read_always_key_or_null(JOIN_TAB *tab); -static int join_read_next_same_or_null(READ_RECORD *info); +int join_read_always_key_or_null(JOIN_TAB *tab); +int join_read_next_same_or_null(READ_RECORD *info); static COND *make_cond_for_table(COND *cond,table_map table, table_map used_table); static Item* part_of_refkey(TABLE *form,Field *field); @@ -512,11 +512,12 @@ err: DBUG_RETURN(-1); /* purecov: inspected */ } + /* test if it is known for optimisation IN subquery - SYNOPSYS - JOIN::test_in_subselect + SYNOPSIS + JOIN::test_in_subselect() where - pointer for variable in which conditions should be stored if subquery is known @@ -550,6 +551,35 @@ bool JOIN::test_in_subselect(Item **where) } +/* + Check if the passed HAVING clause is a clause added by subquery optimizer + + SYNOPSIS + is_having_subq_predicates() + having Having clause + + RETURN + TRUE The passed HAVING clause was added by the subquery optimizer + FALSE Otherwise +*/ + +bool is_having_subq_predicates(Item *having) +{ + if (having->type() == Item::FUNC_ITEM) + { + if (((Item_func *) having)->functype() == Item_func::ISNOTNULLTEST_FUNC) + return TRUE; + if (((Item_func *) having)->functype() == Item_func::TRIG_COND_FUNC) + { + having= ((Item_func*)having)->arguments()[0]; + if (((Item_func *) having)->functype() == Item_func::ISNOTNULLTEST_FUNC) + return TRUE; + } + return TRUE; + } + return FALSE; +} + /* global select optimisation. return 0 - success @@ -1016,9 +1046,7 @@ JOIN::optimize() } } else if (join_tab[0].type == JT_REF_OR_NULL && join_tab[0].ref.items[0]->name == in_left_expr_name && - having->type() == Item::FUNC_ITEM && - ((Item_func *) having)->functype() == - Item_func::ISNOTNULLTEST_FUNC) + is_having_subq_predicates(having)) { join_tab[0].type= JT_INDEX_SUBQUERY; error= 0; @@ -2512,6 +2540,9 @@ typedef struct key_field_t { // Used when finding key fields when val IS NULL. 
*/ bool null_rejecting; + + /* TRUE<=> This ref access is an outer subquery reference access */ + bool outer_ref; } KEY_FIELD; /* Values in optimize */ @@ -2810,6 +2841,7 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond, cond->functype() == Item_func::MULT_EQUAL_FUNC) && ((*value)->type() == Item::FIELD_ITEM) && ((Item_field*)*value)->field->maybe_null()); + (*key_fields)->outer_ref= FALSE; (*key_fields)++; } @@ -2868,7 +2900,7 @@ add_key_equal_fields(KEY_FIELD **key_fields, uint and_level, } static void -add_key_fields(KEY_FIELD **key_fields,uint *and_level, +add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, COND *cond, table_map usable_tables, SARGABLE_PARAM **sargables) { @@ -2881,28 +2913,54 @@ add_key_fields(KEY_FIELD **key_fields,uint *and_level, { Item *item; while ((item=li++)) - add_key_fields(key_fields,and_level,item,usable_tables,sargables); + add_key_fields(join, key_fields, and_level, item, usable_tables, + sargables); for (; org_key_fields != *key_fields ; org_key_fields++) org_key_fields->level= *and_level; } else { (*and_level)++; - add_key_fields(key_fields,and_level,li++,usable_tables,sargables); + add_key_fields(join, key_fields, and_level, li++, usable_tables, + sargables); Item *item; while ((item=li++)) { KEY_FIELD *start_key_fields= *key_fields; (*and_level)++; - add_key_fields(key_fields,and_level,item,usable_tables,sargables); + add_key_fields(join, key_fields, and_level, item, usable_tables, + sargables); *key_fields=merge_key_fields(org_key_fields,start_key_fields, *key_fields,++(*and_level)); } } return; } - /* If item is of type 'field op field/constant' add it to key_fields */ + /* + Subquery optimization: check if the encountered condition is one + added by condition push down into subquery. + */ + { + if (cond->type() == Item::FUNC_ITEM && + ((Item_func*)cond)->functype() == Item_func::TRIG_COND_FUNC) + { + cond= ((Item_func*)cond)->arguments()[0]; + if (!join->group_list && !join->order && + join->unit->item && + join->unit->item->substype() == Item_subselect::IN_SUBS && + !join->unit->first_select()->next_select()) + { + add_key_fields(join, key_fields, and_level, cond, usable_tables, + sargables); + // Indicate that this ref access candidate is for subquery lookup: + (*key_fields)[-1].outer_ref= TRUE; + } + return; + } + } + + /* If item is of type 'field op field/constant' add it to key_fields */ if (cond->type() != Item::FUNC_ITEM) return; Item_func *cond_func= (Item_func*) cond; @@ -3076,6 +3134,7 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field) keyuse.used_tables=key_field->val->used_tables(); keyuse.optimize= key_field->optimize & KEY_OPTIMIZE_REF_OR_NULL; keyuse.null_rejecting= key_field->null_rejecting; + keyuse.outer_ref= key_field->outer_ref; VOID(insert_dynamic(keyuse_array,(gptr) &keyuse)); } } @@ -3198,7 +3257,7 @@ sort_keyuse(KEYUSE *a,KEYUSE *b) Here we can add 'ref' access candidates for t1 and t2, but not for t3. 
*/ -static void add_key_fields_for_nj(TABLE_LIST *nested_join_table, +static void add_key_fields_for_nj(JOIN *join, TABLE_LIST *nested_join_table, KEY_FIELD **end, uint *and_level, SARGABLE_PARAM **sargables) { @@ -3210,12 +3269,13 @@ static void add_key_fields_for_nj(TABLE_LIST *nested_join_table, while ((table= li++)) { if (table->nested_join) - add_key_fields_for_nj(table, end, and_level, sargables); + add_key_fields_for_nj(join, table, end, and_level, sargables); else if (!table->on_expr) tables |= table->table->map; } - add_key_fields(end, and_level, nested_join_table->on_expr, tables, sargables); + add_key_fields(join, end, and_level, nested_join_table->on_expr, tables, + sargables); } @@ -3290,7 +3350,8 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, return TRUE; if (cond) { - add_key_fields(&end,&and_level,cond,normal_tables,sargables); + add_key_fields(join_tab->join, &end, &and_level, cond, normal_tables, + sargables); for (; field != end ; field++) { add_key_part(keyuse,field); @@ -3312,8 +3373,9 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, into account as well. */ if (*join_tab[i].on_expr_ref) - add_key_fields(&end,&and_level,*join_tab[i].on_expr_ref, - join_tab[i].table->map,sargables); + add_key_fields(join_tab->join, &end, &and_level, + *join_tab[i].on_expr_ref, + join_tab[i].table->map, sargables); } /* Process ON conditions for the nested joins */ @@ -3323,7 +3385,8 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, while ((table= li++)) { if (table->nested_join) - add_key_fields_for_nj(table, &end, &and_level, sargables); + add_key_fields_for_nj(join_tab->join, table, &end, &and_level, + sargables); } } @@ -10784,6 +10847,13 @@ join_init_quick_read_record(JOIN_TAB *tab) } +int rr_sequential(READ_RECORD *info); +int init_read_record_seq(JOIN_TAB *tab) +{ + tab->read_record.read_record= rr_sequential; + return tab->read_record.file->ha_rnd_init(1); +} + static int test_if_quick_select(JOIN_TAB *tab) { @@ -10912,7 +10982,7 @@ join_ft_read_next(READ_RECORD *info) Reading of key with key reference and one part that may be NULL */ -static int +int join_read_always_key_or_null(JOIN_TAB *tab) { int res; @@ -10928,7 +10998,7 @@ join_read_always_key_or_null(JOIN_TAB *tab) } -static int +int join_read_next_same_or_null(READ_RECORD *info) { int error; diff --git a/sql/sql_select.h b/sql/sql_select.h index 30b8f834ddf..629b44538d8 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -36,6 +36,8 @@ typedef struct keyuse_t { satisfied if val has NULL 'value'. */ bool null_rejecting; + /* TRUE<=> This ref access is an outer subquery reference access */ + bool outer_ref; } KEYUSE; class store_key; @@ -455,10 +457,11 @@ class store_key :public Sql_alloc Field *to_field; // Store data here char *null_ptr; char err; - public: +public: + bool null_key; /* TRUE <=> the value of the key has a null part */ enum store_key_result { STORE_KEY_OK, STORE_KEY_FATAL, STORE_KEY_CONV }; store_key(THD *thd, Field *field_arg, char *ptr, char *null, uint length) - :null_ptr(null),err(0) + :null_ptr(null), err(0), null_key(0) { if (field_arg->type() == FIELD_TYPE_BLOB) { @@ -496,6 +499,7 @@ class store_key_field: public store_key enum store_key_result copy() { copy_field.do_copy(©_field); + null_key= to_field->is_null(); return err != 0 ? 
STORE_KEY_FATAL : STORE_KEY_OK; } const char *name() const { return field_name; } @@ -516,8 +520,8 @@ public: enum store_key_result copy() { int res= item->save_in_field(to_field, 1); + null_key= to_field->is_null() || item->null_value; return (err != 0 || res > 2 ? STORE_KEY_FATAL : (store_key_result) res); - } const char *name() const { return "func"; } }; @@ -547,6 +551,7 @@ public: err= res; } } + null_key= to_field->is_null() || item->null_value; return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err); } const char *name() const { return "const"; } From 84526c67ea072c8d22b100a191049b5d7bffbda6 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 31 Oct 2006 21:30:40 +0300 Subject: [PATCH 14/57] BUG#8804: Better comment + TODO section with suggestion how to speedup the fix. --- sql/item_subselect.cc | 41 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 489a647402e..27cd376001e 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1946,11 +1946,44 @@ subselect_uniquesubquery_engine::~subselect_uniquesubquery_engine() full_scan DESCRIPTION - Resolve subquery using index lookup(s). - First look for specified constant, - If not found and we need to check for NULLs, do that too. + The engine is used to resolve subqueries in form - NULL IN (SELECT ...) is a special case. + oe IN (SELECT key FROM tbl WHERE subq_where) + + The value of the predicate is calculated as follows: + 1. If oe IS NULL, this is a special case, do a full table scan on + table tbl and search for row that satisfies subq_where. If such + row is found, return NULL, otherwise return FALSE. + 2. Make an index lookup via key=oe, search for a row that satisfies + subq_where. If found, return TRUE. + 3. If check_null==TRUE, make another lookup via key=NULL, search for a + row that satisfies subq_where. If found, return NULL, otherwise + return FALSE. + + TODO + The step #1 can be optimized further when the index has several key + parts. Consider a subquery: + + (oe1, oe2) IN (SELECT keypart1, keypart2 FROM tbl WHERE subq_where) + + and suppose we need to evaluate it for {oe1, oe2}=={const1, NULL}. + Current code will do a full table scan and obtain correct result. There + is a better option: instead of evaluating + + SELECT keypart1, keypart2 FROM tbl WHERE subq_where (1) + + and checking if it has produced any matching rows, evaluate + + SELECT keypart2 FROM tbl WHERE subq_where AND keypart1=const1 (2) + + If this query produces a row, the result is NULL (as we're evaluating + "(const1, NULL) IN { (const1, X), ... }", which has a value of UNKNOWN, + i.e. NULL). If the query produces no rows, the result is FALSE. + + We currently evaluate (1) by doing a full table scan. (2) can be + evaluated by doing a "ref" scan on "keypart1=const1", which can be much + cheaper. We can use index statistics to quickly check whether "ref" scan + will be cheaper than full table scan. RETURN 0 From eb5abc14b310b63bd89495d4e2bd07ec28816944 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 1 Nov 2006 00:27:51 +0300 Subject: [PATCH 15/57] BUG#8804: Incorrect results for NULL IN (SELECT ...): review fixes: - Better comments - Remove redundant and dead code. 
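For readers tracing the evaluation rules spelled out in the engine comment above, the same three-step NULL IN (SELECT ...) logic can be sketched compactly. The Engine interface and eval_in_subquery below are invented for illustration only; they are not the real handler or Item_subselect API.

    // Hypothetical sketch of the "oe IN (SELECT key FROM tbl WHERE subq_where)"
    // evaluation order described in the comment above; Engine is a made-up interface.
    #include <optional>

    enum class TriVal { True_, False_, Unknown_ };       // three-valued SQL logic

    struct Engine {
      // true if a full table scan finds a row satisfying subq_where
      bool full_scan_finds_match();
      // true if an index lookup on key == k (k may be NULL) finds such a row
      bool index_lookup_finds_match(std::optional<long> k);
    };

    TriVal eval_in_subquery(Engine &eng, std::optional<long> oe, bool check_null)
    {
      if (!oe)                                            // step 1: outer value is NULL
        return eng.full_scan_finds_match() ? TriVal::Unknown_ : TriVal::False_;
      if (eng.index_lookup_finds_match(oe))               // step 2: lookup key = oe
        return TriVal::True_;
      if (check_null &&                                   // step 3: lookup key = NULL
          eng.index_lookup_finds_match(std::nullopt))
        return TriVal::Unknown_;
      return TriVal::False_;
    }

The TODO above about multi-part keys amounts to replacing the full scan in step 1 with a cheaper "ref" scan on the known, non-NULL key parts when index statistics favour it.
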
--- sql/item_subselect.cc | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 27cd376001e..432a8882f5f 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -769,43 +769,47 @@ my_decimal *Item_in_subselect::val_decimal(my_decimal *decimal_value) SYNOPSIS Item_in_subselect::single_value_transformer() - join - func + join Join object of the subquery (i.e. 'child' join). + func Subquery comparison creator DESCRIPTION Rewrite a single-column subquery using rule-based approach. The subquery - oe $cmp$ (SELECT sel FROM ... WHERE subq_where HAVING subq_having) + oe $cmp$ (SELECT ie FROM ... WHERE subq_where ... HAVING subq_having) First, try to convert the subquery to scalar-result subquery in one of the forms: - oe $cmp$ (SELECT MAX(...) ) // handled by Item_singlerow_subselect - - oe $cmp$ (SELECT ...) // handled by Item_maxminsubselect + - oe $cmp$ (SELECT ...) // handled by Item_maxmin_subselect If that fails, the subquery will be handled with class Item_in_optimizer, Inject the predicates into subquery, i.e. convert it to: - If the subquery has aggregates, GROUP BY, or HAVING, convert to - SELECT sel FROM ... HAVING subq_having AND + SELECT ie FROM ... HAVING subq_having AND trigcond(oe $cmp$ ref_or_null_helper) the addition is wrapped into trigger only when we want to distinguish between NULL and FALSE results. - - Else, if we don't care if subquery result is NULL or FALSE, convert to - - SELECT 1 ... WHERE (oe $CMP$ ie) AND subq_where + - Otherwise (no aggregates/GROUP BY/HAVING) convert it to one of the + following: - - Else convert to: + = If we don't need to distinguish between NULL and FALSE subquery: + + SELECT 1 FROM ... WHERE (oe $cmp$ ie) AND subq_where - SELECT 1 WHERE ... - WHERE subq_where AND trigcond((oe $CMP$ ie) OR ie IS NULL) - HAVING subq_having AND trigcond((ie)) + = If we need to distinguish between those: + + SELECT 1 FROM ... + WHERE subq_where AND trigcond((oe $cmp$ ie) OR (ie IS NULL)) + HAVING trigcond((ie)) RETURN - RES_OK - Transformed successfully (or done nothing?) + RES_OK - OK, either subquery was transformed, or appopriate + predicates where injected into it. RES_REDUCE - The subquery was reduced to non-subquery RES_ERROR - Error */ @@ -1010,10 +1014,7 @@ Item_in_subselect::single_value_transformer(JOIN *join, we can assign select_lex->having here, and pass 0 as last argument (reference) to fix_fields() */ - select_lex->having= - join->having= (join->having ? - new Item_cond_and(having, join->having) : - having); + select_lex->having= join->having= having; select_lex->having_fix_field= 1; /* we do not check join->having->fixed, because Item_and (from @@ -1608,7 +1609,6 @@ int subselect_uniquesubquery_engine::prepare() bool subselect_single_select_engine::no_rows() { -// return test(!join->send_records); return !item->assigned(); } @@ -1791,9 +1791,7 @@ int subselect_uniquesubquery_engine::scan_table() int error; TABLE *table= tab->table; DBUG_ENTER("subselect_uniquesubquery_engine::scan_table"); - empty_result_set= TRUE; - bool is_uncorrelated= !cond || !(cond->used_tables() & OUTER_REF_TABLE_BIT); if (table->file->inited) table->file->ha_index_end(); From 2a7cf59fc92ce322671bb687e2941275dd23d1c5 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 31 Oct 2006 17:31:56 -0800 Subject: [PATCH 16/57] Fixed bug #21727. This is a performance issue for queries with subqueries evaluation of which requires filesort. 
Allocation of memory for the sort buffer at each evaluation of a subquery may take a significant amount of time if the buffer is rather big. With the fix we allocate the buffer at the first evaluation of the subquery and reuse it at each subsequent evaluation. mysql-test/r/subselect.result: Added a test case for bug #21727. mysql-test/t/subselect.test: Added a test case for bug #21727. sql/item_subselect.h: Fixed bug #21727. This is a performance issue for queries with subqueries evaluation of which requires filesort. Added an implementation for Item_subselect::is_uncacheable() returning TRUE if the engine if the subselect is uncacheable. sql/mysql_priv.h: Fixed bug #21727. This is a performance issue for queries with subqueries evaluation of which requires filesort. Added a new boolean parameter to the filesort_free_buffers procedure. If the value of this parameter is TRUE the procedure frees the sort_keys buffpek buffers. sql/records.cc: Fixed bug #21727. This is a performance issue for queries with subqueries evaluation of which requires filesort. Added a new boolean parameter to the filesort_free_buffers procedure. If the value of this parameter is TRUE the procedure frees the sort_keys buffpek buffers. sql/sql_base.cc: Fixed bug #21727. Made sure that st_table::pos_in_table_list would be always initialized. sql/sql_select.cc: Fixed bug #21727. This is a performance issue for queries with subqueries evaluation of which requires filesort. Added a new boolean parameter to the filesort_free_buffers procedure. If the value of this parameter is TRUE the procedure frees the sort_keys buffpek buffers. sql/sql_show.cc: Fixed bug #21727. This is a performance issue for queries with subqueries evaluation of which requires filesort. Added a new boolean parameter to the filesort_free_buffers procedure. If the value of this parameter is TRUE the procedure frees the sort_keys buffpek buffers. sql/sql_table.cc: Fixed bug #21727. This is a performance issue for queries with subqueries evaluation of which requires filesort. Cleanup. sql/table.cc: Fixed bug #21727. This is a performance issue for queries with subqueries evaluation of which requires filesort. Added st_table_list::in_subselect() returning for a table the subselect that contains the FROM list this table is taken from (if there is any). sql/table.h: Fixed bug #21727. This is a performance issue for queries with subqueries evaluation of which requires filesort. Added fields for sort_keys and buffpek buffers to the FILESORT_INFO structure. 
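As a companion to the per-file notes above, here is the shape of the buffer-reuse idea in isolation. SortBufferCache is a made-up stand-in for the sort_keys/buffpek fields added to FILESORT_INFO, not the actual server structure.

    // Sketch only: allocate the sort buffer on the first evaluation, reuse it on
    // subsequent evaluations of an uncacheable subquery, free it on full cleanup.
    #include <cstddef>
    #include <cstdlib>

    struct SortBufferCache {
      unsigned char **sort_keys = nullptr;     // cached array of key pointers
      std::size_t     keys_capacity = 0;

      unsigned char **get(std::size_t keys, std::size_t rec_length)
      {
        if (!sort_keys)                        // first evaluation: allocate once
        {
          sort_keys = static_cast<unsigned char **>(
              std::malloc(keys * (rec_length + sizeof(char *))));
          keys_capacity = keys;
        }
        return sort_keys;                      // later evaluations: reuse the buffer
      }

      void free_buffers(bool full)             // analogue of filesort_free_buffers(table, full)
      {
        if (full && sort_keys)
        {
          std::free(sort_keys);
          sort_keys = nullptr;
          keys_capacity = 0;
        }
      }
    };

With a small sort_buffer_size the repeated allocate/free is cheap, but with an 8 MB buffer and thousands of subquery evaluations the allocation cost dominates, which is what the two SELECTs in the added test case compare.
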
--- mysql-test/r/subselect.result | 16 ++++++++++++++ mysql-test/t/subselect.test | 37 +++++++++++++++++++++++++++++++ sql/filesort.cc | 41 +++++++++++++++++++++++++++++------ sql/item_subselect.h | 7 ++++++ sql/mysql_priv.h | 2 +- sql/records.cc | 2 +- sql/sql_base.cc | 2 ++ sql/sql_select.cc | 6 ++--- sql/sql_show.cc | 2 +- sql/sql_table.cc | 3 --- sql/table.cc | 17 +++++++++++++++ sql/table.h | 5 +++++ 12 files changed, 124 insertions(+), 16 deletions(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index 57d6199675d..d05ec36a24f 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -3545,3 +3545,19 @@ FROM t1 GROUP BY t1.a LIMIT 1) 2 2 DROP TABLE t1,t2; +CREATE TABLE t1 (a int, b int auto_increment, PRIMARY KEY (b)); +CREATE TABLE t2 (x int auto_increment, y int, z int, +PRIMARY KEY (x), FOREIGN KEY (y) REFERENCES t1 (b)); +SET SESSION sort_buffer_size = 32 * 1024; +SELECT SQL_NO_CACHE COUNT(*) +FROM (SELECT a, b, (SELECT x FROM t2 WHERE y=b ORDER BY z DESC LIMIT 1) c +FROM t1) t; +COUNT(*) +3000 +SET SESSION sort_buffer_size = 8 * 1024 * 1024; +SELECT SQL_NO_CACHE COUNT(*) +FROM (SELECT a, b, (SELECT x FROM t2 WHERE y=b ORDER BY z DESC LIMIT 1) c +FROM t1) t; +COUNT(*) +3000 +DROP TABLE t1,t2; diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index 6d5082c360b..2f3ae3347e8 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -2426,3 +2426,40 @@ SELECT ( FROM t1 t2 GROUP BY t2.a; DROP TABLE t1,t2; + +# +# Bug #21727: Correlated subquery that requires filesort: +# slow with big sort_buffer_size +# + +CREATE TABLE t1 (a int, b int auto_increment, PRIMARY KEY (b)); +CREATE TABLE t2 (x int auto_increment, y int, z int, + PRIMARY KEY (x), FOREIGN KEY (y) REFERENCES t1 (b)); + +disable_query_log; +let $1=3000; +while ($1) +{ + eval INSERT INTO t1(a) VALUES(RAND()*1000); + eval SELECT MAX(b) FROM t1 INTO @id; + let $2=10; + while ($2) + { + eval INSERT INTO t2(y,z) VALUES(@id,RAND()*1000); + dec $2; + } + dec $1; +} +enable_query_log; + +SET SESSION sort_buffer_size = 32 * 1024; +SELECT SQL_NO_CACHE COUNT(*) + FROM (SELECT a, b, (SELECT x FROM t2 WHERE y=b ORDER BY z DESC LIMIT 1) c + FROM t1) t; + +SET SESSION sort_buffer_size = 8 * 1024 * 1024; +SELECT SQL_NO_CACHE COUNT(*) + FROM (SELECT a, b, (SELECT x FROM t2 WHERE y=b ORDER BY z DESC LIMIT 1) c + FROM t1) t; + +DROP TABLE t1,t2; diff --git a/sql/filesort.cc b/sql/filesort.cc index f13354d5c72..6e74d978eda 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -109,6 +109,8 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, DBUG_PUSH(""); /* No DBUG here */ #endif FILESORT_INFO table_sort; + TABLE_LIST *tab= table->pos_in_table_list; + Item_subselect *subselect= tab ? tab->containing_subselect() : 0; /* Don't use table->sort in filesort as it is also used by QUICK_INDEX_MERGE_SELECT. 
Work with a copy and put it back at the end @@ -121,7 +123,6 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, my_b_clear(&tempfile); my_b_clear(&buffpek_pointers); buffpek=0; - sort_keys= (uchar **) NULL; error= 1; bzero((char*) ¶m,sizeof(param)); param.sort_length= sortlength(thd, sortorder, s_length, &multi_byte_charset); @@ -202,13 +203,15 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, ulong old_memavl; ulong keys= memavl/(param.rec_length+sizeof(char*)); param.keys=(uint) min(records+1, keys); - if ((sort_keys= (uchar **) make_char_array(param.keys, param.rec_length, - MYF(0)))) + if (table_sort.sort_keys || + (table_sort.sort_keys= (uchar **) make_char_array(param.keys, param.rec_length, + MYF(0)))) break; old_memavl=memavl; if ((memavl=memavl/4*3) < min_sort_memory && old_memavl > min_sort_memory) memavl= min_sort_memory; } + sort_keys= table_sort.sort_keys; if (memavl < min_sort_memory) { my_error(ER_OUTOFMEMORY,MYF(ME_ERROR+ME_WAITTANG), @@ -235,8 +238,12 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, } else { - if (!(buffpek=read_buffpek_from_file(&buffpek_pointers, maxbuffer))) + if (!table_sort.buffpek && table_sort.buffpek_len < maxbuffer && + !(table_sort.buffpek= + (byte *) read_buffpek_from_file(&buffpek_pointers, maxbuffer))) goto err; + buffpek= (BUFFPEK *) table_sort.buffpek; + table_sort.buffpek_len= maxbuffer; close_cached_file(&buffpek_pointers); /* Open cached file if it isn't open */ if (! my_b_inited(outfile) && @@ -269,8 +276,14 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, err: if (param.tmp_buffer) x_free(param.tmp_buffer); - x_free((gptr) sort_keys); - x_free((gptr) buffpek); + if (!subselect || !subselect->is_uncacheable()) + { + x_free((gptr) sort_keys); + table_sort.sort_keys= 0; + x_free((gptr) buffpek); + table_sort.buffpek= 0; + table_sort.buffpek_len= 0; + } close_cached_file(&tempfile); close_cached_file(&buffpek_pointers); if (my_b_inited(outfile)) @@ -301,13 +314,27 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, } /* filesort */ -void filesort_free_buffers(TABLE *table) +void filesort_free_buffers(TABLE *table, bool full) { if (table->sort.record_pointers) { my_free((gptr) table->sort.record_pointers,MYF(0)); table->sort.record_pointers=0; } + if (full) + { + if (table->sort.sort_keys ) + { + x_free((gptr) table->sort.sort_keys); + table->sort.sort_keys= 0; + } + if (table->sort.buffpek) + { + x_free((gptr) table->sort.buffpek); + table->sort.buffpek= 0; + table->sort.buffpek_len= 0; + } + } if (table->sort.addon_buf) { my_free((char *) table->sort.addon_buf, MYF(0)); diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 45df4f3880d..1a8111069e6 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -114,6 +114,7 @@ public: single select and union subqueries only. 
*/ bool is_evaluated() const; + bool is_uncacheable() const; /* Used by max/min subquery to initialize value presence registration @@ -428,3 +429,9 @@ inline bool Item_subselect::is_evaluated() const return engine->is_executed(); } +inline bool Item_subselect::is_uncacheable() const +{ + return engine->uncacheable(); +} + + diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index ea3b3a9bd83..13e44b49b53 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1465,7 +1465,7 @@ void end_read_record(READ_RECORD *info); ha_rows filesort(THD *thd, TABLE *form,struct st_sort_field *sortorder, uint s_length, SQL_SELECT *select, ha_rows max_rows, ha_rows *examined_rows); -void filesort_free_buffers(TABLE *table); +void filesort_free_buffers(TABLE *table, bool full); void change_double_for_sort(double nr,byte *to); double my_double_round(double value, int dec, bool truncate); int get_quick_record(SQL_SELECT *select); diff --git a/sql/records.cc b/sql/records.cc index b352f9f395a..4fcbc25c10f 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -193,7 +193,7 @@ void end_read_record(READ_RECORD *info) } if (info->table) { - filesort_free_buffers(info->table); + filesort_free_buffers(info->table,0); (void) info->file->extra(HA_EXTRA_NO_CACHE); if (info->read_record != rr_quick) // otherwise quick_range does it (void) info->file->ha_index_or_rnd_end(); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 0939fb3a47e..3984ceac6a9 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1478,6 +1478,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, table->file->ft_handler= 0; if (table->timestamp_field) table->timestamp_field_type= table->timestamp_field->get_auto_set_type(); + table->pos_in_table_list= table_list; table_list->updatable= 1; // It is not derived table nor non-updatable VIEW DBUG_ASSERT(table->key_read == 0); DBUG_RETURN(table); @@ -2762,6 +2763,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, if (thd->slave_thread) slave_open_temp_tables++; } + tmp_table->pos_in_table_list= 0; DBUG_RETURN(tmp_table); } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 1dce7390ef1..d00795e0b14 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1260,14 +1260,14 @@ JOIN::reinit() exec_tmp_table1->file->extra(HA_EXTRA_RESET_STATE); exec_tmp_table1->file->delete_all_rows(); free_io_cache(exec_tmp_table1); - filesort_free_buffers(exec_tmp_table1); + filesort_free_buffers(exec_tmp_table1,0); } if (exec_tmp_table2) { exec_tmp_table2->file->extra(HA_EXTRA_RESET_STATE); exec_tmp_table2->file->delete_all_rows(); free_io_cache(exec_tmp_table2); - filesort_free_buffers(exec_tmp_table2); + filesort_free_buffers(exec_tmp_table2,0); } if (items0) set_items_ref_array(items0); @@ -6066,7 +6066,7 @@ void JOIN::cleanup(bool full) if (tables > const_tables) // Test for not-const tables { free_io_cache(table[const_tables]); - filesort_free_buffers(table[const_tables]); + filesort_free_buffers(table[const_tables],full); } if (full) diff --git a/sql/sql_show.cc b/sql/sql_show.cc index eb78f4fbdae..6367be4a1d4 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3977,7 +3977,7 @@ bool get_schema_tables_result(JOIN *join) table_list->table->file->extra(HA_EXTRA_RESET_STATE); table_list->table->file->delete_all_rows(); free_io_cache(table_list->table); - filesort_free_buffers(table_list->table); + filesort_free_buffers(table_list->table,1); table_list->table->null_row= 0; } else diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 8864cf3c8bc..2803bfb9917 100644 
--- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2266,7 +2266,6 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, goto send_result; } - table->table->pos_in_table_list= table; if ((table->table->db_stat & HA_READ_ONLY) && open_for_modify) { char buff[FN_REFLEN + MYSQL_ERRMSG_SIZE]; @@ -4256,8 +4255,6 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) } else { - t->pos_in_table_list= table; - if (t->file->table_flags() & HA_HAS_CHECKSUM && !(check_opt->flags & T_EXTEND)) protocol->store((ulonglong)t->file->checksum()); diff --git a/sql/table.cc b/sql/table.cc index 4dd3494f834..851b747dc83 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -3018,6 +3018,23 @@ void st_table_list::reinit_before_use(THD *thd) embedding->nested_join->join_list.head() == embedded); } +/* + Return subselect that contains the FROM list this table is taken from + + SYNOPSIS + st_table_list::containing_subselect() + + RETURN + Subselect item for the subquery that contains the FROM list + this table is taken from if there is any + 0 - otherwise + +*/ + +Item_subselect *st_table_list::containing_subselect() +{ + return (select_lex ? select_lex->master_unit()->item : 0); +} /***************************************************************************** ** Instansiate templates diff --git a/sql/table.h b/sql/table.h index 5136ac2c4db..f0190353328 100644 --- a/sql/table.h +++ b/sql/table.h @@ -18,6 +18,7 @@ /* Structs that defines the TABLE */ class Item; /* Needed by ORDER */ +class Item_subselect; class GRANT_TABLE; class st_select_lex_unit; class st_select_lex; @@ -68,6 +69,9 @@ enum frm_type_enum typedef struct st_filesort_info { IO_CACHE *io_cache; /* If sorted through filebyte */ + uchar **sort_keys; /* Buffer for sorting keys */ + byte *buffpek; /* Buffer for buffpek structures */ + uint buffpek_len; /* Max number of buffpeks in the buffer */ byte *addon_buf; /* Pointer to a buffer if sorted with fields */ uint addon_length; /* Length of the buffer */ struct st_sort_addon_field *addon_field; /* Pointer to the fields info */ @@ -678,6 +682,7 @@ typedef struct st_table_list procedure. */ void reinit_before_use(THD *thd); + Item_subselect *containing_subselect(); private: bool prep_check_option(THD *thd, uint8 check_opt_type); From d922d5b6be1d45069bd87614a2688c49ef25ad8b Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 1 Nov 2006 14:22:11 +0400 Subject: [PATCH 17/57] test result fixed --- mysql-test/r/type_newdecimal.result | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/r/type_newdecimal.result b/mysql-test/r/type_newdecimal.result index f24014da285..23e6f8266e9 100644 --- a/mysql-test/r/type_newdecimal.result +++ b/mysql-test/r/type_newdecimal.result @@ -1418,5 +1418,5 @@ insert into t1 values (CAST('10:11:12' AS time)); select * from t1; f1 20101112000000.000014 -20101112.000000 +101112.000000 drop table t1; From c095f98ff7d18e2e5de1adf629147f2b199fbfaf Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 3 Nov 2006 18:48:16 +0200 Subject: [PATCH 18/57] Bug #22457: Column alias in ORDER BY works, but not if in an expression The parser is allocating Item_field for references by name in ORDER BY expressions. Such expressions however may point not only to Item_field in the select list (or to a table column) but also to an arbitrary Item. This causes Item_field::fix_fields to throw an error about missing column. The fix substitutes Item_field for the reference with an Item_ref when not pointing to Item_field. 
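A simplified sketch of the substitution rule described above; the types below are invented for illustration and are not the server's Item hierarchy.

    // Sketch: a name in ORDER BY that matches a select-list alias stays a plain
    // field reference only if the alias names a column; otherwise it becomes a
    // reference node pointing at the select-list expression.
    #include <memory>
    #include <string>
    #include <vector>

    struct Expr {
      virtual ~Expr() = default;
      virtual bool is_field() const { return false; }
    };
    struct FieldExpr : Expr {
      std::string column;
      bool is_field() const override { return true; }
    };
    struct RefExpr : Expr {                      // analogue of wrapping in Item_ref
      const Expr *target;                        // points into the select list
      explicit RefExpr(const Expr *t) : target(t) {}
    };

    struct SelectItem { std::string alias; std::unique_ptr<Expr> expr; };

    std::unique_ptr<Expr> resolve_order_by_name(const std::string &name,
                                                const std::vector<SelectItem> &select_list)
    {
      for (const auto &item : select_list)
        if (item.alias == name)
        {
          if (item.expr->is_field())             // alias of a real column: keep field semantics
            return std::make_unique<FieldExpr>(
                *static_cast<const FieldExpr *>(item.expr.get()));
          return std::make_unique<RefExpr>(item.expr.get());  // expression: wrap in a ref
        }
      return nullptr;                            // not an alias: normal column resolution applies
    }

This mirrors the behaviour in the added tests: ORDER BY 30 - num works because num resolves through a reference to the a + 1 expression, while using the alias inside the select list itself (SELECT a + 1 AS num, num + 1) is still rejected.
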
mysql-test/r/order_by.result: Bug #22457: Column alias in ORDER BY works, but not if in an expression - test case mysql-test/t/order_by.test: Bug #22457: Column alias in ORDER BY works, but not if in an expression - test case sql/item.cc: Bug #22457: Column alias in ORDER BY works, but not if in an expression - transform the Item_field made by the parser into Item_ref if it doesn't point to Item_field and it is in allowed context --- mysql-test/r/order_by.result | 27 +++++++++++++++++++++++++++ mysql-test/t/order_by.test | 16 ++++++++++++++++ sql/item.cc | 33 ++++++++++++++++++++++++++++++--- 3 files changed, 73 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/order_by.result b/mysql-test/r/order_by.result index 8126e223f55..320bb89b62e 100644 --- a/mysql-test/r/order_by.result +++ b/mysql-test/r/order_by.result @@ -820,3 +820,30 @@ b a 20 1 10 2 DROP TABLE t1; +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2); +SELECT a + 1 AS num FROM t1 ORDER BY 30 - num; +num +3 +2 +SELECT CONCAT('test', a) AS str FROM t1 ORDER BY UPPER(str); +str +test1 +test2 +SELECT a + 1 AS num FROM t1 GROUP BY 30 - num; +num +3 +2 +SELECT a + 1 AS num FROM t1 HAVING 30 - num; +num +2 +3 +SELECT a + 1 AS num, num + 1 FROM t1; +ERROR 42S22: Unknown column 'num' in 'field list' +SELECT a + 1 AS num, (select num + 2 FROM t1 LIMIT 1) FROM t1; +num (select num + 2 FROM t1 LIMIT 1) +2 4 +3 5 +SELECT a.a + 1 AS num FROM t1 a JOIN t1 b ON num = b.a; +ERROR 42S22: Unknown column 'num' in 'on clause' +DROP TABLE t1; diff --git a/mysql-test/t/order_by.test b/mysql-test/t/order_by.test index 1664afc70f9..a8024be7032 100644 --- a/mysql-test/t/order_by.test +++ b/mysql-test/t/order_by.test @@ -559,4 +559,20 @@ INSERT INTO t1 VALUES (1,30), (2,20), (1,10), (2,30), (1,20), (2,10); DROP TABLE t1; +# +# Bug #22457: Column alias in ORDER BY works, but not if in an expression +# + +CREATE TABLE t1 (a INT); INSERT INTO t1 VALUES (1),(2); +SELECT a + 1 AS num FROM t1 ORDER BY 30 - num; +SELECT CONCAT('test', a) AS str FROM t1 ORDER BY UPPER(str); +SELECT a + 1 AS num FROM t1 GROUP BY 30 - num; +SELECT a + 1 AS num FROM t1 HAVING 30 - num; +--error 1054 +SELECT a + 1 AS num, num + 1 FROM t1; +SELECT a + 1 AS num, (select num + 2 FROM t1 LIMIT 1) FROM t1; +--error 1054 +SELECT a.a + 1 AS num FROM t1 a JOIN t1 b ON num = b.a; +DROP TABLE t1; + # End of 4.1 tests diff --git a/sql/item.cc b/sql/item.cc index 94f0a24fcc3..45d7856b2c1 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1761,10 +1761,37 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) Item** res= find_item_in_list(this, thd->lex->current_select->item_list, &counter, REPORT_EXCEPT_NOT_FOUND, ¬_used); - if (res != (Item **)not_found_item && (*res)->type() == Item::FIELD_ITEM) + if (res != (Item **)not_found_item) { - set_field((*((Item_field**)res))->field); - return 0; + if ((*res)->type() == Item::FIELD_ITEM) + { + /* + It's an Item_field referencing another Item_field in the select + list. + use the field from the Item_field in the select list and leave + the Item_field instance in place. + */ + set_field((*((Item_field**)res))->field); + return 0; + } + else + { + /* + It's not an Item_field in the select list so we must make a new + Item_ref to point to the Item in the select list and replace the + Item_field created by the parser with the new Item_ref. 
+ */ + Item_ref *rf= new Item_ref(db_name,table_name,field_name); + if (!rf) + return 1; + thd->change_item_tree(ref, rf); + /* + Because Item_ref never substitutes itself with other items + in Item_ref::fix_fields(), we can safely use the original + pointer to it even after fix_fields() + */ + return rf->fix_fields(thd, tables, ref) || rf->check_cols(1); + } } } From 7b126a91c00d7c2bca321da0de13f47220da1972 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 5 Nov 2006 22:42:23 +0400 Subject: [PATCH 19/57] merging mysql-test/r/gis-rtree.result: test result fixed --- mysql-test/r/gis-rtree.result | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/mysql-test/r/gis-rtree.result b/mysql-test/r/gis-rtree.result index 61a96afd8dc..95211ad9133 100644 --- a/mysql-test/r/gis-rtree.result +++ b/mysql-test/r/gis-rtree.result @@ -862,13 +862,6 @@ CHECK TABLE t1 EXTENDED; Table Op Msg_type Msg_text test.t1 check status OK DROP TABLE t1; -CREATE TABLE t1(foo GEOMETRY NOT NULL, SPATIAL INDEX(foo) ); -INSERT INTO t1(foo) VALUES (NULL); -ERROR 23000: Column 'foo' cannot be null -INSERT INTO t1() VALUES (); -ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field -INSERT INTO t1(foo) VALUES (''); -ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field CREATE TABLE t1 (foo GEOMETRY NOT NULL, SPATIAL INDEX(foo) ); INSERT INTO t1 (foo) VALUES (PointFromWKB(POINT(1,1))); INSERT INTO t1 (foo) VALUES (PointFromWKB(POINT(1,0))); @@ -880,3 +873,11 @@ SELECT 1 FROM t1 WHERE foo != PointFromWKB(POINT(0,0)); 1 1 DROP TABLE t1; +CREATE TABLE t1(foo GEOMETRY NOT NULL, SPATIAL INDEX(foo) ); +INSERT INTO t1(foo) VALUES (NULL); +ERROR 23000: Column 'foo' cannot be null +INSERT INTO t1() VALUES (); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +INSERT INTO t1(foo) VALUES (''); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +DROP TABLE t1; From 9f75d43ec864bfb84faffa9c1c943424f41f06db Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 6 Nov 2006 19:12:19 +0400 Subject: [PATCH 20/57] merging mysql-test/r/gis-rtree.result: merging fix --- mysql-test/r/gis-rtree.result | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/mysql-test/r/gis-rtree.result b/mysql-test/r/gis-rtree.result index 64b21ca988d..e8134a50496 100644 --- a/mysql-test/r/gis-rtree.result +++ b/mysql-test/r/gis-rtree.result @@ -862,13 +862,6 @@ CHECK TABLE t1 EXTENDED; Table Op Msg_type Msg_text test.t1 check status OK DROP TABLE t1; -CREATE TABLE t1(foo GEOMETRY NOT NULL, SPATIAL INDEX(foo) ); -INSERT INTO t1(foo) VALUES (NULL); -ERROR 23000: Column 'foo' cannot be null -INSERT INTO t1() VALUES (); -ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field -INSERT INTO t1(foo) VALUES (''); -ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field CREATE TABLE t1 (foo GEOMETRY NOT NULL, SPATIAL INDEX(foo) ); INSERT INTO t1 (foo) VALUES (PointFromWKB(POINT(1,1))); INSERT INTO t1 (foo) VALUES (PointFromWKB(POINT(1,0))); @@ -880,3 +873,11 @@ SELECT 1 FROM t1 WHERE foo != PointFromWKB(POINT(0,0)); 1 1 DROP TABLE t1; +CREATE TABLE t1(foo GEOMETRY NOT NULL, SPATIAL INDEX(foo) ); +INSERT INTO t1(foo) VALUES (NULL); +ERROR 23000: Column 'foo' cannot be null +INSERT INTO t1() VALUES (); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +INSERT INTO t1(foo) VALUES (''); +ERROR 22003: Cannot get geometry object from 
data you send to the GEOMETRY field +DROP TABLE t1; From 030d080d8b4eb8154aa4da13b28c5b81cba09b03 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 6 Nov 2006 22:33:18 +0400 Subject: [PATCH 21/57] bug #19491 (5.0-related additional fixes) include/my_time.h: we need to use it outside the my_time.cc mysql-test/r/gis-rtree.result: result fixed sql-common/my_time.c: 'static' removed sql/field.cc: checks for invalid datetimes added --- include/my_time.h | 2 ++ mysql-test/r/gis-rtree.result | 15 ++++++++------- sql-common/my_time.c | 4 ++-- sql/field.cc | 28 ++++++++++++++++++++++++++++ 4 files changed, 40 insertions(+), 9 deletions(-) diff --git a/include/my_time.h b/include/my_time.h index e52ef69475d..17cc10a0221 100644 --- a/include/my_time.h +++ b/include/my_time.h @@ -49,6 +49,8 @@ typedef long my_time_t; #define TIME_NO_ZERO_DATE (TIME_NO_ZERO_IN_DATE*2) #define TIME_INVALID_DATES (TIME_NO_ZERO_DATE*2) +my_bool check_date(const MYSQL_TIME *ltime, my_bool not_zero_date, + ulong flags, int *was_cut); enum enum_mysql_timestamp_type str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time, uint flags, int *was_cut); diff --git a/mysql-test/r/gis-rtree.result b/mysql-test/r/gis-rtree.result index 64b21ca988d..e8134a50496 100644 --- a/mysql-test/r/gis-rtree.result +++ b/mysql-test/r/gis-rtree.result @@ -862,13 +862,6 @@ CHECK TABLE t1 EXTENDED; Table Op Msg_type Msg_text test.t1 check status OK DROP TABLE t1; -CREATE TABLE t1(foo GEOMETRY NOT NULL, SPATIAL INDEX(foo) ); -INSERT INTO t1(foo) VALUES (NULL); -ERROR 23000: Column 'foo' cannot be null -INSERT INTO t1() VALUES (); -ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field -INSERT INTO t1(foo) VALUES (''); -ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field CREATE TABLE t1 (foo GEOMETRY NOT NULL, SPATIAL INDEX(foo) ); INSERT INTO t1 (foo) VALUES (PointFromWKB(POINT(1,1))); INSERT INTO t1 (foo) VALUES (PointFromWKB(POINT(1,0))); @@ -880,3 +873,11 @@ SELECT 1 FROM t1 WHERE foo != PointFromWKB(POINT(0,0)); 1 1 DROP TABLE t1; +CREATE TABLE t1(foo GEOMETRY NOT NULL, SPATIAL INDEX(foo) ); +INSERT INTO t1(foo) VALUES (NULL); +ERROR 23000: Column 'foo' cannot be null +INSERT INTO t1() VALUES (); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +INSERT INTO t1(foo) VALUES (''); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +DROP TABLE t1; diff --git a/sql-common/my_time.c b/sql-common/my_time.c index 93bf23ed284..eea36adcaf3 100644 --- a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -76,8 +76,8 @@ uint calc_days_in_year(uint year) 1 error */ -static my_bool check_date(const MYSQL_TIME *ltime, my_bool not_zero_date, - ulong flags, int *was_cut) +my_bool check_date(const MYSQL_TIME *ltime, my_bool not_zero_date, + ulong flags, int *was_cut) { if (not_zero_date) { diff --git a/sql/field.cc b/sql/field.cc index b05398afe75..a09c97fb356 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -5407,7 +5407,21 @@ int Field_newdate::store_time(TIME *ltime,timestamp_type type) long tmp; int error= 0; if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME) + { tmp=ltime->year*16*32+ltime->month*32+ltime->day; + if ((my_bool)check_date(ltime, tmp, + (TIME_FUZZY_DATE | + (current_thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), &error)) + { + char buff[12]; + String str(buff, sizeof(buff), &my_charset_latin1); + make_date((DATE_TIME_FORMAT *) 0, ltime, &str); + 
set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, + str.ptr(), str.length(), MYSQL_TIMESTAMP_DATE, 1); + } + } else { tmp=0; @@ -5616,8 +5630,22 @@ int Field_datetime::store_time(TIME *ltime,timestamp_type type) structure always fit into DATETIME range. */ if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME) + { tmp=((ltime->year*10000L+ltime->month*100+ltime->day)*LL(1000000)+ (ltime->hour*10000L+ltime->minute*100+ltime->second)); + if ((my_bool)check_date(ltime, tmp, + (TIME_FUZZY_DATE | + (current_thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), &error)) + { + char buff[12]; + String str(buff, sizeof(buff), &my_charset_latin1); + make_datetime((DATE_TIME_FORMAT *) 0, ltime, &str); + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, + str.ptr(), str.length(), MYSQL_TIMESTAMP_DATETIME,1); + } + } else { tmp=0; From c0487fb97057727c9587cc05150df64f118c2d31 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 7 Nov 2006 14:39:20 +0100 Subject: [PATCH 22/57] item.cc: merge fixes sql/item.cc: merge fixes --- sql/item.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sql/item.cc b/sql/item.cc index 6e243c3d603..dc92edd651d 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -3727,16 +3727,16 @@ bool Item_field::fix_fields(THD *thd, Item **reference) Item_ref to point to the Item in the select list and replace the Item_field created by the parser with the new Item_ref. */ - Item_ref *rf= new Item_ref(db_name,table_name,field_name); + Item_ref *rf= new Item_ref(context, db_name,table_name,field_name); if (!rf) return 1; - thd->change_item_tree(ref, rf); + thd->change_item_tree(reference, rf); /* Because Item_ref never substitutes itself with other items in Item_ref::fix_fields(), we can safely use the original pointer to it even after fix_fields() */ - return rf->fix_fields(thd, tables, ref) || rf->check_cols(1); + return rf->fix_fields(thd, reference) || rf->check_cols(1); } } } From 5af4fd256321748a5ac7c8d4407f8ce977345e04 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 7 Nov 2006 18:16:17 +0200 Subject: [PATCH 23/57] Bug #11032: getObject() returns a String for a sub-query of type datetime - When returning metadata for scalar subqueries the actual type of the column was calculated based on the value type, which limits the actual type of a scalar subselect to the set of (currently) 3 basic types : integer, double precision or string. This is the reason that columns of types other then the basic ones (e.g. date/time) are reported as being of the corresponding basic type. Fixed by storing/returning information for the column type in addition to the result type. 
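A minimal illustration of why two properties are tracked; the enums below are invented stand-ins for the server's Item_result and enum_field_types.

    // Sketch: the coarse result type drives evaluation, while the precise
    // column type drives metadata and CREATE TABLE ... SELECT.
    enum class ResultType { String, Int, Real };              // coarse "basic" types
    enum class FieldType  { VarString, Long, Double, Datetime };

    struct ScalarSubqueryMeta {
      ResultType res_type   = ResultType::String;
      FieldType  field_type = FieldType::VarString;
    };

    struct SelectColumn { ResultType res; FieldType field; };

    // Analogue of subselect_engine::set_row() for a single-column subquery.
    ScalarSubqueryMeta describe(const SelectColumn &col)
    {
      return ScalarSubqueryMeta{ col.res, col.field };        // e.g. { String, Datetime }
    }

Before the fix only res_type survived, so a DATETIME column returned by a scalar subquery was reported through its basic string type; keeping field_type alongside it is what lets sub_a in the added test come out as datetime.
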
mysql-test/r/subselect.result: Bug #11032: getObject() returns a String for a sub-query of type datetime - test case mysql-test/t/subselect.test: Bug #11032: getObject() returns a String for a sub-query of type datetime - test case sql/item_subselect.cc: Bug #11032: getObject() returns a String for a sub-query of type datetime - store and return the field type as well in addition to result type for single row subqueries sql/item_subselect.h: Bug #11032: getObject() returns a String for a sub-query of type datetime - store and return the field type as well in addition to result type for single row subqueries --- mysql-test/r/subselect.result | 17 +++++++++++++++++ mysql-test/t/subselect.test | 16 ++++++++++++++++ sql/item_subselect.cc | 34 ++++++++++++++++++++++++---------- sql/item_subselect.h | 7 +++++++ 4 files changed, 64 insertions(+), 10 deletions(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index 28fbfc86657..a3d1bafcb0d 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -2997,3 +2997,20 @@ a a IN (SELECT a FROM t1) 2 NULL 3 1 DROP TABLE t1,t2; +CREATE TABLE t1 (a DATETIME); +INSERT INTO t1 VALUES ('1998-09-23'), ('2003-03-25'); +CREATE TABLE t2 AS SELECT +(SELECT a FROM t1 WHERE a < '2000-01-01') AS sub_a +FROM t1 WHERE a > '2000-01-01'; +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `sub_a` datetime default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +CREATE TABLE t3 AS (SELECT a FROM t1 WHERE a < '2000-01-01') UNION (SELECT a FROM t1 WHERE a > '2000-01-01'); +SHOW CREATE TABLE t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `a` datetime default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1,t2,t3; diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index ac035c72d18..11b7fcc4d8f 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -1965,4 +1965,20 @@ SELECT a, a IN (SELECT a FROM t1) FROM t2; DROP TABLE t1,t2; +# +# Bug #11302: getObject() returns a String for a sub-query of type datetime +# +CREATE TABLE t1 (a DATETIME); +INSERT INTO t1 VALUES ('1998-09-23'), ('2003-03-25'); + +CREATE TABLE t2 AS SELECT + (SELECT a FROM t1 WHERE a < '2000-01-01') AS sub_a + FROM t1 WHERE a > '2000-01-01'; +SHOW CREATE TABLE t2; + +CREATE TABLE t3 AS (SELECT a FROM t1 WHERE a < '2000-01-01') UNION (SELECT a FROM t1 WHERE a > '2000-01-01'); +SHOW CREATE TABLE t3; + +DROP TABLE t1,t2,t3; + # End of 4.1 tests diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 1ab81d1862d..cd1f8f83821 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -391,6 +391,15 @@ enum Item_result Item_singlerow_subselect::result_type() const return engine->type(); } +/* + Don't rely on the result type to calculate field type. + Ask the engine instead. +*/ +enum_field_types Item_singlerow_subselect::field_type() const +{ + return engine->field_type(); +} + void Item_singlerow_subselect::fix_length_and_dec() { if ((max_columns= engine->cols()) == 1) @@ -1357,31 +1366,35 @@ int subselect_uniquesubquery_engine::prepare() return 1; } -static Item_result set_row(List &item_list, Item *item, - Item_cache **row, bool *maybe_null) +/* + makes storage for the output values for the subquery and calcuates + their data and column types and their nullability. 
+*/ +void subselect_engine::set_row(List &item_list, Item_cache **row) { - Item_result res_type= STRING_RESULT; Item *sel_item; List_iterator_fast li(item_list); + res_type= STRING_RESULT; + res_field_type= FIELD_TYPE_VAR_STRING; for (uint i= 0; (sel_item= li++); i++) { item->max_length= sel_item->max_length; res_type= sel_item->result_type(); + res_field_type= sel_item->field_type(); item->decimals= sel_item->decimals; - *maybe_null= sel_item->maybe_null; + maybe_null= sel_item->maybe_null; if (!(row[i]= Item_cache::get_cache(res_type))) - return STRING_RESULT; // we should return something + return; row[i]->setup(sel_item); } if (item_list.elements > 1) res_type= ROW_RESULT; - return res_type; } void subselect_single_select_engine::fix_length_and_dec(Item_cache **row) { DBUG_ASSERT(row || select_lex->item_list.elements==1); - res_type= set_row(select_lex->item_list, item, row, &maybe_null); + set_row(select_lex->item_list, row); item->collation.set(row[0]->collation); if (cols() != 1) maybe_null= 0; @@ -1393,13 +1406,14 @@ void subselect_union_engine::fix_length_and_dec(Item_cache **row) if (unit->first_select()->item_list.elements == 1) { - res_type= set_row(unit->types, item, row, &maybe_null); + set_row(unit->types, row); item->collation.set(row[0]->collation); } else { - bool fake= 0; - res_type= set_row(unit->types, item, row, &fake); + bool maybe_null_saved= maybe_null; + set_row(unit->types, row); + maybe_null= maybe_null_saved; } } diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 93171ad64a1..7b064bfe92c 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -142,6 +142,7 @@ public: longlong val_int (); String *val_str (String *); enum Item_result result_type() const; + enum_field_types field_type() const; void fix_length_and_dec(); uint cols(); @@ -273,6 +274,7 @@ protected: THD *thd; /* pointer to current THD */ Item_subselect *item; /* item, that use this engine */ enum Item_result res_type; /* type of results */ + enum_field_types res_field_type; /* column type of the results */ bool maybe_null; /* may be null (first item in select) */ public: @@ -282,6 +284,7 @@ public: result= res; item= si; res_type= STRING_RESULT; + res_field_type= FIELD_TYPE_VAR_STRING; maybe_null= 0; } virtual ~subselect_engine() {}; // to satisfy compiler @@ -296,6 +299,7 @@ public: virtual uint cols()= 0; /* return number of columnss in select */ virtual uint8 uncacheable()= 0; /* query is uncacheable */ enum Item_result type() { return res_type; } + enum_field_types field_type() { return res_field_type; } virtual void exclude()= 0; bool may_be_null() { return maybe_null; }; virtual table_map upper_select_const_tables()= 0; @@ -303,6 +307,9 @@ public: virtual void print(String *str)= 0; virtual int change_item(Item_subselect *si, select_subselect *result)= 0; virtual bool no_tables()= 0; + +protected: + void set_row(List &item_list, Item_cache **row); }; From 955e0b1304e4de75a5b6f14d20b9705255e90b56 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 7 Nov 2006 21:02:41 +0400 Subject: [PATCH 24/57] bug fixed sql/field.cc: datatime length is 19, not 12 --- sql/field.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/field.cc b/sql/field.cc index a09c97fb356..61bbea5e615 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -5639,7 +5639,7 @@ int Field_datetime::store_time(TIME *ltime,timestamp_type type) (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | MODE_INVALID_DATES))), &error)) { - char buff[12]; + char buff[19]; String str(buff, sizeof(buff), &my_charset_latin1); 
make_datetime((DATE_TIME_FORMAT *) 0, ltime, &str); set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, From 1d2e456f59a81748091a4cd0657940c085dc4af4 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 8 Nov 2006 02:26:50 +0300 Subject: [PATCH 25/57] BUG#24056: Crash in subquery: Don't assume that condition that was pushed down into subquery has produced exactly one KEY_FIELD element - it could produce several or none at all, handle all of those cases. --- sql/sql_select.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sql/sql_select.cc b/sql/sql_select.cc index f92217302f8..cfc068cec86 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -2951,10 +2951,12 @@ add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, join->unit->item->substype() == Item_subselect::IN_SUBS && !join->unit->first_select()->next_select()) { + KEY_FIELD *save= *key_fields; add_key_fields(join, key_fields, and_level, cond, usable_tables, sargables); // Indicate that this ref access candidate is for subquery lookup: - (*key_fields)[-1].outer_ref= TRUE; + for (; save != *key_fields; save++) + save->outer_ref= TRUE; } return; } From 93b36e8e0524a931809927ab48723bf148a29e1c Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 8 Nov 2006 15:15:56 +0200 Subject: [PATCH 26/57] Make a new test target for autopush.pl to run memory based tests --- Makefile.am | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Makefile.am b/Makefile.am index 2aefbd05283..48f84269313 100644 --- a/Makefile.am +++ b/Makefile.am @@ -124,3 +124,10 @@ test-force-pl: cd mysql-test; \ ./mysql-test-run.pl --force && \ ./mysql-test-run.pl --ps-protocol --force + +#used by autopush.pl to run memory based tests +test-force-mem: + cd mysql-test; \ + ./mysql-test-run.pl --force --mem && \ + ./mysql-test-run.pl --ps-protocol --force --mem + From 1df205946285c03f05bc73ef6af91ab86d1a939c Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 8 Nov 2006 20:41:47 +0400 Subject: [PATCH 27/57] merging sql/field.cc: here we can get negative values --- sql/field.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/field.cc b/sql/field.cc index 0af690608cc..802f3725f25 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -4926,7 +4926,7 @@ int Field_time::store_time(TIME *ltime, timestamp_type type) (ltime->minute * 100 + ltime->second); if (ltime->neg) tmp= -tmp; - return Field_time::store((longlong) tmp, TRUE); + return Field_time::store((longlong) tmp, FALSE); } From 0119e00f0dd6639c3f5a80aa67e40b0a0d670e99 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 9 Nov 2006 11:33:43 +0400 Subject: [PATCH 28/57] merging mysql-test/t/disabled.def: #22457 isn't fixed in 5.1 after merging from 5.0 --- mysql-test/t/disabled.def | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 8c4a76c78a9..44eec94f2db 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -44,3 +44,4 @@ rpl_multi_engine : BUG#22583 2006-09-23 lars #rpl_truncate_7ndb : BUG#21298 2006-07-27 msvensson ndb_binlog_discover : bug#21806 2006-08-24 ndb_autodiscover3 : bug#21806 +order_by : GKodinov - please fix bug #22457 in 5.1 also From 4b60bdc9c7e4cff239ec8e1839a497ff64c8cf09 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 9 Nov 2006 12:41:15 +0400 Subject: [PATCH 29/57] merging mysql-test/r/subselect.result: result fixed --- mysql-test/r/subselect.result | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/subselect.result 
b/mysql-test/r/subselect.result index 0b72814797c..51d8227b9f3 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -3025,13 +3025,13 @@ FROM t1 WHERE a > '2000-01-01'; SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `sub_a` datetime default NULL + `sub_a` datetime DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 CREATE TABLE t3 AS (SELECT a FROM t1 WHERE a < '2000-01-01') UNION (SELECT a FROM t1 WHERE a > '2000-01-01'); SHOW CREATE TABLE t3; Table Create Table t3 CREATE TABLE `t3` ( - `a` datetime default NULL + `a` datetime DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1,t2,t3; create table t1 (df decimal(5,1)); From bffe8b8a3cab1c8a617428fae922d2b15bdb437b Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 9 Nov 2006 12:49:39 +0400 Subject: [PATCH 30/57] merging mysql-test/r/subselect3.result: 'filtered' column added mysql-test/r/udf.result: 'filtered' column added --- mysql-test/r/subselect3.result | 32 ++++++++++++++++---------------- mysql-test/r/udf.result | 24 ++++++++++++------------ 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/mysql-test/r/subselect3.result b/mysql-test/r/subselect3.result index 5ab8e448b39..10cc2729f3e 100644 --- a/mysql-test/r/subselect3.result +++ b/mysql-test/r/subselect3.result @@ -26,9 +26,9 @@ NULL 2 NULL explain extended select a, oref, a in (select max(ie) from t1 where oref=t2.oref group by grp) from t2; -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t2 ALL NULL NULL NULL NULL 5 -2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 6 Using where; Using temporary; Using filesort +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 5 100.00 +2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 6 100.00 Using where; Using temporary; Using filesort Warnings: Note 1276 Field or reference 't2.oref' of SELECT #2 was resolved in SELECT #1 Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`oref` AS `oref`,(`test`.`t2`.`a`,(select max(`test`.`t1`.`ie`) AS `max(ie)` from `test`.`t1` where (`test`.`t1`.`oref` = `test`.`t2`.`oref`) group by `test`.`t1`.`grp` having trigcond(((`test`.`t2`.`a`) = (max(`test`.`t1`.`ie`)))))) AS `a in (select max(ie) @@ -36,9 +36,9 @@ from t1 where oref=t2.oref group by grp)` from `test`.`t2` explain extended select a, oref from t2 where a in (select max(ie) from t1 where oref=t2.oref group by grp); -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using where -2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 6 Using where; Using temporary; Using filesort +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 5 100.00 Using where +2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 6 100.00 Using where; Using temporary; Using filesort Warnings: Note 1276 Field or reference 't2.oref' of SELECT #2 was resolved in SELECT #1 Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`oref` AS `oref` from `test`.`t2` where (`test`.`t2`.`a`,(select max(`test`.`t1`.`ie`) AS `max(ie)` from `test`.`t1` where (`test`.`t1`.`oref` = `test`.`t2`.`oref`) group by `test`.`t1`.`grp` having ((`test`.`t2`.`a`) = (max(`test`.`t1`.`ie`))))) @@ -56,9 +56,9 @@ select ' ^ This must show 11' Z; Z ^ This must show 11 explain extended select a in (select max(ie) from t1 where oref=4 group by grp) from t3; -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t3 ALL 
NULL NULL NULL NULL 2 -2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 6 Using where; Using temporary; Using filesort +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 +2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 6 100.00 Using where; Using temporary; Using filesort Warnings: Note 1003 select (`test`.`t3`.`a`,(select max(`test`.`t1`.`ie`) AS `max(ie)` from `test`.`t1` where (`test`.`t1`.`oref` = 4) group by `test`.`t1`.`grp` having trigcond(((`test`.`t3`.`a`) = (max(`test`.`t1`.`ie`)))))) AS `a in (select max(ie) from t1 where oref=4 group by grp)` from `test`.`t3` drop table t1, t2, t3; @@ -79,9 +79,9 @@ oref a Z 4 NULL 0 explain extended select oref, a, a in (select a from t1 where oref=t2.oref) Z from t2; -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t2 ALL NULL NULL NULL NULL 4 -2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 2 Using index; Using where +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 4 100.00 +2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 2 100.00 Using index; Using where Warnings: Note 1276 Field or reference 't2.oref' of SELECT #2 was resolved in SELECT #1 Note 1003 select `test`.`t2`.`oref` AS `oref`,`test`.`t2`.`a` AS `a`,(`test`.`t2`.`a`,(((`test`.`t2`.`a`) in t1 on a checking NULL where (`test`.`t1`.`oref` = `test`.`t2`.`oref`)))) AS `Z` from `test`.`t2` @@ -143,10 +143,10 @@ explain extended select a, oref, t3.a in (select t1.a from t1, t2 where t1.b=t2.a and t2.b=t3.oref) Z from t3; -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t3 ALL NULL NULL NULL NULL 3 -2 DEPENDENT SUBQUERY t1 ref_or_null a a 5 func 4 Using where -2 DEPENDENT SUBQUERY t2 ref a a 5 test.t1.b 1 Using where +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t3 ALL NULL NULL NULL NULL 3 100.00 +2 DEPENDENT SUBQUERY t1 ref_or_null a a 5 func 4 100.00 Using where +2 DEPENDENT SUBQUERY t2 ref a a 5 test.t1.b 1 100.00 Using where Warnings: Note 1276 Field or reference 't3.oref' of SELECT #2 was resolved in SELECT #1 Note 1003 select `test`.`t3`.`a` AS `a`,`test`.`t3`.`oref` AS `oref`,(`test`.`t3`.`a`,(select 1 AS `Not_used` from `test`.`t1` join `test`.`t2` where ((`test`.`t2`.`a` = `test`.`t1`.`b`) and (`test`.`t2`.`b` = `test`.`t3`.`oref`) and trigcond((((`test`.`t3`.`a`) = `test`.`t1`.`a`) or isnull(`test`.`t1`.`a`)))) having trigcond((`test`.`t1`.`a`)))) AS `Z` from `test`.`t3` diff --git a/mysql-test/r/udf.result b/mysql-test/r/udf.result index 30c335a12c7..16a1c73af80 100644 --- a/mysql-test/r/udf.result +++ b/mysql-test/r/udf.result @@ -118,13 +118,13 @@ myfunc_int(a AS attr_name) 1 2 EXPLAIN EXTENDED SELECT myfunc_int(a AS attr_name) FROM t1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 2 +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Warnings: Note 1003 select myfunc_int(`test`.`t1`.`a` AS `attr_name`) AS `myfunc_int(a AS attr_name)` from `test`.`t1` EXPLAIN EXTENDED SELECT myfunc_int(a) FROM t1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 2 +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Warnings: Note 1003 select myfunc_int(`test`.`t1`.`a` AS `a`) AS `myfunc_int(a)` from 
`test`.`t1` SELECT a,c FROM v1; @@ -146,23 +146,23 @@ c 1 2 EXPLAIN EXTENDED SELECT myfunc_int(MIN(b) xx) as c FROM t1 GROUP BY a; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using temporary; Using filesort Warnings: Note 1003 select myfunc_int(min(`test`.`t1`.`b`) AS `xx`) AS `c` from `test`.`t1` group by `test`.`t1`.`a` EXPLAIN EXTENDED SELECT test.fn(MIN(b)) as c FROM t1 GROUP BY a; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using temporary; Using filesort Warnings: Note 1003 select `test`.`fn`(min(`test`.`t1`.`b`)) AS `c` from `test`.`t1` group by `test`.`t1`.`a` EXPLAIN EXTENDED SELECT myfunc_int(fn(MIN(b))) as c FROM t1 GROUP BY a; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using temporary; Using filesort Warnings: Note 1003 select myfunc_int(`test`.`fn`(min(`test`.`t1`.`b`)) AS `fn(MIN(b))`) AS `c` from `test`.`t1` group by `test`.`t1`.`a` EXPLAIN EXTENDED SELECT myfunc_int(test.fn(MIN(b))) as c FROM t1 GROUP BY a; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using temporary; Using filesort Warnings: Note 1003 select myfunc_int(`test`.`fn`(min(`test`.`t1`.`b`)) AS `test.fn(MIN(b))`) AS `c` from `test`.`t1` group by `test`.`t1`.`a` SELECT myfunc_int(MIN(b) xx) as c FROM t1 GROUP BY a; From 59b45b5b0ab0bfe470919ba5175e79307928cd79 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 9 Nov 2006 16:55:42 +0200 Subject: [PATCH 31/57] Bug #20191: getTableName gives wrong or inconsistent result when using VIEWs When compiling GROUP BY Item_ref instances are dereferenced in setup_copy_fields(), i.e. replaced with the corresponding Item_field (if they point to one) or Item_copy_string for the other cases. Since the Item_ref (in the Item_field case) is no longer used the information about the aliases stored in it is lost. 
Fixed by preserving the column, table and DB alias on dereferencing Item_ref mysql-test/r/metadata.result: Bug #20191: getTableName gives wrong or inconsistent result when using VIEWs - test case mysql-test/t/metadata.test: Bug #20191: getTableName gives wrong or inconsistent result when using VIEWs - test case sql/item.cc: Bug #20191: getTableName gives wrong or inconsistent result when using VIEWs - use the table and db name to fill up the metadata for columns sql/sql_select.cc: Bug #20191: getTableName gives wrong or inconsistent result when using VIEWs - preserve the field, table and DB name on dereferencing an Item_ref --- mysql-test/r/metadata.result | 34 ++++++++++++++++++++++++++++++++++ mysql-test/t/metadata.test | 19 +++++++++++++++++++ sql/item.cc | 4 ++++ sql/sql_select.cc | 11 +++++++++-- 4 files changed, 66 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/metadata.result b/mysql-test/r/metadata.result index 50b0b6ae294..34e961395c4 100644 --- a/mysql-test/r/metadata.result +++ b/mysql-test/r/metadata.result @@ -96,3 +96,37 @@ i 2 affected rows: 1 affected rows: 0 +create table t1 (id int(10)); +insert into t1 values (1); +CREATE VIEW v1 AS select t1.id as id from t1; +CREATE VIEW v2 AS select t1.id as renamed from t1; +CREATE VIEW v3 AS select t1.id + 12 as renamed from t1; +select * from v1 group by id limit 1; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def test t1 v1 id id 3 10 1 Y 32768 0 63 +id +1 +select * from v1 group by id limit 0; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def test t1 v1 id id 3 10 0 Y 32768 0 63 +id +select * from v1 where id=1000 group by id; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def test t1 v1 id id 3 10 0 Y 32768 0 63 +id +select * from v1 where id=1 group by id; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def test t1 v1 id id 3 10 1 Y 32768 0 63 +id +1 +select * from v2 where renamed=1 group by renamed; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def test t1 v2 id renamed 3 10 1 Y 32768 0 63 +renamed +1 +select * from v3 where renamed=1 group by renamed; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def v3 renamed 8 12 0 Y 32896 0 63 +renamed +drop table t1; +drop view v1,v2,v3; diff --git a/mysql-test/t/metadata.test b/mysql-test/t/metadata.test index 65338448555..a6ebfdc14c1 100644 --- a/mysql-test/t/metadata.test +++ b/mysql-test/t/metadata.test @@ -61,4 +61,23 @@ drop table t1;// delimiter ;// --disable_info +# +# Bug #20191: getTableName gives wrong or inconsistent result when using VIEWs +# +--enable_metadata +create table t1 (id int(10)); +insert into t1 values (1); +CREATE VIEW v1 AS select t1.id as id from t1; +CREATE VIEW v2 AS select t1.id as renamed from t1; +CREATE VIEW v3 AS select t1.id + 12 as renamed from t1; +select * from v1 group by id limit 1; +select * from v1 group by id limit 0; +select * from v1 where id=1000 group by id; +select * from v1 where id=1 group by id; +select * from v2 where renamed=1 group by renamed; +select * from v3 where renamed=1 group by renamed; +drop table t1; +drop view v1,v2,v3; +--disable_metadata + # End of 4.1 tests diff --git a/sql/item.cc b/sql/item.cc index 
dc92edd651d..76f0332b4ab 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -4202,6 +4202,10 @@ void Item_field::make_field(Send_field *tmp_field) DBUG_ASSERT(tmp_field->table_name != 0); if (name) tmp_field->col_name=name; // Use user supplied name + if (table_name) + tmp_field->table_name= table_name; + if (db_name) + tmp_field->db_name= db_name; } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index f92217302f8..db8091501d0 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -13594,9 +13594,16 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, if (real_pos->type() == Item::FIELD_ITEM) { Item_field *item; - pos= real_pos; - if (!(item= new Item_field(thd, ((Item_field*) pos)))) + if (!(item= new Item_field(thd, ((Item_field*) real_pos)))) goto err; + if (pos->type() == Item::REF_ITEM) + { + /* preserve the names of the ref when dereferncing */ + Item_ref *ref= (Item_ref *) pos; + item->db_name= ref->db_name; + item->table_name= ref->table_name; + item->name= ref->name; + } pos= item; if (item->field->flags & BLOB_FLAG) { From bbc5d13891e09447d399112dfa9fa1b9da929b64 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 10 Nov 2006 12:43:44 +0200 Subject: [PATCH 32/57] fixed bad 5.0->5.1 merge --- mysql-test/t/disabled.def | 1 - sql/item.cc | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 44eec94f2db..8c4a76c78a9 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -44,4 +44,3 @@ rpl_multi_engine : BUG#22583 2006-09-23 lars #rpl_truncate_7ndb : BUG#21298 2006-07-27 msvensson ndb_binlog_discover : bug#21806 2006-08-24 ndb_autodiscover3 : bug#21806 -order_by : GKodinov - please fix bug #22457 in 5.1 also diff --git a/sql/item.cc b/sql/item.cc index 58c2cb94a19..ba289106f4f 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -3741,8 +3741,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference) Item** res= find_item_in_list(this, thd->lex->current_select->item_list, &counter, REPORT_EXCEPT_NOT_FOUND, ¬_used); - if (res != (Item **)not_found_item && - (*res)->type() == Item::FIELD_ITEM) + if (res != (Item **)not_found_item) { if ((*res)->type() == Item::FIELD_ITEM) { From f53af7b8e5a8913af0625031304eb824b6330e4b Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 13 Nov 2006 12:28:55 +0200 Subject: [PATCH 33/57] Bug #19216: Client crashes on long SELECT The server sends a number of columns to the client. It uses a limited "fast" function for that instead of the general one. This fast function cannot send numbers larger than 2 bytes. This causes the client to expect smaller number of columns. The client writes outside of the allocated memory buffer as a result. Fixed the server to use the general function to send column count. Fixed the client to check the column count before writing column data. mysql-test/t/mysql_client.test: Bug #19216: Client crashes on long SELECT - test case sql/protocol.cc: Bug #19216: Client crashes on long SELECT - renamed the function for bether comprehention and made it local - used the right (non-local) function to transfer the column count in Protocol::send_fields sql/protocol.h: Bug #19216: Client crashes on long SELECT - made optimized net_store_length local sql-common/client.c: Bug #19216: Client crashes on long SELECT - fixed the client to check for older servers (without the fix). 
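For context, the column count travels on the wire as a length-encoded integer, and the limited "fast" helper could only produce the forms with at most two bytes of payload. Below is a minimal sketch of the full encoding as commonly documented for the protocol (an assumption for illustration; the name store_length_encoded is hypothetical and this is not the server's own helper):

/*
  Sketch of the length-encoded integer layout.  A column count of 68000
  (the test case below builds a SELECT with 3400 * 20 = 68000 columns)
  does not fit the 2-byte form, so an encoder limited to that form cannot
  transmit it correctly.
*/
#include <stdint.h>

static unsigned char *store_length_encoded(unsigned char *to, uint64_t n)
{
  int i;

  if (n < 251)                          /* single-byte form */
  {
    *to++= (unsigned char) n;
    return to;
  }
  if (n < 65536)                        /* 0xfc marker + 2 bytes, little endian */
  {
    *to++= 252;
    *to++= (unsigned char) (n & 0xff);
    *to++= (unsigned char) (n >> 8);
    return to;
  }
  if (n < 16777216)                     /* 0xfd marker + 3 bytes */
  {
    *to++= 253;
    *to++= (unsigned char) (n & 0xff);
    *to++= (unsigned char) ((n >> 8) & 0xff);
    *to++= (unsigned char) ((n >> 16) & 0xff);
    return to;
  }
  *to++= 254;                           /* 0xfe marker + 8 bytes */
  for (i= 0; i < 8; i++)
    *to++= (unsigned char) ((n >> (8 * i)) & 0xff);
  return to;
}
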
--- mysql-test/t/mysql_client.test | 18 ++++++++++++++++++ sql-common/client.c | 2 ++ sql/protocol.cc | 18 +++++++++--------- sql/protocol.h | 1 - 4 files changed, 29 insertions(+), 10 deletions(-) diff --git a/mysql-test/t/mysql_client.test b/mysql-test/t/mysql_client.test index b382357dacf..7bd7c762c5c 100644 --- a/mysql-test/t/mysql_client.test +++ b/mysql-test/t/mysql_client.test @@ -33,3 +33,21 @@ # --exec echo 'help' | $MYSQL > $MYSQLTEST_VARDIR/tmp/bug20328.tmp --exec echo 'help ' | $MYSQL > $MYSQLTEST_VARDIR/tmp/bug20328.tmp + +# +# Bug #19216: Client crashes on long SELECT +# +--exec echo "select" > $MYSQLTEST_VARDIR/tmp/b19216.tmp +# 3400 * 20 makes 68000 columns that is more than the max number that can fit +# in a 16 bit number. +let $i= 3400; +while ($i) +{ + --exec echo "'a','a','a','a','a','a','a','a','a','a','a','a','a','a','a','a','a','a','a','a'," >> $MYSQLTEST_VARDIR/tmp/b19216.tmp + dec $i; +} + +--exec echo "'b';" >> $MYSQLTEST_VARDIR/tmp/b19216.tmp +--disable_query_log +--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/b19216.tmp >/dev/null +--enable_query_log diff --git a/sql-common/client.c b/sql-common/client.c index ff5f1ef150a..fb32eea33c7 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -1173,6 +1173,8 @@ unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields, for (row=data->data; row ; row = row->next,field++) { uchar *pos; + /* fields count may be wrong */ + DBUG_ASSERT ((field - result) < fields); cli_fetch_lengths(&lengths[0], row->data, default_value ? 8 : 7); field->catalog = strdup_root(alloc,(char*) row->data[0]); field->db = strdup_root(alloc,(char*) row->data[1]); diff --git a/sql/protocol.cc b/sql/protocol.cc index a2287740f1e..7c7dfaf7bef 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -43,7 +43,7 @@ bool Protocol_prep::net_store_data(const char *from, uint length) packet->realloc(packet_length+9+length)) return 1; char *to=(char*) net_store_length((char*) packet->ptr()+packet_length, - (ulonglong) length); + length); memcpy(to,from,length); packet->length((uint) (to+length-packet->ptr())); return 0; @@ -297,8 +297,8 @@ send_ok(THD *thd, ha_rows affected_rows, ulonglong id, const char *message) DBUG_VOID_RETURN; buff[0]=0; // No fields - pos=net_store_length(buff+1,(ulonglong) affected_rows); - pos=net_store_length(pos, (ulonglong) id); + pos=net_store_length(buff+1,affected_rows); + pos=net_store_length(pos, id); if (thd->client_capabilities & CLIENT_PROTOCOL_41) { DBUG_PRINT("info", @@ -416,7 +416,7 @@ bool send_old_password_request(THD *thd) ulonglong for bigger numbers. 
*/ -char *net_store_length(char *pkg, uint length) +static char *net_store_length_fast(char *pkg, uint length) { uchar *packet=(uchar*) pkg; if (length < 251) @@ -439,7 +439,7 @@ char *net_store_length(char *pkg, uint length) char *net_store_data(char *to,const char *from, uint length) { - to=net_store_length(to,length); + to=net_store_length_fast(to,length); memcpy(to,from,length); return to+length; } @@ -448,7 +448,7 @@ char *net_store_data(char *to,int32 from) { char buff[20]; uint length=(uint) (int10_to_str(from,buff,10)-buff); - to=net_store_length(to,length); + to=net_store_length_fast(to,length); memcpy(to,buff,length); return to+length; } @@ -457,7 +457,7 @@ char *net_store_data(char *to,longlong from) { char buff[22]; uint length=(uint) (longlong10_to_str(from,buff,10)-buff); - to=net_store_length(to,length); + to=net_store_length_fast(to,length); memcpy(to,buff,length); return to+length; } @@ -520,7 +520,7 @@ bool Protocol::send_fields(List *list, uint flag) if (flag & 1) { // Packet with number of elements - char *pos=net_store_length(buff, (uint) list->elements); + char *pos=net_store_length(buff, list->elements); (void) my_net_write(&thd->net, buff,(uint) (pos-buff)); } @@ -648,7 +648,7 @@ bool Protocol::send_records_num(List *list, ulonglong records) { char *pos; char buff[20]; - pos=net_store_length(buff, (uint) list->elements); + pos=net_store_length(buff, list->elements); pos=net_store_length(pos, records); return my_net_write(&thd->net, buff,(uint) (pos-buff)); } diff --git a/sql/protocol.h b/sql/protocol.h index 32d6acccddf..ce3adb41df5 100644 --- a/sql/protocol.h +++ b/sql/protocol.h @@ -177,7 +177,6 @@ void send_ok(THD *thd, ha_rows affected_rows=0L, ulonglong id=0L, const char *info=0); void send_eof(THD *thd, bool no_flush=0); bool send_old_password_request(THD *thd); -char *net_store_length(char *packet,uint length); char *net_store_data(char *to,const char *from, uint length); char *net_store_data(char *to,int32 from); char *net_store_data(char *to,longlong from); From 405dcd2df29e4693a0981c186d0c55b2f9a37b84 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 13 Nov 2006 15:37:04 +0200 Subject: [PATCH 34/57] merge 4.1->5.0 of the test suite for bug 19216 --- mysql-test/t/mysql.test | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/mysql-test/t/mysql.test b/mysql-test/t/mysql.test index f3296e6f706..a1a4b40a3dd 100644 --- a/mysql-test/t/mysql.test +++ b/mysql-test/t/mysql.test @@ -142,6 +142,24 @@ drop table t1; --exec $MYSQL -e 'help ' > $MYSQLTEST_VARDIR/tmp/bug20328_2.result --exec diff $MYSQLTEST_VARDIR/tmp/bug20328_1.result $MYSQLTEST_VARDIR/tmp/bug20328_2.result +# +# Bug #19216: Client crashes on long SELECT +# +--exec echo "select" > $MYSQLTEST_VARDIR/tmp/b19216.tmp +# 3400 * 20 makes 68000 columns that is more than the max number that can fit +# in a 16 bit number. +let $i= 3400; +while ($i) +{ + --exec echo "'a','a','a','a','a','a','a','a','a','a','a','a','a','a','a','a','a','a','a','a'," >> $MYSQLTEST_VARDIR/tmp/b19216.tmp + dec $i; +} + +--exec echo "'b';" >> $MYSQLTEST_VARDIR/tmp/b19216.tmp +--disable_query_log +--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/b19216.tmp >/dev/null +--enable_query_log + # # Bug #20103: Escaping with backslash does not work # From 1019dd404c273b722daa4039d42b1a920439e6b1 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 14 Nov 2006 19:50:44 +0300 Subject: [PATCH 35/57] Bug#20045: Server crash on INSERT ... SELECT ... FROM non-mergeable view The regression is caused by the fix for bug 14767. When INSERT ... 
SELECT used a view in the SELECT list that was not inlined, and there was an active transaction, the server could crash in Query_cache::invalidate. On INSERT ... SELECT only the table being inserted into is invalidated. Thus views that can't be inlined are skipped from invalidation. The bug manifests itself in two ways so there is 2 test cases. One checks that the only the table being inserted into is invalidated. And the second one checks that there is no crash on INSERT ... SELECT. mysql-test/t/query_cache.test: Added a test case for bug#20045: Server crash on INSERT ... SELECT ... FROM non-mergeable view mysql-test/r/query_cache.result: Added a test case for bug#20045: Server crash on INSERT ... SELECT ... FROM non-mergeable view sql/sql_parse.cc: Bug#20045: Server crash on INSERT ... SELECT ... FROM non-mergeable view On INSERT ... SELECT only the table being inserted into is invalidated. --- mysql-test/r/query_cache.result | 48 +++++++++++++++++++++++++++++++++ mysql-test/t/query_cache.test | 29 ++++++++++++++++++++ sql/sql_parse.cc | 4 +++ 3 files changed, 81 insertions(+) diff --git a/mysql-test/r/query_cache.result b/mysql-test/r/query_cache.result index 5224280e134..7645ad488cc 100644 --- a/mysql-test/r/query_cache.result +++ b/mysql-test/r/query_cache.result @@ -1274,3 +1274,51 @@ Variable_name Value Last_query_cost 0.000000 drop table t1; SET GLOBAL query_cache_size=0; +set global query_cache_size=1024*1024; +flush status; +create table t1 (a int); +insert into t1 (a) values (1), (2), (3); +select * from t1; +a +1 +2 +3 +show status like 'Qcache_hits'; +Variable_name Value +Qcache_hits 0 +select * from t1; +a +1 +2 +3 +show status like 'Qcache_hits'; +Variable_name Value +Qcache_hits 1 +create table t2 like t1; +select * from t1; +a +1 +2 +3 +show status like 'Qcache_hits'; +Variable_name Value +Qcache_hits 2 +insert into t2 select * from t1; +select * from t1; +a +1 +2 +3 +show status like 'Qcache_hits'; +Variable_name Value +Qcache_hits 3 +drop table t1, t2; +create table t1(c1 int); +create table t2(c1 int); +create table t3(c1 int); +create view v1 as select t3.c1 as c1 from t3,t2 where t3.c1 = t2.c1; +start transaction; +insert into t1(c1) select c1 from v1; +drop table t1, t2, t3; +drop view v1; +set global query_cache_size=0; diff --git a/mysql-test/t/query_cache.test b/mysql-test/t/query_cache.test index d416f34ce45..06eb76027b6 100644 --- a/mysql-test/t/query_cache.test +++ b/mysql-test/t/query_cache.test @@ -870,3 +870,32 @@ select * from t1 where a > 3; show status like 'last_query_cost'; drop table t1; SET GLOBAL query_cache_size=0; + +# +# Bug #20045: Server crash on INSERT ... SELECT ... 
FROM non-mergeable view +# +set global query_cache_size=1024*1024; +flush status; +create table t1 (a int); +insert into t1 (a) values (1), (2), (3); +select * from t1; +show status like 'Qcache_hits'; +select * from t1; +show status like 'Qcache_hits'; +create table t2 like t1; +select * from t1; +show status like 'Qcache_hits'; +insert into t2 select * from t1; +select * from t1; +show status like 'Qcache_hits'; +drop table t1, t2; + +create table t1(c1 int); +create table t2(c1 int); +create table t3(c1 int); +create view v1 as select t3.c1 as c1 from t3,t2 where t3.c1 = t2.c1; +start transaction; +insert into t1(c1) select c1 from v1; +drop table t1, t2, t3; +drop view v1; +set global query_cache_size=0; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index c62c286cfdb..191ba4d12fb 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -3421,8 +3421,12 @@ end_with_restore_list: if (first_table->lock_type == TL_WRITE_CONCURRENT_INSERT && thd->lock) { + /* INSERT ... SELECT should invalidate only the very first table */ + TABLE_LIST *save_table= first_table->next_local; + first_table->next_local= 0; mysql_unlock_tables(thd, thd->lock); query_cache_invalidate3(thd, first_table, 1); + first_table->next_local= save_table; thd->lock=0; } delete result; From daaddeb656c26c685962fef69d19e7f264620e2b Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 15 Nov 2006 19:17:52 +0100 Subject: [PATCH 36/57] Dummy push to force pushbuild retest mysql-test/t/disabled.def: Dummy push --- mysql-test/t/disabled.def | 1 - 1 file changed, 1 deletion(-) diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index eaea7c710b0..df56165950f 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -12,4 +12,3 @@ ndb_load : Bug#17233 user_limits : Bug#23921 random failure of user_limits.test - From 83a7bbf730a0b274fa1050eda174a3f718b6007a Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 16 Nov 2006 21:23:34 +0400 Subject: [PATCH 37/57] merging --- client/mysqltest.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/client/mysqltest.c b/client/mysqltest.c index f2f2dc85d72..b73ee831cf3 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -183,18 +183,6 @@ DYNAMIC_ARRAY q_lines; #include "sslopt-vars.h" -struct connection -{ - MYSQL mysql; - char *name; - - const char *cur_query; - int cur_query_len; - pthread_mutex_t mutex; - pthread_cond_t cond; - int query_done; -}; - struct { int read_lines,current_line; @@ -234,6 +222,12 @@ struct st_connection MYSQL* util_mysql; char *name; MYSQL_STMT* stmt; + + const char *cur_query; + int cur_query_len; + pthread_mutex_t mutex; + pthread_cond_t cond; + int query_done; }; struct st_connection connections[128]; struct st_connection* cur_con, *next_con, *connections_end; @@ -493,7 +487,7 @@ void handle_no_error(struct st_command*); */ pthread_handler_decl(send_one_query, arg) { - struct connection *cn= (struct connection*)arg; + struct st_connection *cn= (struct st_connection*)arg; mysql_thread_init(); VOID(mysql_send_query(&cn->mysql, cn->cur_query, cn->cur_query_len)); @@ -507,7 +501,7 @@ pthread_handler_decl(send_one_query, arg) return 0; } -static int do_send_query(struct connection *cn, const char *q, int q_len, +static int do_send_query(struct st_connection *cn, const char *q, int q_len, int flags) { pthread_t tid; @@ -4570,7 +4564,7 @@ int append_warnings(DYNAMIC_STRING *ds, MYSQL* mysql) error - function will not return */ -void run_query_normal(struct connection *cn, *mysql, struct 
st_command *command, +void run_query_normal(struct st_connection *cn, struct st_command *command, int flags, char *query, int query_len, DYNAMIC_STRING *ds, DYNAMIC_STRING *ds_warnings) { @@ -4598,7 +4592,7 @@ void run_query_normal(struct connection *cn, *mysql, struct st_command *command, Here we handle 'reap' command, so we need to check if the query's thread was finished and probably wait */ - else if (flags & QUERY_REAP) + else if (flags & QUERY_REAP_FLAG) { pthread_mutex_lock(&cn->mutex); while (!cn->query_done) @@ -5096,8 +5090,9 @@ int util_query(MYSQL* org_mysql, const char* query){ */ -void run_query(MYSQL *mysql, struct st_command *command, int flags) +void run_query(struct st_connection *cn, struct st_command *command, int flags) { + MYSQL *mysql= &cn->mysql; DYNAMIC_STRING *ds; DYNAMIC_STRING ds_result; DYNAMIC_STRING ds_warnings; @@ -5254,7 +5249,7 @@ void run_query(MYSQL *mysql, struct st_command *command, int flags) match_re(&ps_re, query)) run_query_stmt(mysql, command, query, query_len, ds, &ds_warnings); else - run_query_normal(mysql, command, flags, query, query_len, + run_query_normal(cn, command, flags, query, query_len, ds, &ds_warnings); if (sp_created) @@ -5746,7 +5741,7 @@ int main(int argc, char **argv) strmake(command->require_file, save_file, sizeof(save_file)); save_file[0]= 0; } - run_query(cur, command, flags); + run_query(cur_con, command, flags); command_executed++; command->last_argument= command->end; break; From e78fd1d14b7b151c4968702a28292d1d41b4f2ea Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 16 Nov 2006 23:00:48 +0400 Subject: [PATCH 38/57] merging --- Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.am b/Makefile.am index 48f84269313..12a867c1ad7 100644 --- a/Makefile.am +++ b/Makefile.am @@ -126,7 +126,7 @@ test-force-pl: ./mysql-test-run.pl --ps-protocol --force #used by autopush.pl to run memory based tests -test-force-mem: +test-force-pl-mem: cd mysql-test; \ ./mysql-test-run.pl --force --mem && \ ./mysql-test-run.pl --ps-protocol --force --mem From bde03bcec4457189d3993d970d144cf646ce14d6 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 17 Nov 2006 11:15:40 +0400 Subject: [PATCH 39/57] merging --- sql/item_subselect.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index d2f8e092d00..551795acd53 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1639,7 +1639,7 @@ void subselect_engine::set_row(List &item_list, Item_cache **row) res_field_type= sel_item->field_type(); item->decimals= sel_item->decimals; item->unsigned_flag= sel_item->unsigned_flag; - *maybe_null= sel_item->maybe_null; + maybe_null= sel_item->maybe_null; if (!(row[i]= Item_cache::get_cache(res_type))) return; row[i]->setup(sel_item); From 3ebdcee5c64f29e7a4d4876b25f1e7afe6b52a60 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 17 Nov 2006 12:02:36 +0400 Subject: [PATCH 40/57] merging --- mysql-test/r/order_by.result | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/mysql-test/r/order_by.result b/mysql-test/r/order_by.result index be29b310434..7b04c1acdc0 100644 --- a/mysql-test/r/order_by.result +++ b/mysql-test/r/order_by.result @@ -760,13 +760,6 @@ xxxxxxxxxxxxxxxxxxxaa xxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxz drop table t1; -create table t1 (a int not null, b int not null, c int not null); -insert t1 values (1,1,1),(1,1,2),(1,2,1); -select a, b from t1 group by a, b order by sum(c); -a b -1 2 -1 1 -drop table t1; create 
table t1 ( `sid` decimal(8,0) default null, `wnid` varchar(11) not null default '', @@ -881,6 +874,13 @@ num (select num + 2 FROM t1 LIMIT 1) SELECT a.a + 1 AS num FROM t1 a JOIN t1 b ON num = b.a; ERROR 42S22: Unknown column 'num' in 'on clause' DROP TABLE t1; +create table t1 (a int not null, b int not null, c int not null); +insert t1 values (1,1,1),(1,1,2),(1,2,1); +select a, b from t1 group by a, b order by sum(c); +a b +1 2 +1 1 +drop table t1; CREATE TABLE t1 (a int, b int, PRIMARY KEY (a)); INSERT INTO t1 VALUES (1,1), (2,2), (3,3); explain SELECT t1.b as a, t2.b as c FROM From a3d041559d2a466da9e0115c0f8f1044de98db70 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 17 Nov 2006 14:30:08 +0400 Subject: [PATCH 41/57] ABI fix include/mysql_h.ic: this one should actually be removed from the ABI --- include/mysql_h.ic | 1 - 1 file changed, 1 deletion(-) diff --git a/include/mysql_h.ic b/include/mysql_h.ic index 30ef44a1ccb..44c36c84747 100644 --- a/include/mysql_h.ic +++ b/include/mysql_h.ic @@ -154,7 +154,6 @@ struct __attribute__((aligned(__alignof__(void *)), aligned(__alignof__(unsigned struct st_mysql_methods const * methods; void * thd; my_bool * unbuffered_fetch_owner; - struct st_mysql_stmt * current_stmt; }; # 571 "mysql.h" struct __attribute__((aligned(__alignof__(void *)), aligned(__alignof__(unsigned long int)))) st_mysql_bind From 934a231825080f1c7812a67318f0cf83c05854a9 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 18 Nov 2006 21:49:59 +0400 Subject: [PATCH 42/57] merging mysql-test/t/disabled.def: meging bug --- mysql-test/t/disabled.def | 1 + sql/item_subselect.cc | 2 +- sql/sql_yacc.yy | 3 +-- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 0d3b7cdfdeb..63fa8440e43 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -32,3 +32,4 @@ rpl_multi_engine : BUG#22583 2006-09-23 lars #ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events ndb_binlog_discover : bug#21806 2006-08-24 ndb_autodiscover3 : bug#21806 +udf : for GKodinov to fix. Your fix for #21809 stopped working in 5.1 after the merge. diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index bd0f4fc56c5..c0fa2c718bc 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1682,7 +1682,7 @@ void subselect_engine::set_row(List &item_list, Item_cache **row) res_field_type= sel_item->field_type(); item->decimals= sel_item->decimals; item->unsigned_flag= sel_item->unsigned_flag; - *maybe_null= sel_item->maybe_null; + maybe_null= sel_item->maybe_null; if (!(row[i]= Item_cache::get_cache(res_type))) return; row[i]->setup(sel_item); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 4014192db02..28230fcf06d 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -798,7 +798,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type expr_list udf_expr_list udf_expr_list2 when_list ident_list ident_list_arg opt_expr_list - expr_list_opt %type option_type opt_var_type opt_var_ident_type @@ -6377,7 +6376,7 @@ function_call_generic: lex->current_select->udf_list.push_front(udf); #endif } - expr_list_opt ')' + opt_expr_list ')' { THD *thd= YYTHD; LEX *lex= Lex; From 1e3f42b246dd39f1d2e59c79a42b455106f35038 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Nov 2006 12:46:47 +0200 Subject: [PATCH 43/57] 5.0-opt -> 5.1-opt merge fixed. 
--- mysql-test/t/disabled.def | 1 - sql/sql_yacc.yy | 7 +++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 63fa8440e43..0d3b7cdfdeb 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -32,4 +32,3 @@ rpl_multi_engine : BUG#22583 2006-09-23 lars #ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events ndb_binlog_discover : bug#21806 2006-08-24 ndb_autodiscover3 : bug#21806 -udf : for GKodinov to fix. Your fix for #21809 stopped working in 5.1 after the merge. diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 28230fcf06d..26dc484059e 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -6372,11 +6372,10 @@ function_call_generic: } } /* Temporary placing the result of find_udf in $3 */ - $$= udf; lex->current_select->udf_list.push_front(udf); #endif } - opt_expr_list ')' + udf_expr_list ')' { THD *thd= YYTHD; LEX *lex= Lex; @@ -6401,7 +6400,7 @@ function_call_generic: { #ifdef HAVE_DLOPEN /* Retrieving the result of find_udf */ - udf_func *udf= $3; + udf_func *udf; LEX *lex= Lex; if (NULL != (udf= lex->current_select->udf_list.pop())) @@ -6427,7 +6426,7 @@ function_call_generic: YYABORT; } } - | ident '.' ident '(' udf_expr_list ')' + | ident '.' ident '(' opt_expr_list ')' { THD *thd= YYTHD; Create_qfunc *builder; From 25c21042dbfeebff42d1cf7880da64ba7cd2bbfa Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Nov 2006 13:42:22 +0200 Subject: [PATCH 44/57] fix for the merge of --mem support in autopush.pl in 5.1-opt --- Makefile.am | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile.am b/Makefile.am index df5a81e77a9..c0798fbb5c7 100644 --- a/Makefile.am +++ b/Makefile.am @@ -150,6 +150,7 @@ test-force-mem: test-pl: test test-full-pl: test-full test-force-pl: test-force +test-force-pl-mem: test-force-mem test-force-full-pl: test-force-full # Don't update the files from bitkeeper From 14decc4fbc04b8ce0c5efb7d76b711c6e60a0266 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Nov 2006 22:42:06 +0200 Subject: [PATCH 45/57] Remove compiler warnings (Mostly in DBUG_PRINT() and unused arguments) Fixed bug in query cache when used with traceing (--with-debug) Fixed memory leak in mysqldump Removed warnings from mysqltest scripts (replaced -- with #) BitKeeper/etc/ignore: added mysql-test/r/*.warnings BUILD/SETUP.sh: Don't build with BDB as default client/client_priv.h: Added OPT_DEBUG_INFO client/mysqlbinlog.cc: Fixed compiler warning client/mysqldump.c: Removed compiler warnings Added option --debug-info to detect memory leaks Fixed memory leak Don't backup cluster replication tables (if used with 5.1) cmd-line-utils/readline/bind.c: Fixed compiler warning cmd-line-utils/readline/chardefs.h: Fixed compiler warning cmd-line-utils/readline/complete.c: Fixed compiler warning cmd-line-utils/readline/display.c: Fixed compiler warning cmd-line-utils/readline/histexpand.c: Fixed compiler warning cmd-line-utils/readline/input.c: Fixed compiler warning cmd-line-utils/readline/isearch.c: Fixed compiler warning cmd-line-utils/readline/kill.c: Fixed compiler warning cmd-line-utils/readline/macro.c: Fixed compiler warning cmd-line-utils/readline/misc.c: Fixed compiler warning cmd-line-utils/readline/nls.c: Fixed compiler warning cmd-line-utils/readline/readline.c: Fixed compiler warning cmd-line-utils/readline/rltty.c: Fixed compiler warning cmd-line-utils/readline/search.c: Fixed compiler warning cmd-line-utils/readline/terminal.c: Fixed 
compiler warning cmd-line-utils/readline/text.c: Fixed compiler warning cmd-line-utils/readline/tilde.c: Fixed compiler warning cmd-line-utils/readline/undo.c: Fixed compiler warning cmd-line-utils/readline/util.c: Fixed compiler warning cmd-line-utils/readline/vi_mode.c: Fixed compiler warning dbug/dbug_analyze.c: Fixed compiler warning extra/yassl/src/ssl.cpp: Fixed compiler warning extra/yassl/testsuite/testsuite.cpp: Fixed compiler warning heap/_check.c: Fixed compiler warning heap/hp_delete.c: Fixed compiler warning heap/hp_hash.c: Fixed compiler warning heap/hp_open.c: Fixed compiler warning heap/hp_rkey.c: Fixed compiler warning heap/hp_rrnd.c: Fixed compiler warning heap/hp_write.c: Fixed compiler warning libmysql/libmysql.c: Fixed compiler warning libmysqld/libmysqld.c: Fixed compiler warning myisam/mi_close.c: Fixed compiler warning myisam/mi_delete.c: Fixed compiler warning myisam/mi_dynrec.c: Fixed compiler warning myisam/mi_keycache.c: Fixed compiler warning myisam/mi_page.c: Fixed compiler warning myisam/mi_statrec.c: Fixed compiler warning myisam/mi_test2.c: Fixed compiler warning myisam/mi_write.c: Fixed compiler warning myisam/myisampack.c: Fixed compiler warning myisammrg/myrg_extra.c: Fixed compiler warning mysql-test/mysql-test-run.pl: Remove .reject, .progress, .log and .warnings flag at start cluster -> mysql database (for 5.1) mysql-test/include/federated.inc: Remove mysqltest warnings mysql-test/include/sp-vars.inc: Remove mysqltest warnings mysql-test/mysql-test-run.sh: Fix so that 'make test' works again Remove .reject, .progress .log and .warnings files at startup mysql-test/r/ctype_cp1250_ch.result: Drop used tables at startup mysql-test/t/create.test: Remove mysqltest warnings mysql-test/t/csv.test: Remove mysqltest warnings mysql-test/t/ctype_collate.test: Remove mysqltest warnings mysql-test/t/ctype_cp1250_ch.test: Drop used tables at startup mysql-test/t/ctype_ucs.test: Remove mysqltest warnings mysql-test/t/func_sapdb.test: Remove mysqltest warnings mysql-test/t/func_str.test: Remove mysqltest warnings mysql-test/t/grant.test: Remove mysqltest warnings mysql-test/t/greedy_optimizer.test: Remove mysqltest warnings mysql-test/t/group_min_max.test: Remove mysqltest warnings mysql-test/t/innodb.test: Remove mysqltest warnings mysql-test/t/join.test: Remove mysqltest warnings mysql-test/t/limit.test: Remove mysqltest warnings mysql-test/t/null.test: Remove mysqltest warnings mysql-test/t/select.test: Remove mysqltest warnings mysql-test/t/sp-prelocking.test: Remove mysqltest warnings mysql-test/t/strict.test: Remove mysqltest warnings mysql-test/t/subselect.test: Remove mysqltest warnings mysql-test/t/type_newdecimal.test: Remove mysqltest warnings mysql-test/t/view_grant.test: Remove mysqltest warnings mysys/default.c: Fixed compiler warning mysys/hash.c: Fixed compiler warning mysys/list.c: Fixed compiler warning mysys/mf_iocache.c: Fixed compiler warning mysys/mf_keycache.c: Fixed compiler warning mysys/mf_keycaches.c: Fixed compiler warning mysys/my_alloc.c: Fixed compiler warning mysys/my_dup.c: Fixed compiler warning mysys/my_fopen.c: Fixed compiler warning mysys/my_fstream.c: Fixed compiler warning mysys/my_getwd.c: Fixed compiler warning mysys/my_handler.c: Fixed compiler warning Added missing enums in switch mysys/my_lib.c: Fixed compiler warning mysys/my_lread.c: Fixed compiler warning mysys/my_lwrite.c: Fixed compiler warning mysys/my_malloc.c: Fixed compiler warning mysys/my_pread.c: Fixed compiler warning mysys/my_read.c: Fixed compiler warning 
mysys/my_realloc.c: Fixed compiler warning mysys/my_seek.c: Fixed compiler warning mysys/my_write.c: Fixed compiler warning mysys/safemalloc.c: Fixed compiler warning mysys/thr_lock.c: Fixed compiler warning mysys/tree.c: Fixed compiler warning mysys/typelib.c: Fixed compiler warning ndb/include/logger/LogHandler.hpp: Changed SetErrorStr() to take const char* to remove compiler warnings (as many arguments to this are const strings) ndb/include/ndb_global.h.in: Added LINT_SET_PTR macro to be able to remove some compiler warnings ndb/include/util/InputStream.hpp: Fixed compiler warning ndb/include/util/OutputStream.hpp: Fixed compiler warning ndb/include/util/SimpleProperties.hpp: Fixed compiler warning ndb/src/common/debugger/EventLogger.cpp: remove if on 'signal' as this is a function pointer and is always true ndb/src/common/debugger/signaldata/BackupSignalData.cpp: Add missing enums ndb/src/common/logger/LogHandler.cpp: Changed SetErrorStr() to take const char* to remove compiler warnings (as many arguments to this are const strings) ndb/src/common/portlib/NdbMutex.c: Fixed compiler warning ndb/src/common/portlib/NdbThread.c: Fixed compiler warning ndb/src/common/transporter/Transporter.cpp: Swapped arguments to remove compiler warnings ndb/src/cw/cpcd/CPCD.hpp: Fixed compiler warning ndb/src/kernel/blocks/backup/Backup.cpp: Fixed compiler warning ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp: Fixed compiler warning ndb/src/kernel/blocks/dbdict/Dbdict.cpp: Fixed compiler warning ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp: Fixed compiler warning ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp: Added missing enums ndb/src/kernel/blocks/qmgr/QmgrMain.cpp: Fixed compiler warning ndb/src/kernel/error/ErrorReporter.cpp: Fixed compiler warning ndb/src/kernel/error/ErrorReporter.hpp: Fixed compiler warning ndb/src/kernel/error/ndbd_exit_codes.c: Fixed compiler warning ndb/src/kernel/vm/TransporterCallback.cpp: Fixed compiler warning ndb/src/mgmapi/mgmapi.cpp: Fixed compiler warning ndb/src/mgmclient/CommandInterpreter.cpp: Fixed compiler warning ndb/src/mgmsrv/MgmtSrvr.cpp: Fixed compiler warning ndb/src/mgmsrv/Services.cpp: Fixed compiler warning ndb/src/ndbapi/ClusterMgr.cpp: Fixed compiler warning ndb/src/ndbapi/DictCache.cpp: Fixed compiler warning ndb/src/ndbapi/Ndb.cpp: Fixed compiler warning ndb/src/ndbapi/NdbOperationDefine.cpp: Fixed compiler warning ndb/src/ndbapi/NdbOperationExec.cpp: Fixed compiler warning ndb/src/ndbapi/NdbOperationInt.cpp: Fixed compiler warning ndb/src/ndbapi/NdbOperationSearch.cpp: Fixed compiler warning ndb/src/ndbapi/NdbTransaction.cpp: Fixed compiler warning ndb/src/ndbapi/Ndbif.cpp: Fixed compiler warning ndb/src/ndbapi/Ndbinit.cpp: Fixed compiler warning ndb/src/ndbapi/ndb_cluster_connection.cpp: Fixed compiler warning ndb/tools/drop_index.cpp: Fixed compiler warning ndb/tools/drop_tab.cpp: Fixed compiler warning ndb/tools/ndb_config.cpp: Fixed compiler warning regex/regexec.c: Fixed compiler warning server-tools/instance-manager/mysql_connection.cc: Fixed compiler warning sql/Makefile.am: Fix for bison 1.875, which adds an attribute statement that gcc 4.1.0 can't parse sql/filesort.cc: Fixed compiler warning sql/ha_archive.cc: Fixed compiler warning sql/ha_federated.cc: Fixed compiler warning sql/ha_federated.h: Fixed compiler warning sql/ha_innodb.cc: Fixed compiler warning sql/ha_myisam.cc: Fixed compiler warning sql/ha_myisammrg.cc: Fixed compiler warning sql/ha_ndbcluster.cc: Fixed compiler warning sql/handler.cc: Fixed compiler warning sql/item_cmpfunc.cc: 
Fixed compiler warning sql/item_subselect.cc: Fixed compiler warning sql/item_timefunc.cc: Fixed compiler warning sql/log_event.cc: Fixed compiler warning sql/mysqld.cc: Fixed compiler warning sql/net_serv.cc: Fixed compiler warning sql/opt_range.cc: Fixed compiler warning Formated DBUG statements to be as rest of code sql/slave.cc: Fixed compiler warning sql/sql_acl.cc: Fixed compiler warning sql/sql_cache.cc: Fixed compiler warning Fixed bug in query cache when used with DBUG traceing sql/sql_class.cc: Fixed compiler warning sql/sql_class.h: Fixed compiler warning sql/sql_delete.cc: Fixed compiler warning sql/sql_parse.cc: Fixed compiler warning Hack to fix my_sprintf() strings with %b sql/sql_prepare.cc: Fixed compiler warning Hack to fix my_sprintf() strings with %b sql/sql_repl.cc: Fixed compiler warning sql/sql_select.cc: Fixed compiler warning sql/sql_update.cc: Fixed compiler warning sql/sql_view.cc: Fixed compiler warning sql/strfunc.cc: Fixed compiler warning sql-common/client.c: Fixed compiler warning sql-common/my_time.c: Fixed compiler warning sql/table.cc: Fixed compiler warning sql/tztime.cc: Fixed compiler warning sql/unireg.cc: Fixed compiler warning strings/decimal.c: Fixed compiler warning tests/mysql_client_test.c: Fixed compiler warning vio/viosocket.c: Fixed compiler warning vio/viossl.c: Fixed compiler warning vio/viosslfactories.c: Fixed compiler warning --- .bzrignore | 1 + BUILD/SETUP.sh | 6 +- client/client_priv.h | 3 +- client/mysqlbinlog.cc | 2 +- client/mysqldump.c | 141 +++++++++------- cmd-line-utils/readline/bind.c | 40 ++--- cmd-line-utils/readline/chardefs.h | 4 + cmd-line-utils/readline/complete.c | 22 +-- cmd-line-utils/readline/display.c | 17 +- cmd-line-utils/readline/histexpand.c | 6 +- cmd-line-utils/readline/input.c | 2 +- cmd-line-utils/readline/isearch.c | 10 +- cmd-line-utils/readline/kill.c | 31 ++-- cmd-line-utils/readline/macro.c | 10 +- cmd-line-utils/readline/misc.c | 31 ++-- cmd-line-utils/readline/nls.c | 2 +- cmd-line-utils/readline/readline.c | 4 + cmd-line-utils/readline/rltty.c | 7 +- cmd-line-utils/readline/search.c | 14 +- cmd-line-utils/readline/terminal.c | 10 +- cmd-line-utils/readline/text.c | 61 +++---- cmd-line-utils/readline/tilde.c | 2 +- cmd-line-utils/readline/undo.c | 8 +- cmd-line-utils/readline/util.c | 9 +- cmd-line-utils/readline/vi_mode.c | 83 ++++------ dbug/dbug_analyze.c | 14 +- extra/yassl/src/ssl.cpp | 2 +- extra/yassl/testsuite/testsuite.cpp | 4 +- heap/_check.c | 27 +-- heap/hp_delete.c | 4 +- heap/hp_hash.c | 2 +- heap/hp_open.c | 5 +- heap/hp_rkey.c | 2 +- heap/hp_rrnd.c | 4 +- heap/hp_write.c | 8 +- libmysql/libmysql.c | 14 +- libmysqld/libmysqld.c | 2 +- myisam/mi_close.c | 5 +- myisam/mi_delete.c | 9 +- myisam/mi_dynrec.c | 4 +- myisam/mi_keycache.c | 4 +- myisam/mi_page.c | 4 +- myisam/mi_statrec.c | 4 +- myisam/mi_test2.c | 2 +- myisam/mi_write.c | 18 +- myisam/myisampack.c | 15 +- myisammrg/myrg_extra.c | 2 +- mysql-test/include/federated.inc | 2 +- mysql-test/include/sp-vars.inc | 16 +- mysql-test/mysql-test-run.pl | 14 +- mysql-test/mysql-test-run.sh | 20 +-- mysql-test/r/ctype_cp1250_ch.result | 1 + mysql-test/t/create.test | 20 +-- mysql-test/t/csv.test | 12 +- mysql-test/t/ctype_collate.test | 37 ++--- mysql-test/t/ctype_cp1250_ch.test | 4 + mysql-test/t/ctype_ucs.test | 2 +- mysql-test/t/func_sapdb.test | 2 +- mysql-test/t/func_str.test | 20 ++- mysql-test/t/grant.test | 2 +- mysql-test/t/greedy_optimizer.test | 62 +++---- mysql-test/t/group_min_max.test | 156 +++++++++--------- 
mysql-test/t/innodb.test | 2 +- mysql-test/t/join.test | 58 +++---- mysql-test/t/limit.test | 2 +- mysql-test/t/null.test | 2 +- mysql-test/t/select.test | 4 +- mysql-test/t/sp-prelocking.test | 2 +- mysql-test/t/strict.test | 20 +-- mysql-test/t/subselect.test | 6 +- mysql-test/t/type_newdecimal.test | 4 +- mysql-test/t/view_grant.test | 2 +- mysys/default.c | 6 +- mysys/hash.c | 13 +- mysys/list.c | 2 +- mysys/mf_iocache.c | 9 +- mysys/mf_keycache.c | 32 ++-- mysys/mf_keycaches.c | 4 +- mysys/my_alloc.c | 13 +- mysys/my_dup.c | 2 +- mysys/my_fopen.c | 6 +- mysys/my_fstream.c | 8 +- mysys/my_getwd.c | 2 +- mysys/my_handler.c | 7 + mysys/my_lib.c | 2 +- mysys/my_lread.c | 4 +- mysys/my_lwrite.c | 4 +- mysys/my_malloc.c | 4 +- mysys/my_pread.c | 8 +- mysys/my_read.c | 6 +- mysys/my_realloc.c | 4 +- mysys/my_seek.c | 2 +- mysys/my_write.c | 2 +- mysys/safemalloc.c | 10 +- mysys/thr_lock.c | 15 +- mysys/tree.c | 4 +- mysys/typelib.c | 2 +- ndb/include/logger/LogHandler.hpp | 2 +- ndb/include/ndb_global.h.in | 6 + ndb/include/util/InputStream.hpp | 1 + ndb/include/util/OutputStream.hpp | 1 + ndb/include/util/SimpleProperties.hpp | 7 + ndb/src/common/debugger/EventLogger.cpp | 3 +- .../debugger/signaldata/BackupSignalData.cpp | 3 + ndb/src/common/logger/LogHandler.cpp | 4 +- ndb/src/common/portlib/NdbMutex.c | 4 +- ndb/src/common/portlib/NdbThread.c | 4 +- ndb/src/common/transporter/Transporter.cpp | 4 +- ndb/src/cw/cpcd/CPCD.hpp | 1 + ndb/src/kernel/blocks/backup/Backup.cpp | 128 +++++++------- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 8 +- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 6 +- ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp | 2 +- ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 7 +- ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 7 +- ndb/src/kernel/error/ErrorReporter.cpp | 1 + ndb/src/kernel/error/ErrorReporter.hpp | 2 +- ndb/src/kernel/error/ndbd_exit_codes.c | 1 + ndb/src/kernel/vm/TransporterCallback.cpp | 2 +- ndb/src/mgmapi/mgmapi.cpp | 4 +- ndb/src/mgmclient/CommandInterpreter.cpp | 4 +- ndb/src/mgmsrv/MgmtSrvr.cpp | 8 +- ndb/src/mgmsrv/Services.cpp | 2 +- ndb/src/ndbapi/ClusterMgr.cpp | 2 +- ndb/src/ndbapi/DictCache.cpp | 3 +- ndb/src/ndbapi/Ndb.cpp | 17 +- ndb/src/ndbapi/NdbOperationDefine.cpp | 4 +- ndb/src/ndbapi/NdbOperationExec.cpp | 2 +- ndb/src/ndbapi/NdbOperationInt.cpp | 4 +- ndb/src/ndbapi/NdbOperationSearch.cpp | 4 +- ndb/src/ndbapi/NdbTransaction.cpp | 4 +- ndb/src/ndbapi/Ndbif.cpp | 4 +- ndb/src/ndbapi/Ndbinit.cpp | 4 +- ndb/src/ndbapi/ndb_cluster_connection.cpp | 2 +- ndb/tools/drop_index.cpp | 3 - ndb/tools/drop_tab.cpp | 3 - ndb/tools/ndb_config.cpp | 2 + regex/regexec.c | 3 +- .../instance-manager/mysql_connection.cc | 20 +-- sql-common/client.c | 10 +- sql-common/my_time.c | 2 +- sql/Makefile.am | 2 + sql/filesort.cc | 2 +- sql/ha_archive.cc | 6 +- sql/ha_federated.cc | 12 +- sql/ha_federated.h | 3 +- sql/ha_innodb.cc | 4 +- sql/ha_myisam.cc | 2 +- sql/ha_myisammrg.cc | 2 +- sql/ha_ndbcluster.cc | 41 ++--- sql/handler.cc | 2 +- sql/item_cmpfunc.cc | 4 +- sql/item_subselect.cc | 2 +- sql/item_timefunc.cc | 3 +- sql/log_event.cc | 15 +- sql/mysqld.cc | 10 +- sql/net_serv.cc | 2 +- sql/opt_range.cc | 20 +-- sql/slave.cc | 17 +- sql/sql_acl.cc | 2 +- sql/sql_cache.cc | 67 ++++---- sql/sql_class.cc | 6 +- sql/sql_class.h | 2 +- sql/sql_delete.cc | 2 +- sql/sql_parse.cc | 12 +- sql/sql_prepare.cc | 12 +- sql/sql_repl.cc | 4 +- sql/sql_select.cc | 2 +- sql/sql_update.cc | 4 +- sql/sql_view.cc | 2 +- sql/strfunc.cc | 2 +- sql/table.cc | 2 +- sql/tztime.cc | 7 +- sql/unireg.cc 
| 8 +- strings/decimal.c | 2 +- tests/mysql_client_test.c | 7 +- vio/viosocket.c | 16 +- vio/viossl.c | 20 +-- vio/viosslfactories.c | 8 +- 179 files changed, 1007 insertions(+), 968 deletions(-) diff --git a/.bzrignore b/.bzrignore index e167619c1a6..d6086ba4a76 100644 --- a/.bzrignore +++ b/.bzrignore @@ -1322,3 +1322,4 @@ win/vs71cache.txt win/vs8cache.txt zlib/*.ds? zlib/*.vcproj +mysql-test/r/*.warnings diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh index 8055f337821..6f3c4222ed2 100755 --- a/BUILD/SETUP.sh +++ b/BUILD/SETUP.sh @@ -56,9 +56,9 @@ global_warnings="-Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wch #debug_extra_warnings="-Wuninitialized" c_warnings="$global_warnings -Wunused" cxx_warnings="$global_warnings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor" -base_max_configs="--with-innodb --with-berkeley-db --with-ndbcluster --with-archive-storage-engine --with-big-tables --with-blackhole-storage-engine --with-federated-storage-engine --with-csv-storage-engine $SSL_LIBRARY" -base_max_no_ndb_configs="--with-innodb --with-berkeley-db --without-ndbcluster --with-archive-storage-engine --with-big-tables --with-blackhole-storage-engine --with-federated-storage-engine --with-csv-storage-engine $SSL_LIBRARY" -max_leave_isam_configs="--with-innodb --with-berkeley-db --with-ndbcluster --with-archive-storage-engine --with-federated-storage-engine --with-blackhole-storage-engine --with-csv-storage-engine $SSL_LIBRARY --with-embedded-server --with-big-tables" +base_max_configs="--with-innodb --with-ndbcluster --with-archive-storage-engine --with-big-tables --with-blackhole-storage-engine --with-federated-storage-engine --with-csv-storage-engine $SSL_LIBRARY" +base_max_no_ndb_configs="--with-innodb --without-ndbcluster --with-archive-storage-engine --with-big-tables --with-blackhole-storage-engine --with-federated-storage-engine --with-csv-storage-engine $SSL_LIBRARY" +max_leave_isam_configs="--with-innodb --with-ndbcluster --with-archive-storage-engine --with-federated-storage-engine --with-blackhole-storage-engine --with-csv-storage-engine $SSL_LIBRARY --with-embedded-server --with-big-tables" max_configs="$base_max_configs --with-embedded-server" max_no_ndb_configs="$base_max_no_ndb_configs --with-embedded-server" diff --git a/client/client_priv.h b/client/client_priv.h index 9e011144836..7ebbade4ef6 100644 --- a/client/client_priv.h +++ b/client/client_priv.h @@ -51,5 +51,6 @@ enum options_client #endif OPT_TRIGGERS, OPT_IGNORE_TABLE,OPT_INSERT_IGNORE,OPT_SHOW_WARNINGS,OPT_DROP_DATABASE, - OPT_TZ_UTC, OPT_AUTO_CLOSE, OPT_SSL_VERIFY_SERVER_CERT + OPT_TZ_UTC, OPT_AUTO_CLOSE, OPT_SSL_VERIFY_SERVER_CERT, + OPT_DEBUG_INFO }; diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index c04c2ecabd6..ff4e0b5a5cf 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -1088,7 +1088,7 @@ could be out of memory"); } if (len < 8 && net->read_pos[0] == 254) break; // end of data - DBUG_PRINT("info",( "len= %u, net->read_pos[5] = %d\n", + DBUG_PRINT("info",( "len: %lu, net->read_pos[5]: %d\n", len, net->read_pos[5])); if (!(ev= Log_event::read_log_event((const char*) net->read_pos + 1 , len - 1, &error_msg, diff --git a/client/mysqldump.c b/client/mysqldump.c index 757bec09b50..b44c3944f41 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -30,14 +30,14 @@ ** master/autocommit code by Brian Aker ** SSL by ** Andrei Errapart -** Tõnu Samuel +** Tõnu Samuel ** XML by Gary Huntress 10/10/01, cleaned up ** and adapted to mysqldump 
05/11/01 by Jani Tolonen ** Added --single-transaction option 06/06/2002 by Peter Zaitsev ** 10 Jun 2003: SET NAMES and --no-set-names by Alexander Barkov */ -#define DUMP_VERSION "10.10" +#define DUMP_VERSION "10.11" #include #include @@ -99,7 +99,7 @@ static my_bool verbose= 0, opt_no_create_info= 0, opt_no_data= 0, opt_dump_triggers= 0, opt_routines=0, opt_tz_utc=1; static ulong opt_max_allowed_packet, opt_net_buffer_length; static MYSQL mysql_connection,*mysql=0; -static my_bool insert_pat_inited=0; +static my_bool insert_pat_inited= 0, info_flag; static DYNAMIC_STRING insert_pat; static char *opt_password=0,*current_user=0, *current_host=0,*path=0,*fields_terminated=0, @@ -107,6 +107,7 @@ static char *opt_password=0,*current_user=0, *where=0, *order_by=0, *opt_compatible_mode_str= 0, *err_ptr= 0; +static char **defaults_argv= 0; static char compatible_mode_normal_str[255]; static ulong opt_compatible_mode= 0; #define MYSQL_OPT_MASTER_DATA_EFFECTIVE_SQL 1 @@ -116,7 +117,7 @@ static my_string opt_mysql_unix_port=0; static int first_error=0; static DYNAMIC_STRING extended_row; #include -FILE *md_result_file; +FILE *md_result_file= 0; #ifdef HAVE_SMEM static char *shared_memory_base_name=0; #endif @@ -215,6 +216,8 @@ static struct my_option my_long_options[] = {"debug", '#', "Output debug log", (gptr*) &default_dbug_option, (gptr*) &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif + {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", (gptr*) &info_flag, + (gptr*) &info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"default-character-set", OPT_DEFAULT_CHARSET, "Set the default character set.", (gptr*) &default_charset, (gptr*) &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -628,14 +631,6 @@ byte* get_table_key(const char *entry, uint *length, } -void init_table_rule_hash(HASH* h) -{ - if (hash_init(h, charset_info, 16, 0, 0, - (hash_get_key) get_table_key, - (hash_free_key) free_table_ent, 0)) - exit(EX_EOM); -} - static my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *argument) @@ -678,6 +673,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), break; case '#': DBUG_PUSH(argument ? 
argument : default_dbug_option); + info_flag= 1; break; #include case 'V': print_version(); exit(0); @@ -718,9 +714,6 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), fprintf(stderr, "Illegal use of option --ignore-table=.\n"); exit(1); } - if (!hash_inited(&ignore_table)) - init_table_rule_hash(&ignore_table); - if (my_hash_insert(&ignore_table, (byte*)my_strdup(argument, MYF(0)))) exit(EX_EOM); break; @@ -796,9 +789,21 @@ static int get_options(int *argc, char ***argv) md_result_file= stdout; load_defaults("my",load_default_groups,argc,argv); + defaults_argv= *argv; - if ((ho_error=handle_options(argc, argv, my_long_options, get_one_option))) - exit(ho_error); + if (hash_init(&ignore_table, charset_info, 16, 0, 0, + (hash_get_key) get_table_key, + (hash_free_key) free_table_ent, 0)) + return(EX_EOM); + /* Don't copy cluster internal log tables */ + if (my_hash_insert(&ignore_table, + (byte*) my_strdup("mysql.apply_status", MYF(MY_WME))) || + my_hash_insert(&ignore_table, + (byte*) my_strdup("mysql.schema", MYF(MY_WME)))) + return(EX_EOM); + + if ((ho_error= handle_options(argc, argv, my_long_options, get_one_option))) + return(ho_error); *mysql_params->p_max_allowed_packet= opt_max_allowed_packet; *mysql_params->p_net_buffer_length= opt_net_buffer_length; @@ -810,7 +815,7 @@ static int get_options(int *argc, char ***argv) { fprintf(stderr, "%s: You must use option --tab with --fields-...\n", my_progname); - return(1); + return(EX_USAGE); } /* Ensure consistency of the set of binlog & locking options */ @@ -820,7 +825,7 @@ static int get_options(int *argc, char ***argv) { fprintf(stderr, "%s: You can't use --single-transaction and " "--lock-all-tables at the same time.\n", my_progname); - return(1); + return(EX_USAGE); } if (opt_master_data) opt_lock_all_tables= !opt_single_transaction; @@ -829,14 +834,14 @@ static int get_options(int *argc, char ***argv) if (enclosed && opt_enclosed) { fprintf(stderr, "%s: You can't use ..enclosed.. and ..optionally-enclosed.. at the same time.\n", my_progname); - return(1); + return(EX_USAGE); } if ((opt_databases || opt_alldbs) && path) { fprintf(stderr, "%s: --databases or --all-databases can't be used with --tab.\n", my_progname); - return(1); + return(EX_USAGE); } if (strcmp(default_charset, charset_info->csname) && !(charset_info= get_charset_by_csname(default_charset, @@ -845,7 +850,7 @@ static int get_options(int *argc, char ***argv) if ((*argc < 1 && !opt_alldbs) || (*argc > 0 && opt_alldbs)) { short_usage(); - return 1; + return EX_USAGE; } if (tty_password) opt_password=get_tty_password(NullS); @@ -920,6 +925,23 @@ static FILE* open_sql_file_for_table(const char* table) } +static void free_resources() +{ + if (md_result_file && md_result_file != stdout) + my_fclose(md_result_file, MYF(0)); + my_free(opt_password, MYF(MY_ALLOW_ZERO_PTR)); + if (hash_inited(&ignore_table)) + hash_free(&ignore_table); + if (extended_insert) + dynstr_free(&extended_row); + if (insert_pat_inited) + dynstr_free(&insert_pat); + if (defaults_argv) + free_defaults(defaults_argv); + my_end(info_flag ? MY_CHECK_ERROR : 0); +} + + static void safe_exit(int error) { if (!first_error) @@ -928,18 +950,19 @@ static void safe_exit(int error) return; if (mysql) mysql_close(mysql); + free_resources(); exit(error); } -/* safe_exit */ /* -** dbConnect -- connects to the host and selects DB. + db_connect -- connects to the host and selects DB. 
*/ -static int dbConnect(char *host, char *user,char *passwd) + +static int connect_to_db(char *host, char *user,char *passwd) { char buff[20+FN_REFLEN]; - DBUG_ENTER("dbConnect"); + DBUG_ENTER("connect_to_db"); verbose_msg("-- Connecting to %s...\n", host ? host : "localhost"); mysql_init(&mysql_connection); @@ -960,11 +983,11 @@ static int dbConnect(char *host, char *user,char *passwd) #endif mysql_options(&mysql_connection, MYSQL_SET_CHARSET_NAME, default_charset); if (!(mysql= mysql_real_connect(&mysql_connection,host,user,passwd, - NULL,opt_mysql_port,opt_mysql_unix_port, - 0))) + NULL,opt_mysql_port,opt_mysql_unix_port, + 0))) { DB_error(&mysql_connection, "when trying to connect"); - return 1; + DBUG_RETURN(1); } /* Don't dump SET NAMES with a pre-4.1 server (bug#7997). @@ -981,7 +1004,7 @@ static int dbConnect(char *host, char *user,char *passwd) if (mysql_query_with_error_report(mysql, 0, buff)) { safe_exit(EX_MYSQLERR); - return 1; + DBUG_RETURN(1); } /* set time_zone to UTC to allow dumping date types between servers with @@ -993,11 +1016,11 @@ static int dbConnect(char *host, char *user,char *passwd) if (mysql_query_with_error_report(mysql, 0, buff)) { safe_exit(EX_MYSQLERR); - return 1; + DBUG_RETURN(1); } } - return 0; -} /* dbConnect */ + DBUG_RETURN(0); +} /* connect_to_db */ /* @@ -1519,7 +1542,11 @@ static uint get_table_structure(char *table, char *db, char *table_type, { complete_insert= opt_complete_insert; if (!insert_pat_inited) - insert_pat_inited= init_dynamic_string(&insert_pat, "", 1024, 1024); + { + insert_pat_inited= 1; + if (init_dynamic_string(&insert_pat, "", 1024, 1024)) + safe_exit(EX_MYSQLERR); + } else dynstr_set(&insert_pat, ""); } @@ -1988,7 +2015,7 @@ continue_xml: */ -static void dump_triggers_for_table (char *table, char *db) +static void dump_triggers_for_table(char *table, char *db) { char *result_table; char name_buff[NAME_LEN*4+3], table_buff[NAME_LEN*2+3]; @@ -2693,6 +2720,8 @@ static int dump_databases(char **db_names) { int result=0; char **db; + DBUG_ENTER("dump_databases"); + for (db= db_names ; *db ; db++) { if (dump_all_tables_in_db(*db)) @@ -2706,7 +2735,7 @@ static int dump_databases(char **db_names) result=1; } } - return result; + DBUG_RETURN(result); } /* dump_databases */ @@ -2721,7 +2750,7 @@ RETURN VALUES 0 Success. 1 Failure. 
*/ -int init_dumping_views(char *qdatabase) +int init_dumping_views(char *qdatabase __attribute__((unused))) { return 0; } /* init_dumping_views */ @@ -2818,12 +2847,11 @@ static int init_dumping(char *database, int init_func(char*)) } /* init_dumping */ +/* Return 1 if we should copy the table */ + my_bool include_table(byte* hash_key, uint len) { - if (hash_search(&ignore_table, (byte*) hash_key, len)) - return FALSE; - - return TRUE; + return !hash_search(&ignore_table, (byte*) hash_key, len); } @@ -2832,10 +2860,10 @@ static int dump_all_tables_in_db(char *database) char *table; uint numrows; char table_buff[NAME_LEN*2+3]; - char hash_key[2*NAME_LEN+2]; /* "db.tablename" */ char *afterdot; int using_mysql_db= my_strcasecmp(&my_charset_latin1, database, "mysql"); + DBUG_ENTER("dump_all_tables_in_db"); afterdot= strmov(hash_key, database); *afterdot++= '.'; @@ -3257,7 +3285,6 @@ static void print_value(FILE *file, MYSQL_RES *result, MYSQL_ROW row, /* - SYNOPSIS Check if we the table is one of the table types that should be ignored: @@ -3297,8 +3324,8 @@ char check_if_ignore_table(const char *table_name, char *table_type) { if (mysql_errno(mysql) != ER_PARSE_ERROR) { /* If old MySQL version */ - verbose_msg("-- Warning: Couldn't get status information for " \ - "table %s (%s)\n", table_name,mysql_error(mysql)); + verbose_msg("-- Warning: Couldn't get status information for " + "table %s (%s)\n", table_name, mysql_error(mysql)); DBUG_RETURN(result); /* assume table is ok */ } } @@ -3653,19 +3680,24 @@ static my_bool get_view_structure(char *table, char* db) int main(int argc, char **argv) { + int exit_code; MY_INIT("mysqldump"); compatible_mode_normal_str[0]= 0; default_charset= (char *)mysql_universal_client_charset; bzero((char*) &ignore_table, sizeof(ignore_table)); - if (get_options(&argc, &argv)) + exit_code= get_options(&argc, &argv); + if (exit_code) { - my_end(0); - exit(EX_USAGE); + free_resources(0); + exit(exit_code); } - if (dbConnect(current_host, current_user, opt_password)) + if (connect_to_db(current_host, current_user, opt_password)) + { + free_resources(0); exit(EX_MYSQLERR); + } if (!path) write_header(md_result_file, *argv); @@ -3712,15 +3744,6 @@ err: dbDisconnect(current_host); if (!path) write_footer(md_result_file); - if (md_result_file != stdout) - my_fclose(md_result_file, MYF(0)); - my_free(opt_password, MYF(MY_ALLOW_ZERO_PTR)); - if (hash_inited(&ignore_table)) - hash_free(&ignore_table); - if (extended_insert) - dynstr_free(&extended_row); - if (insert_pat_inited) - dynstr_free(&insert_pat); - my_end(0); + free_resources(); return(first_error); } /* main */ diff --git a/cmd-line-utils/readline/bind.c b/cmd-line-utils/readline/bind.c index 17f61d1df08..ab1136c7da5 100644 --- a/cmd-line-utils/readline/bind.c +++ b/cmd-line-utils/readline/bind.c @@ -434,7 +434,7 @@ rl_translate_keyseq (seq, array, len) { register int i, c, l, temp; - for (i = l = 0; c = seq[i]; i++) + for (i = l = 0; (c = seq[i]); i++) { if (c == '\\') { @@ -765,8 +765,8 @@ _rl_read_file (filename, sizep) /* Re-read the current keybindings file. */ int -rl_re_read_init_file (count, ignore) - int count, ignore; +rl_re_read_init_file (int count __attribute__((unused)), + int ignore __attribute__((unused))) { int r; r = rl_read_init_file ((const char *)NULL); @@ -987,8 +987,7 @@ parser_if (args) /* Invert the current parser state if there is anything on the stack. 
*/ static int -parser_else (args) - char *args; +parser_else (char *args __attribute__((unused))) { register int i; @@ -1018,8 +1017,7 @@ parser_else (args) /* Terminate a conditional, popping the value of _rl_parsing_conditionalized_out from the stack. */ static int -parser_endif (args) - char *args; +parser_endif (char *args __attribute__((unused))) { if (if_stack_depth) _rl_parsing_conditionalized_out = if_stack[--if_stack_depth]; @@ -1142,7 +1140,7 @@ rl_parse_and_bind (string) { int passc = 0; - for (i = 1; c = string[i]; i++) + for (i = 1; (c = string[i]); i++) { if (passc) { @@ -1218,7 +1216,7 @@ rl_parse_and_bind (string) { int delimiter = string[i++], passc; - for (passc = 0; c = string[i]; i++) + for (passc = 0; (c = string[i]); i++) { if (passc) { @@ -1377,7 +1375,7 @@ static struct { #if defined (VISIBLE_STATS) { "visible-stats", &rl_visible_stats, 0 }, #endif /* VISIBLE_STATS */ - { (char *)NULL, (int *)NULL } + { (char *)NULL, (int *)NULL, 0 } }; static int @@ -1446,7 +1444,7 @@ static struct { { "editing-mode", V_STRING, sv_editmode }, { "isearch-terminators", V_STRING, sv_isrchterm }, { "keymap", V_STRING, sv_keymap }, - { (char *)NULL, 0 } + { (char *)NULL, 0, 0 } }; static int @@ -1466,7 +1464,7 @@ find_string_var (name) values result in 0 (false). */ static int bool_to_int (value) - char *value; +const char *value; { return (value == 0 || *value == '\0' || (_rl_stricmp (value, "on") == 0) || @@ -1725,13 +1723,13 @@ char * rl_get_keymap_name_from_edit_mode () { if (rl_editing_mode == emacs_mode) - return "emacs"; + return (char*) "emacs"; #if defined (VI_MODE) else if (rl_editing_mode == vi_mode) - return "vi"; + return (char*) "vi"; #endif /* VI_MODE */ else - return "none"; + return (char*) "none"; } /* **************************************************************** */ @@ -1966,7 +1964,7 @@ rl_function_dumper (print_readably) fprintf (rl_outstream, "\n"); - for (i = 0; name = names[i]; i++) + for (i = 0; (name = names[i]); i++) { rl_command_func_t *function; char **invokers; @@ -2025,8 +2023,8 @@ rl_function_dumper (print_readably) rl_outstream. If an explicit argument is given, then print the output in such a way that it can be read back in. */ int -rl_dump_functions (count, key) - int count, key; +rl_dump_functions (int count __attribute__((unused)), + int key __attribute__((unused))) { if (rl_dispatching) fprintf (rl_outstream, "\r\n"); @@ -2105,8 +2103,7 @@ rl_macro_dumper (print_readably) } int -rl_dump_macros (count, key) - int count, key; +rl_dump_macros(int count __attribute__((unused)), int key __attribute__((unused))) { if (rl_dispatching) fprintf (rl_outstream, "\r\n"); @@ -2195,8 +2192,7 @@ rl_variable_dumper (print_readably) rl_outstream. If an explicit argument is given, then print the output in such a way that it can be read back in. */ int -rl_dump_variables (count, key) - int count, key; +rl_dump_variables(int count __attribute__((unused)), int key __attribute__((unused))) { if (rl_dispatching) fprintf (rl_outstream, "\r\n"); diff --git a/cmd-line-utils/readline/chardefs.h b/cmd-line-utils/readline/chardefs.h index cb04c982343..04a3b7a8e9c 100644 --- a/cmd-line-utils/readline/chardefs.h +++ b/cmd-line-utils/readline/chardefs.h @@ -59,7 +59,11 @@ #define largest_char 255 /* Largest character value. 
*/ #define CTRL_CHAR(c) ((c) < control_character_threshold && (((c) & 0x80) == 0)) +#if largest_char >= 255 +#define META_CHAR(c) ((c) > meta_character_threshold) +#else #define META_CHAR(c) ((c) > meta_character_threshold && (c) <= largest_char) +#endif #define CTRL(c) ((c) & control_character_mask) #define META(c) ((c) | meta_character_bit) diff --git a/cmd-line-utils/readline/complete.c b/cmd-line-utils/readline/complete.c index df0a698b81f..f4c361789b7 100644 --- a/cmd-line-utils/readline/complete.c +++ b/cmd-line-utils/readline/complete.c @@ -360,15 +360,15 @@ rl_complete (ignore, invoking_key) /* List the possible completions. See description of rl_complete (). */ int -rl_possible_completions (ignore, invoking_key) - int ignore, invoking_key; +rl_possible_completions (int ignore __attribute__((unused)), + int invoking_key __attribute__((unused))) { return (rl_complete_internal ('?')); } int -rl_insert_completions (ignore, invoking_key) - int ignore, invoking_key; +rl_insert_completions (int ignore __attribute__((unused)), + int invoking_key __attribute__((unused))) { return (rl_complete_internal ('*')); } @@ -760,10 +760,7 @@ print_filename (to_print, full_pathname) } static char * -rl_quote_filename (s, rtype, qcp) - char *s; - int rtype; - char *qcp; +rl_quote_filename (char *s, int rtype __attribute__((unused)), char *qcp) { char *r; @@ -871,7 +868,7 @@ _rl_find_completion_word (fp, dp) completion, so use the word break characters to find the substring on which to complete. */ #if defined (HANDLE_MULTIBYTE) - while (rl_point = _rl_find_prev_mbchar (rl_line_buffer, rl_point, MB_FIND_ANY)) + while ((rl_point = _rl_find_prev_mbchar (rl_line_buffer, rl_point, MB_FIND_ANY))) #else while (--rl_point) #endif @@ -1805,7 +1802,7 @@ rl_completion_matches (text, entry_function) match_list = (char **)xmalloc ((match_list_size + 1) * sizeof (char *)); match_list[1] = (char *)NULL; - while (string = (*entry_function) (text, matches)) + while ((string = (*entry_function) (text, matches))) { if (matches + 1 == match_list_size) match_list = (char **)xrealloc @@ -1855,7 +1852,7 @@ rl_username_completion_function (text, state) setpwent (); } - while (entry = getpwent ()) + while ((entry = getpwent ())) { /* Null usernames should result in all users as possible completions. */ if (namelen == 0 || (STREQN (username, entry->pw_name, namelen))) @@ -2091,8 +2088,7 @@ rl_filename_completion_function (text, state) hit the end of the match list, we restore the original unmatched text, ring the bell, and reset the counter to zero. */ int -rl_menu_complete (count, ignore) - int count, ignore; +rl_menu_complete (int count, int ignore __attribute__((unused))) { rl_compentry_func_t *our_func; int matching_filenames, found_quote; diff --git a/cmd-line-utils/readline/display.c b/cmd-line-utils/readline/display.c index 06eaa5e4be2..46b57325e33 100644 --- a/cmd-line-utils/readline/display.c +++ b/cmd-line-utils/readline/display.c @@ -218,7 +218,7 @@ expand_prompt (pmt, lp, lip, niflp, vlp) if (niflp) *niflp = 0; if (vlp) - *vlp = lp ? *lp : strlen (r); + *vlp = lp ? 
*lp : (int) strlen (r); return r; } @@ -435,7 +435,7 @@ rl_redisplay () return; if (!rl_display_prompt) - rl_display_prompt = ""; + rl_display_prompt = (char*) ""; if (invisible_line == 0) { @@ -757,7 +757,7 @@ rl_redisplay () c_pos = out; lb_linenum = newlines; } - for (i = in; i < in+wc_bytes; i++) + for (i = in; i < (int) (in+wc_bytes); i++) line[out++] = rl_line_buffer[i]; for (i = 0; i < wc_width; i++) CHECK_LPOS(); @@ -835,7 +835,7 @@ rl_redisplay () #define VIS_LLEN(l) ((l) > _rl_vis_botlin ? 0 : (vis_lbreaks[l+1] - vis_lbreaks[l])) #define INV_LLEN(l) (inv_lbreaks[l+1] - inv_lbreaks[l]) #define VIS_CHARS(line) (visible_line + vis_lbreaks[line]) -#define VIS_LINE(line) ((line) > _rl_vis_botlin) ? "" : VIS_CHARS(line) +#define VIS_LINE(line) ((line) > _rl_vis_botlin) ? (char*) "" : VIS_CHARS(line) #define INV_LINE(line) (invisible_line + inv_lbreaks[line]) /* For each line in the buffer, do the updating display. */ @@ -876,7 +876,7 @@ rl_redisplay () _rl_move_vert (linenum); _rl_move_cursor_relative (0, tt); _rl_clear_to_eol - ((linenum == _rl_vis_botlin) ? strlen (tt) : _rl_screenwidth); + ((linenum == _rl_vis_botlin) ? (int) strlen (tt) : _rl_screenwidth); } } _rl_vis_botlin = inv_botlin; @@ -1086,7 +1086,7 @@ update_line (old, new, current_line, omax, nmax, inv_botlin) int col_lendiff, col_temp; #if defined (HANDLE_MULTIBYTE) mbstate_t ps_new, ps_old; - int new_offset, old_offset, tmp; + int new_offset, old_offset; #endif /* If we're at the right edge of a terminal that supports xn, we're @@ -1837,7 +1837,7 @@ rl_reset_line_state () { rl_on_new_line (); - rl_display_prompt = rl_prompt ? rl_prompt : ""; + rl_display_prompt = rl_prompt ? rl_prompt : (char*) ""; forced_display = 1; return 0; } @@ -2212,7 +2212,7 @@ _rl_col_width (str, start, end) int start, end; { wchar_t wc; - mbstate_t ps = {0}; + mbstate_t ps; int tmp, point, width, max; if (end <= start) @@ -2221,6 +2221,7 @@ _rl_col_width (str, start, end) point = 0; max = end; + memset (&ps, 0, sizeof(ps)); while (point < start) { tmp = mbrlen (str + point, max, &ps); diff --git a/cmd-line-utils/readline/histexpand.c b/cmd-line-utils/readline/histexpand.c index 47f97e9a6f7..a09be00a859 100644 --- a/cmd-line-utils/readline/histexpand.c +++ b/cmd-line-utils/readline/histexpand.c @@ -87,14 +87,14 @@ char history_comment_char = '\0'; /* The list of characters which inhibit the expansion of text if found immediately following history_expansion_char. */ -char *history_no_expand_chars = " \t\n\r="; +char *history_no_expand_chars = (char*) " \t\n\r="; /* If set to a non-zero value, single quotes inhibit history expansion. The default is 0. */ int history_quotes_inhibit_expansion = 0; /* Used to split words by history_tokenize_internal. */ -char *history_word_delimiters = HISTORY_WORD_DELIMITERS; +char *history_word_delimiters = (char*) HISTORY_WORD_DELIMITERS; /* If set, this points to a function that is called to verify that a particular history expansion should be performed. */ @@ -203,7 +203,7 @@ get_history_event (string, caller_index, delimiting_quote) } /* Only a closing `?' or a newline delimit a substring search string. 
*/ - for (local_index = i; c = string[i]; i++) + for (local_index = i; (c = string[i]); i++) #if defined (HANDLE_MULTIBYTE) if (MB_CUR_MAX > 1 && rl_byte_oriented == 0) { diff --git a/cmd-line-utils/readline/input.c b/cmd-line-utils/readline/input.c index 1981061eac6..b2f8016050d 100644 --- a/cmd-line-utils/readline/input.c +++ b/cmd-line-utils/readline/input.c @@ -405,7 +405,7 @@ rl_read_key () else { /* If input is coming from a macro, then use that. */ - if (c = _rl_next_macro_key ()) + if ((c= _rl_next_macro_key ())) return (c); /* If the user has an event function, then call it periodically. */ diff --git a/cmd-line-utils/readline/isearch.c b/cmd-line-utils/readline/isearch.c index f7b0f1404e9..9071695dda8 100644 --- a/cmd-line-utils/readline/isearch.c +++ b/cmd-line-utils/readline/isearch.c @@ -68,7 +68,7 @@ static char *prev_line_found; static char *last_isearch_string; static int last_isearch_string_len; -static char *default_isearch_terminators = "\033\012"; +static char *default_isearch_terminators = (char*) "\033\012"; /* Search backwards through the history looking for a string which is typed interactively. Start with the current line. */ @@ -94,9 +94,8 @@ rl_forward_search_history (sign, key) WHERE is the history list number of the current line. If it is -1, then this line is the starting one. */ static void -rl_display_search (search_string, reverse_p, where) - char *search_string; - int reverse_p, where; +rl_display_search (char *search_string, int reverse_p, + int where __attribute__((unused))) { char *message; int msglen, searchlen; @@ -143,8 +142,7 @@ rl_display_search (search_string, reverse_p, where) DIRECTION is which direction to search; >= 0 means forward, < 0 means backwards. */ static int -rl_search_history (direction, invoking_key) - int direction, invoking_key; +rl_search_history (int direction, int invoking_key __attribute__((unused))) { /* The string that the user types in to search for. */ char *search_string; diff --git a/cmd-line-utils/readline/kill.c b/cmd-line-utils/readline/kill.c index 061bdafcf9a..4d31a8ff170 100644 --- a/cmd-line-utils/readline/kill.c +++ b/cmd-line-utils/readline/kill.c @@ -76,8 +76,7 @@ static int rl_yank_nth_arg_internal PARAMS((int, int, int)); /* How to say that you only want to save a certain amount of kill material. */ int -rl_set_retained_kills (num) - int num; +rl_set_retained_kills (int num __attribute__((unused))) { return 0; } @@ -293,8 +292,8 @@ rl_backward_kill_line (direction, ignore) /* Kill the whole line, no matter where point is. */ int -rl_kill_full_line (count, ignore) - int count, ignore; +rl_kill_full_line (int count __attribute__((unused)), + int ignore __attribute__((unused))) { rl_begin_undo_group (); rl_point = 0; @@ -311,8 +310,7 @@ rl_kill_full_line (count, ignore) /* This does what C-w does in Unix. We can't prevent people from using behaviour that they expect. */ int -rl_unix_word_rubout (count, key) - int count, key; +rl_unix_word_rubout (int count, int key __attribute__((unused))) { int orig_point; @@ -344,8 +342,7 @@ rl_unix_word_rubout (count, key) /* This deletes one filename component in a Unix pathname. That is, it deletes backward to directory separator (`/') or whitespace. */ int -rl_unix_filename_rubout (count, key) - int count, key; +rl_unix_filename_rubout (int count, int key __attribute__((unused))) { int orig_point, c; @@ -388,8 +385,8 @@ rl_unix_filename_rubout (count, key) into the line at all, and if you aren't, then you know what you are doing. 
*/ int -rl_unix_line_discard (count, key) - int count, key; +rl_unix_line_discard (int count __attribute__((unused)), + int key __attribute__((unused))) { if (rl_point == 0) rl_ding (); @@ -425,16 +422,16 @@ region_kill_internal (delete) /* Copy the text in the region to the kill ring. */ int -rl_copy_region_to_kill (count, ignore) - int count, ignore; +rl_copy_region_to_kill (int count __attribute__((unused)), + int key __attribute__((unused))) { return (region_kill_internal (0)); } /* Kill the text between the point and mark. */ int -rl_kill_region (count, ignore) - int count, ignore; +rl_kill_region (int count __attribute__((unused)), + int ignore __attribute__((unused))) { int r, npoint; @@ -498,8 +495,7 @@ rl_copy_backward_word (count, key) /* Yank back the last killed text. This ignores arguments. */ int -rl_yank (count, ignore) - int count, ignore; +rl_yank (int count __attribute__((unused)), int ignore __attribute__((unused))) { if (rl_kill_ring == 0) { @@ -517,8 +513,7 @@ rl_yank (count, ignore) delete that text from the line, rotate the index down, and yank back some other text. */ int -rl_yank_pop (count, key) - int count, key; +rl_yank_pop (int count __attribute__((unused)), int key __attribute__((unused))) { int l, n; diff --git a/cmd-line-utils/readline/macro.c b/cmd-line-utils/readline/macro.c index f7b77a831b8..8727285e181 100644 --- a/cmd-line-utils/readline/macro.c +++ b/cmd-line-utils/readline/macro.c @@ -189,8 +189,8 @@ _rl_kill_kbd_macro () definition to the end of the existing macro, and start by re-executing the existing macro. */ int -rl_start_kbd_macro (ignore1, ignore2) - int ignore1, ignore2; +rl_start_kbd_macro (int ignore1 __attribute__((unused)), + int ignore2 __attribute__((unused))) { if (RL_ISSTATE (RL_STATE_MACRODEF)) { @@ -214,8 +214,7 @@ rl_start_kbd_macro (ignore1, ignore2) A numeric argument says to execute the macro right now, that many times, counting the definition as the first time. */ int -rl_end_kbd_macro (count, ignore) - int count, ignore; +rl_end_kbd_macro (int count, int ignore __attribute__((unused))) { if (RL_ISSTATE (RL_STATE_MACRODEF) == 0) { @@ -234,8 +233,7 @@ rl_end_kbd_macro (count, ignore) /* Execute the most recently defined keyboard macro. COUNT says how many times to execute it. */ int -rl_call_last_kbd_macro (count, ignore) - int count, ignore; +rl_call_last_kbd_macro (int count, int ignore __attribute__((unused))) { if (current_macro == 0) _rl_abort_internal (); diff --git a/cmd-line-utils/readline/misc.c b/cmd-line-utils/readline/misc.c index 810b940edab..c8739d0d750 100644 --- a/cmd-line-utils/readline/misc.c +++ b/cmd-line-utils/readline/misc.c @@ -154,8 +154,7 @@ rl_digit_loop () /* Add the current digit to the argument in progress. */ int -rl_digit_argument (ignore, key) - int ignore, key; +rl_digit_argument (int ignore __attribute__((unused)), int key) { rl_execute_next (key); return (rl_digit_loop ()); @@ -184,8 +183,8 @@ _rl_init_argument () Read a key. If the key has nothing to do with arguments, then dispatch on it. If the key is the abort character then abort. 
*/ int -rl_universal_argument (count, key) - int count, key; +rl_universal_argument (int count __attribute__((unused)), + int key __attribute__((unused))) { rl_numeric_arg *= 4; return (rl_digit_loop ()); @@ -314,9 +313,7 @@ _rl_history_set_point () } void -rl_replace_from_history (entry, flags) - HIST_ENTRY *entry; - int flags; /* currently unused */ +rl_replace_from_history (HIST_ENTRY *entry, int flags __attribute__((unused))) { /* Can't call with `1' because rl_undo_list might point to an undo list from a history entry, just like we're setting up here. */ @@ -342,16 +339,15 @@ rl_replace_from_history (entry, flags) /* Meta-< goes to the start of the history. */ int -rl_beginning_of_history (count, key) - int count, key; +rl_beginning_of_history (int count __attribute__((unused)), int key) { return (rl_get_previous_history (1 + where_history (), key)); } /* Meta-> goes to the end of the history. (The current line). */ int -rl_end_of_history (count, key) - int count, key; +rl_end_of_history (int count __attribute__((unused)), + int key __attribute__((unused))) { rl_maybe_replace_line (); using_history (); @@ -455,8 +451,7 @@ rl_get_previous_history (count, key) /* **************************************************************** */ /* How to toggle back and forth between editing modes. */ int -rl_vi_editing_mode (count, key) - int count, key; +rl_vi_editing_mode (int count __attribute__((unused)), int key) { #if defined (VI_MODE) _rl_set_insert_mode (RL_IM_INSERT, 1); /* vi mode ignores insert mode */ @@ -468,8 +463,8 @@ rl_vi_editing_mode (count, key) } int -rl_emacs_editing_mode (count, key) - int count, key; +rl_emacs_editing_mode (int count __attribute__((unused)), + int key __attribute__((unused))) { rl_editing_mode = emacs_mode; _rl_set_insert_mode (RL_IM_INSERT, 1); /* emacs mode default is insert mode */ @@ -479,8 +474,7 @@ rl_emacs_editing_mode (count, key) /* Function for the rest of the library to use to set insert/overwrite mode. */ void -_rl_set_insert_mode (im, force) - int im, force; +_rl_set_insert_mode (int im, int force __attribute__((unused))) { #ifdef CURSOR_MODE _rl_set_cursor (im, force); @@ -492,8 +486,7 @@ _rl_set_insert_mode (im, force) /* Toggle overwrite mode. A positive explicit argument selects overwrite mode. A negative or zero explicit argument selects insert mode. 
*/ int -rl_overwrite_mode (count, key) - int count, key; +rl_overwrite_mode (int count, int key __attribute__((unused))) { if (rl_explicit_arg == 0) _rl_set_insert_mode (rl_insert_mode ^ 1, 0); diff --git a/cmd-line-utils/readline/nls.c b/cmd-line-utils/readline/nls.c index 4f28152f316..73ad0227195 100644 --- a/cmd-line-utils/readline/nls.c +++ b/cmd-line-utils/readline/nls.c @@ -111,7 +111,7 @@ _rl_init_eightbit () if (lspec == 0 || *lspec == 0) lspec = setlocale (LC_CTYPE, (char *)NULL); if (lspec == 0) - lspec = ""; + lspec = (char*) ""; t = setlocale (LC_CTYPE, lspec); if (t && *t && (t[0] != 'C' || t[1]) && (STREQ (t, "POSIX") == 0)) diff --git a/cmd-line-utils/readline/readline.c b/cmd-line-utils/readline/readline.c index e82db84c9dc..dd3724a86d7 100644 --- a/cmd-line-utils/readline/readline.c +++ b/cmd-line-utils/readline/readline.c @@ -83,7 +83,9 @@ static void bind_arrow_keys_internal PARAMS((Keymap)); static void bind_arrow_keys PARAMS((void)); static void readline_default_bindings PARAMS((void)); +#ifdef NOT_USED static void reset_default_bindings PARAMS((void)); +#endif /* **************************************************************** */ /* */ @@ -866,12 +868,14 @@ readline_default_bindings () /* Reset the default bindings for the terminal special characters we're interested in back to rl_insert and read the new ones. */ +#ifdef NOT_USED static void reset_default_bindings () { rl_tty_unset_default_bindings (_rl_keymap); rl_tty_set_default_bindings (_rl_keymap); } +#endif /* Bind some common arrow key sequences in MAP. */ static void diff --git a/cmd-line-utils/readline/rltty.c b/cmd-line-utils/readline/rltty.c index 3e9c71c8df1..ffbae1e08af 100644 --- a/cmd-line-utils/readline/rltty.c +++ b/cmd-line-utils/readline/rltty.c @@ -716,8 +716,7 @@ rl_deprep_terminal () /* **************************************************************** */ int -rl_restart_output (count, key) - int count, key; +rl_restart_output(int count __attribute__((unused)), int key __attribute__((unused))) { int fildes = fileno (rl_outstream); #if defined (TIOCSTART) @@ -749,8 +748,7 @@ rl_restart_output (count, key) } int -rl_stop_output (count, key) - int count, key; +rl_stop_output(int count __attribute__((unused)), int key __attribute__((unused))) { int fildes = fileno (rl_instream); @@ -867,7 +865,6 @@ rltty_set_default_bindings (kmap) { TIOTYPE ttybuff; int tty; - static int called = 0; tty = fileno (rl_instream); diff --git a/cmd-line-utils/readline/search.c b/cmd-line-utils/readline/search.c index 1878d2bf031..6479427be2f 100644 --- a/cmd-line-utils/readline/search.c +++ b/cmd-line-utils/readline/search.c @@ -303,8 +303,7 @@ noninc_search (dir, pchar) /* Search forward through the history list for a string. If the vi-mode code calls this, KEY will be `?'. */ int -rl_noninc_forward_search (count, key) - int count, key; +rl_noninc_forward_search (int count __attribute__((unused)), int key) { noninc_search (1, (key == '?') ? '?' : 0); return 0; @@ -313,8 +312,7 @@ rl_noninc_forward_search (count, key) /* Reverse search the history list for a string. If the vi-mode code calls this, KEY will be `/'. */ int -rl_noninc_reverse_search (count, key) - int count, key; +rl_noninc_reverse_search (int count __attribute__((unused)), int key) { noninc_search (-1, (key == '/') ? '/' : 0); return 0; @@ -323,8 +321,8 @@ rl_noninc_reverse_search (count, key) /* Search forward through the history list for the last string searched for. If there is no saved search string, abort. 
*/ int -rl_noninc_forward_search_again (count, key) - int count, key; +rl_noninc_forward_search_again (int count __attribute__((unused)), + int key __attribute__((unused))) { if (!noninc_search_string) { @@ -338,8 +336,8 @@ rl_noninc_forward_search_again (count, key) /* Reverse search in the history list for the last string searched for. If there is no saved search string, abort. */ int -rl_noninc_reverse_search_again (count, key) - int count, key; +rl_noninc_reverse_search_again (int count __attribute__((unused)), + int key __attribute__((unused))) { if (!noninc_search_string) { diff --git a/cmd-line-utils/readline/terminal.c b/cmd-line-utils/readline/terminal.c index 3545fce5b85..4b900c5d860 100644 --- a/cmd-line-utils/readline/terminal.c +++ b/cmd-line-utils/readline/terminal.c @@ -344,7 +344,7 @@ get_term_capabilities (bp) #if !defined (__DJGPP__) /* XXX - doesn't DJGPP have a termcap library? */ register int i; - for (i = 0; i < NUM_TC_STRINGS; i++) + for (i = 0; i < (int) NUM_TC_STRINGS; i++) *(tc_strings[i].tc_value) = tgetstr ((char *)tc_strings[i].tc_var, bp); #endif tcap_initialized = 1; @@ -410,7 +410,7 @@ _rl_init_terminal_io (terminal_name) /* Everything below here is used by the redisplay code (tputs). */ _rl_screenchars = _rl_screenwidth * _rl_screenheight; - _rl_term_cr = "\r"; + _rl_term_cr = (char*) "\r"; _rl_term_im = _rl_term_ei = _rl_term_ic = _rl_term_IC = (char *)NULL; _rl_term_up = _rl_term_dc = _rl_term_DC = _rl_visible_bell = (char *)NULL; _rl_term_ku = _rl_term_kd = _rl_term_kl = _rl_term_kr = (char *)NULL; @@ -427,7 +427,7 @@ _rl_init_terminal_io (terminal_name) tgoto if _rl_term_IC or _rl_term_DC is defined, but just in case we change that later... */ PC = '\0'; - BC = _rl_term_backspace = "\b"; + BC = _rl_term_backspace = (char*) "\b"; UP = _rl_term_up; return 0; @@ -442,7 +442,7 @@ _rl_init_terminal_io (terminal_name) UP = _rl_term_up; if (!_rl_term_cr) - _rl_term_cr = "\r"; + _rl_term_cr = (char*) "\r"; _rl_term_autowrap = tgetflag ("am") && tgetflag ("xn"); @@ -502,7 +502,7 @@ rl_get_termcap (cap) if (tcap_initialized == 0) return ((char *)NULL); - for (i = 0; i < NUM_TC_STRINGS; i++) + for (i = 0; i < (int) NUM_TC_STRINGS; i++) { if (tc_strings[i].tc_var[0] == cap[0] && strcmp (tc_strings[i].tc_var, cap) == 0) return *(tc_strings[i].tc_value); diff --git a/cmd-line-utils/readline/text.c b/cmd-line-utils/readline/text.c index ad7b53ec422..89457be37cd 100644 --- a/cmd-line-utils/readline/text.c +++ b/cmd-line-utils/readline/text.c @@ -402,8 +402,7 @@ rl_backward (count, key) /* Move to the beginning of the line. */ int -rl_beg_of_line (count, key) - int count, key; +rl_beg_of_line (int count __attribute__((unused)), int key __attribute__((unused))) { rl_point = 0; return 0; @@ -411,8 +410,7 @@ rl_beg_of_line (count, key) /* Move to the end of the line. */ int -rl_end_of_line (count, key) - int count, key; +rl_end_of_line (int count __attribute__((unused)), int key __attribute__((unused))) { rl_point = rl_end; return 0; @@ -508,8 +506,7 @@ rl_backward_word (count, key) /* Clear the current line. Numeric argument to C-l does this. 
*/ int -rl_refresh_line (ignore1, ignore2) - int ignore1, ignore2; +rl_refresh_line (int count __attribute__((unused)), int key __attribute__((unused))) { int curr_line; @@ -547,8 +544,7 @@ rl_clear_screen (count, key) } int -rl_arrow_keys (count, c) - int count, c; +rl_arrow_keys (int count, int c __attribute__((unused))) { int ch; @@ -596,7 +592,7 @@ rl_arrow_keys (count, c) #ifdef HANDLE_MULTIBYTE static char pending_bytes[MB_LEN_MAX]; static int pending_bytes_length = 0; -static mbstate_t ps = {0}; +static mbstate_t ps; #endif /* Insert the character C at the current location, moving point forward. @@ -832,8 +828,7 @@ rl_insert (count, c) /* Insert the next typed character verbatim. */ int -rl_quoted_insert (count, key) - int count, key; +rl_quoted_insert (int count, int key __attribute__((unused))) { int c; @@ -854,8 +849,7 @@ rl_quoted_insert (count, key) /* Insert a tab character. */ int -rl_tab_insert (count, key) - int count, key; +rl_tab_insert (int count, int key __attribute__((unused))) { return (_rl_insert_char (count, '\t')); } @@ -864,8 +858,7 @@ rl_tab_insert (count, key) KEY is the key that invoked this command. I guess it could have meaning in the future. */ int -rl_newline (count, key) - int count, key; +rl_newline (int count __attribute__((unused)), int key __attribute__((unused))) { rl_done = 1; @@ -898,8 +891,8 @@ rl_newline (count, key) is just a stub, you bind keys to it and the code in _rl_dispatch () is special cased. */ int -rl_do_lowercase_version (ignore1, ignore2) - int ignore1, ignore2; +rl_do_lowercase_version (int count __attribute__((unused)), + int key __attribute__((unused))) { return 0; } @@ -1093,8 +1086,8 @@ rl_rubout_or_delete (count, key) /* Delete all spaces and tabs around point. */ int -rl_delete_horizontal_space (count, ignore) - int count, ignore; +rl_delete_horizontal_space (int count __attribute__((unused)), + int key __attribute__((unused))) { int start = rl_point; @@ -1134,14 +1127,13 @@ rl_delete_or_show_completions (count, key) /* Turn the current line into a comment in shell history. A K*rn shell style function. */ int -rl_insert_comment (count, key) - int count, key; +rl_insert_comment (int count __attribute__((unused)), int key) { char *rl_comment_text; int rl_comment_len; rl_beg_of_line (1, key); - rl_comment_text = _rl_comment_begin ? _rl_comment_begin : RL_COMMENT_BEGIN_DEFAULT; + rl_comment_text = _rl_comment_begin ? _rl_comment_begin : (char*) RL_COMMENT_BEGIN_DEFAULT; if (rl_explicit_arg == 0) rl_insert_text (rl_comment_text); @@ -1173,24 +1165,21 @@ rl_insert_comment (count, key) /* Uppercase the word at point. */ int -rl_upcase_word (count, key) - int count, key; +rl_upcase_word (int count, int key __attribute__((unused))) { return (rl_change_case (count, UpCase)); } /* Lowercase the word at point. */ int -rl_downcase_word (count, key) - int count, key; +rl_downcase_word (int count, int key __attribute__((unused))) { return (rl_change_case (count, DownCase)); } /* Upcase the first letter, downcase the rest. */ int -rl_capitalize_word (count, key) - int count, key; +rl_capitalize_word (int count, int key __attribute__((unused))) { return (rl_change_case (count, CapCase)); } @@ -1314,8 +1303,7 @@ rl_transpose_words (count, key) /* Transpose the characters at point. If point is at the end of the line, then transpose the characters before point. 
*/ int -rl_transpose_chars (count, key) - int count, key; +rl_transpose_chars (int count, int key __attribute__((unused))) { #if defined (HANDLE_MULTIBYTE) char *dummy; @@ -1486,15 +1474,13 @@ _rl_char_search (count, fdir, bdir) #endif /* !HANDLE_MULTIBYTE */ int -rl_char_search (count, key) - int count, key; +rl_char_search (int count, int key __attribute__((unused))) { return (_rl_char_search (count, FFIND, BFIND)); } int -rl_backward_char_search (count, key) - int count, key; +rl_backward_char_search (int count, int key __attribute__((unused))) { return (_rl_char_search (count, BFIND, FFIND)); } @@ -1519,16 +1505,15 @@ _rl_set_mark_at_pos (position) /* A bindable command to set the mark. */ int -rl_set_mark (count, key) - int count, key; +rl_set_mark (int count, int key __attribute__((unused))) { return (_rl_set_mark_at_pos (rl_explicit_arg ? count : rl_point)); } /* Exchange the position of mark and point. */ int -rl_exchange_point_and_mark (count, key) - int count, key; +rl_exchange_point_and_mark (int count __attribute__((unused)), + int key __attribute__((unused))) { if (rl_mark > rl_end) rl_mark = -1; diff --git a/cmd-line-utils/readline/tilde.c b/cmd-line-utils/readline/tilde.c index c44357ffbea..91eead0d9e2 100644 --- a/cmd-line-utils/readline/tilde.c +++ b/cmd-line-utils/readline/tilde.c @@ -190,7 +190,7 @@ tilde_expand (string) int result_size, result_index; result_index = result_size = 0; - if (result = strchr (string, '~')) + if ((result = strchr (string, '~'))) result = (char *)xmalloc (result_size = (strlen (string) + 16)); else result = (char *)xmalloc (result_size = (strlen (string) + 1)); diff --git a/cmd-line-utils/readline/undo.c b/cmd-line-utils/readline/undo.c index 48baded332a..b4b5a6511ba 100644 --- a/cmd-line-utils/readline/undo.c +++ b/cmd-line-utils/readline/undo.c @@ -175,7 +175,7 @@ _rl_fix_last_undo_of_type (type, start, end) for (rl = rl_undo_list; rl; rl = rl->next) { - if (rl->what == type) + if (rl->what == (uint) type) { rl->start = start; rl->end = end; @@ -226,8 +226,7 @@ rl_modifying (start, end) /* Revert the current line to its previous state. */ int -rl_revert_line (count, key) - int count, key; +rl_revert_line (int count __attribute__((unused)), int key __attribute__((unused))) { if (!rl_undo_list) rl_ding (); @@ -241,8 +240,7 @@ rl_revert_line (count, key) /* Do some undoing of things that were done. */ int -rl_undo_command (count, key) - int count, key; +rl_undo_command (int count, int key __attribute__((unused))) { if (count < 0) return 0; /* Nothing to do. */ diff --git a/cmd-line-utils/readline/util.c b/cmd-line-utils/readline/util.c index 43478aaf1ac..d5fe51a7bf2 100644 --- a/cmd-line-utils/readline/util.c +++ b/cmd-line-utils/readline/util.c @@ -95,15 +95,13 @@ _rl_abort_internal () } int -rl_abort (count, key) - int count, key; +rl_abort (int count __attribute__((unused)), int key __attribute__((unused))) { return (_rl_abort_internal ()); } int -rl_tty_status (count, key) - int count, key; +rl_tty_status (int count __attribute__((unused)), int key __attribute__((unused))) { #if defined (TIOCSTAT) ioctl (1, TIOCSTAT, (char *)0); @@ -152,8 +150,7 @@ rl_extend_line_buffer (len) /* A function for simple tilde expansion. 
*/ int -rl_tilde_expand (ignore, key) - int ignore, key; +rl_tilde_expand (int ignore __attribute__((unused)), int key __attribute__((unused))) { register int start, end; char *homedir, *temp; diff --git a/cmd-line-utils/readline/vi_mode.c b/cmd-line-utils/readline/vi_mode.c index 9a8cfdd7200..4d1cc56117d 100644 --- a/cmd-line-utils/readline/vi_mode.c +++ b/cmd-line-utils/readline/vi_mode.c @@ -112,7 +112,7 @@ _rl_vi_initialize_line () { register int i; - for (i = 0; i < sizeof (vi_mark_chars) / sizeof (int); i++) + for (i = 0; i < (int) (sizeof (vi_mark_chars) / sizeof (int)); i++) vi_mark_chars[i] = -1; } @@ -166,8 +166,7 @@ _rl_vi_stuff_insert (count) redo a text modification command. The default for _rl_vi_last_command puts you back into insert mode. */ int -rl_vi_redo (count, c) - int count, c; +rl_vi_redo (int count, int c __attribute__((unused))) { int r; @@ -205,8 +204,7 @@ rl_vi_undo (count, key) /* Yank the nth arg from the previous line into this line at point. */ int -rl_vi_yank_arg (count, key) - int count, key; +rl_vi_yank_arg (int count, int key __attribute__((unused))) { /* Readline thinks that the first word on a line is the 0th, while vi thinks the first word on a line is the 1st. Compensate. */ @@ -286,8 +284,7 @@ rl_vi_search (count, key) /* Completion, from vi's point of view. */ int -rl_vi_complete (ignore, key) - int ignore, key; +rl_vi_complete (int ignore __attribute__((unused)), int key) { if ((rl_point < rl_end) && (!whitespace (rl_line_buffer[rl_point]))) { @@ -313,8 +310,7 @@ rl_vi_complete (ignore, key) /* Tilde expansion for vi mode. */ int -rl_vi_tilde_expand (ignore, key) - int ignore, key; +rl_vi_tilde_expand (int ignore __attribute__((unused)), int key) { rl_tilde_expand (0, key); rl_vi_start_inserting (key, 1, rl_arg_sign); @@ -384,8 +380,7 @@ rl_vi_end_word (count, key) /* Move forward a word the way that 'W' does. 
*/ int -rl_vi_fWord (count, ignore) - int count, ignore; +rl_vi_fWord (int count, int ignore __attribute__((unused))) { while (count-- && rl_point < (rl_end - 1)) { @@ -401,8 +396,7 @@ rl_vi_fWord (count, ignore) } int -rl_vi_bWord (count, ignore) - int count, ignore; +rl_vi_bWord (int count, int ignore __attribute__((unused))) { while (count-- && rl_point > 0) { @@ -425,8 +419,7 @@ rl_vi_bWord (count, ignore) } int -rl_vi_eWord (count, ignore) - int count, ignore; +rl_vi_eWord(int count, int ignore __attribute__((unused))) { while (count-- && rl_point < (rl_end - 1)) { @@ -456,8 +449,7 @@ rl_vi_eWord (count, ignore) } int -rl_vi_fword (count, ignore) - int count, ignore; +rl_vi_fword (int count, int ignore __attribute__((unused))) { while (count-- && rl_point < (rl_end - 1)) { @@ -482,8 +474,7 @@ rl_vi_fword (count, ignore) } int -rl_vi_bword (count, ignore) - int count, ignore; +rl_vi_bword (int count, int ignore __attribute__((unused))) { while (count-- && rl_point > 0) { @@ -521,8 +512,7 @@ rl_vi_bword (count, ignore) } int -rl_vi_eword (count, ignore) - int count, ignore; +rl_vi_eword (int count, int ignore __attribute__((unused))) { while (count-- && rl_point < rl_end - 1) { @@ -546,8 +536,7 @@ rl_vi_eword (count, ignore) } int -rl_vi_insert_beg (count, key) - int count, key; +rl_vi_insert_beg (int count __attribute__((unused)), int key) { rl_beg_of_line (1, key); rl_vi_insertion_mode (1, key); @@ -555,8 +544,7 @@ rl_vi_insert_beg (count, key) } int -rl_vi_append_mode (count, key) - int count, key; +rl_vi_append_mode (int count __attribute__((unused)), int key) { if (rl_point < rl_end) { @@ -575,8 +563,7 @@ rl_vi_append_mode (count, key) } int -rl_vi_append_eol (count, key) - int count, key; +rl_vi_append_eol (int count __attribute__((unused)), int key) { rl_end_of_line (1, key); rl_vi_append_mode (1, key); @@ -585,8 +572,7 @@ rl_vi_append_eol (count, key) /* What to do in the case of C-d. */ int -rl_vi_eof_maybe (count, c) - int count, c; +rl_vi_eof_maybe (int count __attribute__((unused)), int c __attribute__((unused))) { return (rl_newline (1, '\n')); } @@ -596,8 +582,7 @@ rl_vi_eof_maybe (count, c) /* Switching from one mode to the other really just involves switching keymaps. 
*/ int -rl_vi_insertion_mode (count, key) - int count, key; +rl_vi_insertion_mode (int count __attribute__((unused)), int key) { _rl_keymap = vi_insertion_keymap; _rl_vi_last_key_before_insert = key; @@ -659,8 +644,7 @@ _rl_vi_done_inserting () } int -rl_vi_movement_mode (count, key) - int count, key; +rl_vi_movement_mode (int count __attribute__((unused)), int key) { if (rl_point > 0) rl_backward_char (1, key); @@ -729,8 +713,7 @@ _rl_vi_change_mbchar_case (count) #endif int -rl_vi_change_case (count, ignore) - int count, ignore; +rl_vi_change_case (int count, int ignore __attribute__((unused))) { int c, p; @@ -959,8 +942,7 @@ rl_digit_loop1 () } int -rl_vi_delete_to (count, key) - int count, key; +rl_vi_delete_to (int count __attribute__((unused)), int key) { int c; @@ -985,8 +967,7 @@ rl_vi_delete_to (count, key) } int -rl_vi_change_to (count, key) - int count, key; +rl_vi_change_to (int count __attribute__((unused)), int key) { int c, start_pos; @@ -1038,8 +1019,7 @@ rl_vi_change_to (count, key) } int -rl_vi_yank_to (count, key) - int count, key; +rl_vi_yank_to (int count __attribute__((unused)), int key) { int c, save = rl_point; @@ -1094,8 +1074,7 @@ rl_vi_delete (count, key) } int -rl_vi_back_to_indent (count, key) - int count, key; +rl_vi_back_to_indent (int count __attribute__((unused)), int key) { rl_beg_of_line (1, key); while (rl_point < rl_end && whitespace (rl_line_buffer[rl_point])) @@ -1104,8 +1083,7 @@ rl_vi_back_to_indent (count, key) } int -rl_vi_first_print (count, key) - int count, key; +rl_vi_first_print (int count __attribute__((unused)), int key) { return (rl_vi_back_to_indent (1, key)); } @@ -1173,8 +1151,7 @@ rl_vi_char_search (count, key) /* Match brackets */ int -rl_vi_match (ignore, key) - int ignore, key; +rl_vi_match (int ignore __attribute__((unused)), int key) { int count = 1, brack, pos, tmp, pre; @@ -1284,8 +1261,7 @@ rl_vi_bracktype (c) for test against 033 or ^C. Make sure that _rl_read_mbchar does this right. */ int -rl_vi_change_char (count, key) - int count, key; +rl_vi_change_char (int count, int key __attribute__((unused))) { int c, p; @@ -1389,8 +1365,7 @@ rl_vi_overstrike_delete (count, key) } int -rl_vi_replace (count, key) - int count, key; +rl_vi_replace (int count __attribute__((unused)), int key __attribute__((unused))) { int i; @@ -1450,8 +1425,7 @@ rl_vi_possible_completions() /* Functions to save and restore marks. */ int -rl_vi_set_mark (count, key) - int count, key; +rl_vi_set_mark (int count __attribute__((unused)), int key __attribute__((unused))) { int ch; @@ -1470,8 +1444,7 @@ rl_vi_set_mark (count, key) } int -rl_vi_goto_mark (count, key) - int count, key; +rl_vi_goto_mark (int count __attribute__((unused)), int key __attribute__((unused))) { int ch; diff --git a/dbug/dbug_analyze.c b/dbug/dbug_analyze.c index 1db056d549c..f1caeea2be4 100644 --- a/dbug/dbug_analyze.c +++ b/dbug/dbug_analyze.c @@ -169,7 +169,7 @@ register unsigned long *child_time; *name_pos = temp->pos; *time_entered = temp->time; *child_time = temp->children; - DBUG_PRINT ("pop", ("%d %d %d",*name_pos,*time_entered,*child_time)); + DBUG_PRINT ("pop", ("%d %lu %lu",*name_pos,*time_entered,*child_time)); rtnval = stacktop--; } DBUG_RETURN (rtnval); @@ -334,12 +334,12 @@ FILE *inf; * function is found on the stack. 
*/ while (pop (&oldpos, &oldtime, &oldchild)) { - DBUG_PRINT ("popped", ("%d %d", oldtime, oldchild)); + DBUG_PRINT ("popped", ("%lu %lu", oldtime, oldchild)); time = fn_time - oldtime; t = top (); t -> children += time; DBUG_PRINT ("update", ("%s", modules[t -> pos].name)); - DBUG_PRINT ("update", ("%d", t -> children)); + DBUG_PRINT ("update", ("%lu", t -> children)); time -= oldchild; modules[oldpos].m_time += time; modules[oldpos].m_calls++; @@ -520,19 +520,19 @@ register unsigned long int *s_calls, *s_time; unsigned long int calls, time; DBUG_ENTER ("out_body"); - DBUG_PRINT ("out_body", ("%d,%d",*s_calls,*s_time)); + DBUG_PRINT ("out_body", ("%lu,%lu",*s_calls,*s_time)); if (root == MAXPROCS) { - DBUG_PRINT ("out_body", ("%d,%d",*s_calls,*s_time)); + DBUG_PRINT ("out_body", ("%lu,%lu",*s_calls,*s_time)); } else { while (root != MAXPROCS) { out_body (outf, s_table[root].lchild,s_calls,s_time); out_item (outf, &modules[s_table[root].pos],&calls,&time); - DBUG_PRINT ("out_body", ("-- %d -- %d --", calls, time)); + DBUG_PRINT ("out_body", ("-- %lu -- %lu --", calls, time)); *s_calls += calls; *s_time += time; root = s_table[root].rchild; } - DBUG_PRINT ("out_body", ("%d,%d", *s_calls, *s_time)); + DBUG_PRINT ("out_body", ("%lu,%lu", *s_calls, *s_time)); } DBUG_VOID_RETURN; } diff --git a/extra/yassl/src/ssl.cpp b/extra/yassl/src/ssl.cpp index a008ea7228b..fe4661b5946 100644 --- a/extra/yassl/src/ssl.cpp +++ b/extra/yassl/src/ssl.cpp @@ -918,7 +918,7 @@ void ERR_print_errors_fp(FILE* /*fp*/) char* ERR_error_string(unsigned long errNumber, char* buffer) { - static char* msg = "Please supply a buffer for error string"; + static char* msg = (char*) "Please supply a buffer for error string"; if (buffer) { SetErrorString(YasslError(errNumber), buffer); diff --git a/extra/yassl/testsuite/testsuite.cpp b/extra/yassl/testsuite/testsuite.cpp index 1cf6a78ebe7..49113a552cd 100644 --- a/extra/yassl/testsuite/testsuite.cpp +++ b/extra/yassl/testsuite/testsuite.cpp @@ -86,8 +86,8 @@ int main(int argc, char** argv) // input output compare byte input[TaoCrypt::MD5::DIGEST_SIZE]; byte output[TaoCrypt::MD5::DIGEST_SIZE]; - file_test("input", input); - file_test("output", output); + file_test((char*) "input", input); + file_test((char*) "output", output); assert(memcmp(input, output, sizeof(input)) == 0); printf("\nAll tests passed!\n"); diff --git a/heap/_check.c b/heap/_check.c index cc832f8ed5b..c861fdb582f 100644 --- a/heap/_check.c +++ b/heap/_check.c @@ -88,7 +88,8 @@ int heap_check_heap(HP_INFO *info, my_bool print_status) if (records != share->records || deleted != share->deleted) { DBUG_PRINT("error",("Found rows: %lu (%lu) deleted %lu (%lu)", - records, share->records, deleted, share->deleted)); + records, (ulong) share->records, + deleted, (ulong) share->deleted)); error= 1; } *info= save_info; @@ -100,9 +101,9 @@ static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records, ulong blength, my_bool print_status) { int error; - uint i,found,max_links,seek,links; - uint rec_link; /* Only used with debugging */ - uint hash_buckets_found; + ulong i,found,max_links,seek,links; + ulong rec_link; /* Only used with debugging */ + ulong hash_buckets_found; HASH_INFO *hash_info; error=0; @@ -123,7 +124,9 @@ static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records, blength, records)) != i) { - DBUG_PRINT("error",("Record in wrong link: Link %d Record: 0x%lx Record-link %d", i,hash_info->ptr_to_rec,rec_link)); + DBUG_PRINT("error", + ("Record in wrong link: Link %lu Record: 0x%lx 
Record-link %lu", + i, (long) hash_info->ptr_to_rec, rec_link)); error=1; } else @@ -141,18 +144,18 @@ static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records, if (keydef->hash_buckets != hash_buckets_found) { DBUG_PRINT("error",("Found %ld buckets, stats shows %ld buckets", - hash_buckets_found, keydef->hash_buckets)); + hash_buckets_found, (long) keydef->hash_buckets)); error=1; } DBUG_PRINT("info", - ("records: %ld seeks: %d max links: %d hitrate: %.2f " - "buckets: %d", + ("records: %ld seeks: %lu max links: %lu hitrate: %.2f " + "buckets: %lu", records,seek,max_links, (float) seek / (float) (records ? records : 1), hash_buckets_found)); if (print_status) - printf("Key: %d records: %ld seeks: %d max links: %d " - "hitrate: %.2f buckets: %d\n", + printf("Key: %d records: %ld seeks: %lu max links: %lu " + "hitrate: %.2f buckets: %lu\n", keynr, records, seek, max_links, (float) seek / (float) (records ? records : 1), hash_buckets_found); @@ -180,8 +183,8 @@ static int check_one_rb_key(HP_INFO *info, uint keynr, ulong records, key_length, SEARCH_FIND | SEARCH_SAME, not_used)) { error= 1; - DBUG_PRINT("error",("Record in wrong link: key: %d Record: 0x%lx\n", - keynr, recpos)); + DBUG_PRINT("error",("Record in wrong link: key: %u Record: 0x%lx\n", + keynr, (long) recpos)); } else found++; diff --git a/heap/hp_delete.c b/heap/hp_delete.c index f18c5e7054c..2ef57624e77 100644 --- a/heap/hp_delete.c +++ b/heap/hp_delete.c @@ -24,7 +24,7 @@ int heap_delete(HP_INFO *info, const byte *record) HP_SHARE *share=info->s; HP_KEYDEF *keydef, *end, *p_lastinx; DBUG_ENTER("heap_delete"); - DBUG_PRINT("enter",("info: %lx record: 0x%lx",info,record)); + DBUG_PRINT("enter",("info: 0x%lx record: 0x%lx", (long) info, (long) record)); test_active(info); @@ -144,7 +144,7 @@ int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo, info->current_hash_ptr=last_ptr; info->current_ptr = last_ptr ? 
last_ptr->ptr_to_rec : 0; DBUG_PRINT("info",("Corrected current_ptr to point at: 0x%lx", - info->current_ptr)); + (long) info->current_ptr)); } empty=pos; if (gpos) diff --git a/heap/hp_hash.c b/heap/hp_hash.c index 77f3cf6d80b..6a537906929 100644 --- a/heap/hp_hash.c +++ b/heap/hp_hash.c @@ -120,7 +120,7 @@ byte *hp_search(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *key, { switch (nextflag) { case 0: /* Search after key */ - DBUG_PRINT("exit",("found key at %d",pos->ptr_to_rec)); + DBUG_PRINT("exit", ("found key at 0x%lx", (long) pos->ptr_to_rec)); info->current_hash_ptr=pos; DBUG_RETURN(info->current_ptr= pos->ptr_to_rec); case 1: /* Search next */ diff --git a/heap/hp_open.c b/heap/hp_open.c index fd937229b0d..f50478c8b3d 100644 --- a/heap/hp_open.c +++ b/heap/hp_open.c @@ -64,7 +64,8 @@ HP_INFO *heap_open(const char *name, int mode) info->opt_flag= READ_CHECK_USED; /* Check when changing */ #endif DBUG_PRINT("exit",("heap: 0x%lx reclength: %d records_in_block: %d", - info,share->reclength,share->block.records_in_block)); + (long) info, share->reclength, + share->block.records_in_block)); DBUG_RETURN(info); } @@ -82,7 +83,7 @@ HP_SHARE *hp_find_named_heap(const char *name) info= (HP_SHARE*) pos->data; if (!strcmp(name, info->name)) { - DBUG_PRINT("exit", ("Old heap_database: 0x%lx",info)); + DBUG_PRINT("exit", ("Old heap_database: 0x%lx", (long) info)); DBUG_RETURN(info); } } diff --git a/heap/hp_rkey.c b/heap/hp_rkey.c index f5f22a877a1..f02d44cc456 100644 --- a/heap/hp_rkey.c +++ b/heap/hp_rkey.c @@ -23,7 +23,7 @@ int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key, HP_SHARE *share= info->s; HP_KEYDEF *keyinfo= share->keydef + inx; DBUG_ENTER("heap_rkey"); - DBUG_PRINT("enter",("base: 0x%lx inx: %d",info,inx)); + DBUG_PRINT("enter",("info: 0x%lx inx: %d", (long) info, inx)); if ((uint) inx >= share->keys) { diff --git a/heap/hp_rrnd.c b/heap/hp_rrnd.c index 4daa3a06377..2f8556484a4 100644 --- a/heap/hp_rrnd.c +++ b/heap/hp_rrnd.c @@ -29,7 +29,7 @@ int heap_rrnd(register HP_INFO *info, byte *record, byte *pos) { HP_SHARE *share=info->s; DBUG_ENTER("heap_rrnd"); - DBUG_PRINT("enter",("info: 0x%lx pos: %lx",info,pos)); + DBUG_PRINT("enter",("info: 0x%lx pos: %lx",(long) info, (long) pos)); info->lastinx= -1; if (!(info->current_ptr= pos)) @@ -44,7 +44,7 @@ int heap_rrnd(register HP_INFO *info, byte *record, byte *pos) } info->update=HA_STATE_PREV_FOUND | HA_STATE_NEXT_FOUND | HA_STATE_AKTIV; memcpy(record,info->current_ptr,(size_t) share->reclength); - DBUG_PRINT("exit",("found record at 0x%lx",info->current_ptr)); + DBUG_PRINT("exit", ("found record at 0x%lx", (long) info->current_ptr)); info->current_hash_ptr=0; /* Can't use rnext */ DBUG_RETURN(0); } /* heap_rrnd */ diff --git a/heap/hp_write.c b/heap/hp_write.c index bc94e3bfae4..16f02999c93 100644 --- a/heap/hp_write.c +++ b/heap/hp_write.c @@ -138,7 +138,7 @@ static byte *next_free_record_pos(HP_SHARE *info) pos=info->del_link; info->del_link= *((byte**) pos); info->deleted--; - DBUG_PRINT("exit",("Used old position: 0x%lx",pos)); + DBUG_PRINT("exit",("Used old position: 0x%lx",(long) pos)); DBUG_RETURN(pos); } if (!(block_pos=(info->records % info->block.records_in_block))) @@ -153,9 +153,9 @@ static byte *next_free_record_pos(HP_SHARE *info) DBUG_RETURN(NULL); info->data_length+=length; } - DBUG_PRINT("exit",("Used new position: %lx", - (byte*) info->block.level_info[0].last_blocks+block_pos* - info->block.recbuffer)); + DBUG_PRINT("exit",("Used new position: 0x%lx", + (long) ((byte*) 
info->block.level_info[0].last_blocks+ + block_pos * info->block.recbuffer))); DBUG_RETURN((byte*) info->block.level_info[0].last_blocks+ block_pos*info->block.recbuffer); } diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index 21fb84fb19a..ec6fdd01500 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -2100,7 +2100,7 @@ mysql_stmt_prepare(MYSQL_STMT *stmt, const char *query, ulong length) } stmt->bind= stmt->params + stmt->param_count; stmt->state= MYSQL_STMT_PREPARE_DONE; - DBUG_PRINT("info", ("Parameter count: %ld", stmt->param_count)); + DBUG_PRINT("info", ("Parameter count: %u", stmt->param_count)); DBUG_RETURN(0); } @@ -2443,10 +2443,10 @@ static my_bool store_param(MYSQL_STMT *stmt, MYSQL_BIND *param) { NET *net= &stmt->mysql->net; DBUG_ENTER("store_param"); - DBUG_PRINT("enter",("type: %d, buffer:%lx, length: %lu is_null: %d", + DBUG_PRINT("enter",("type: %d buffer: 0x%lx length: %lu is_null: %d", param->buffer_type, - param->buffer ? param->buffer : "0", *param->length, - *param->is_null)); + (long) (param->buffer ? param->buffer : NullS), + *param->length, *param->is_null)); if (*param->is_null) store_param_null(net, param); @@ -3325,8 +3325,8 @@ mysql_stmt_send_long_data(MYSQL_STMT *stmt, uint param_number, MYSQL_BIND *param; DBUG_ENTER("mysql_stmt_send_long_data"); DBUG_ASSERT(stmt != 0); - DBUG_PRINT("enter",("param no : %d, data : %lx, length : %ld", - param_number, data, length)); + DBUG_PRINT("enter",("param no: %d data: 0x%lx, length : %ld", + param_number, (long) data, length)); /* We only need to check for stmt->param_count, if it's not null @@ -4409,7 +4409,7 @@ my_bool STDCALL mysql_stmt_bind_result(MYSQL_STMT *stmt, MYSQL_BIND *bind) ulong bind_count= stmt->field_count; uint param_count= 0; DBUG_ENTER("mysql_stmt_bind_result"); - DBUG_PRINT("enter",("field_count: %d", bind_count)); + DBUG_PRINT("enter",("field_count: %lu", bind_count)); if (!bind_count) { diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c index cb4fa104b4c..58a22686199 100644 --- a/libmysqld/libmysqld.c +++ b/libmysqld/libmysqld.c @@ -206,7 +206,7 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user, } } - DBUG_PRINT("exit",("Mysql handler: %lx",mysql)); + DBUG_PRINT("exit",("Mysql handler: 0x%lx", (long) mysql)); DBUG_RETURN(mysql); error: diff --git a/myisam/mi_close.c b/myisam/mi_close.c index 62f5617de1a..8a4f6ee7f5d 100644 --- a/myisam/mi_close.c +++ b/myisam/mi_close.c @@ -28,8 +28,9 @@ int mi_close(register MI_INFO *info) int error=0,flag; MYISAM_SHARE *share=info->s; DBUG_ENTER("mi_close"); - DBUG_PRINT("enter",("base: %lx reopen: %u locks: %u", - info,(uint) share->reopen, (uint) share->tot_locks)); + DBUG_PRINT("enter",("base: 0x%lx reopen: %u locks: %u", + (long) info, (uint) share->reopen, + (uint) share->tot_locks)); pthread_mutex_lock(&THR_LOCK_myisam); if (info->lock_type == F_EXTRA_LCK) diff --git a/myisam/mi_delete.c b/myisam/mi_delete.c index 85cc60bdd9d..471420d99c0 100644 --- a/myisam/mi_delete.c +++ b/myisam/mi_delete.c @@ -165,7 +165,7 @@ static int _mi_ck_real_delete(register MI_INFO *info, MI_KEYDEF *keyinfo, DBUG_PRINT("error",("Couldn't allocate memory")); DBUG_RETURN(my_errno=ENOMEM); } - DBUG_PRINT("info",("root_page: %ld",old_root)); + DBUG_PRINT("info",("root_page: %ld", (long) old_root)); if (!_mi_fetch_keypage(info,keyinfo,old_root,DFLT_INIT_HITS,root_buff,0)) { error= -1; @@ -410,7 +410,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key, MYISAM_SHARE *share=info->s; MI_KEY_PARAM s_temp; 
DBUG_ENTER("del"); - DBUG_PRINT("enter",("leaf_page: %ld keypos: 0x%lx", leaf_page, + DBUG_PRINT("enter",("leaf_page: %ld keypos: 0x%lx", (long) leaf_page, (ulong) keypos)); DBUG_DUMP("leaf_buff",(byte*) leaf_buff,mi_getint(leaf_buff)); @@ -597,7 +597,8 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, else { /* Page is full */ endpos=anc_buff+anc_length; - DBUG_PRINT("test",("anc_buff: %lx endpos: %lx",anc_buff,endpos)); + DBUG_PRINT("test",("anc_buff: 0x%lx endpos: 0x%lx", + (long) anc_buff, (long) endpos)); if (keypos != anc_buff+2+key_reflength && !_mi_get_last_key(info,keyinfo,anc_buff,anc_key,keypos,&length)) goto err; @@ -775,7 +776,7 @@ static uint remove_key(MI_KEYDEF *keyinfo, uint nod_flag, int s_length; uchar *start; DBUG_ENTER("remove_key"); - DBUG_PRINT("enter",("keypos: %lx page_end: %lx",keypos,page_end)); + DBUG_PRINT("enter",("keypos: 0x%lx page_end: 0x%lx",(long) keypos, (long) page_end)); start=keypos; if (!(keyinfo->flag & diff --git a/myisam/mi_dynrec.c b/myisam/mi_dynrec.c index ef5ab73f1a9..6590e6bd92b 100644 --- a/myisam/mi_dynrec.c +++ b/myisam/mi_dynrec.c @@ -1066,8 +1066,8 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from, err: my_errno= HA_ERR_WRONG_IN_RECORD; - DBUG_PRINT("error",("to_end: %lx -> %lx from_end: %lx -> %lx", - to,to_end,from,from_end)); + DBUG_PRINT("error",("to_end: 0x%lx -> 0x%lx from_end: 0x%lx -> 0x%lx", + (long) to, (long) to_end, (long) from, (long) from_end)); DBUG_DUMP("from",(byte*) info->rec_buff,info->s->base.min_pack_length); DBUG_RETURN(MY_FILE_ERROR); } /* _mi_rec_unpack */ diff --git a/myisam/mi_keycache.c b/myisam/mi_keycache.c index fb13f3703a2..d595be3a684 100644 --- a/myisam/mi_keycache.c +++ b/myisam/mi_keycache.c @@ -54,8 +54,8 @@ int mi_assign_to_key_cache(MI_INFO *info, int error= 0; MYISAM_SHARE* share= info->s; DBUG_ENTER("mi_assign_to_key_cache"); - DBUG_PRINT("enter",("old_key_cache_handle: %lx new_key_cache_handle: %lx", - share->key_cache, key_cache)); + DBUG_PRINT("enter",("old_key_cache_handle: 0x%lx new_key_cache_handle: 0x%lx", + (long) share->key_cache, (long) key_cache)); /* Skip operation if we didn't change key cache. 
This can happen if we diff --git a/myisam/mi_page.c b/myisam/mi_page.c index 5240c063fba..d18a10c3cde 100644 --- a/myisam/mi_page.c +++ b/myisam/mi_page.c @@ -27,7 +27,7 @@ uchar *_mi_fetch_keypage(register MI_INFO *info, MI_KEYDEF *keyinfo, uchar *tmp; uint page_size; DBUG_ENTER("_mi_fetch_keypage"); - DBUG_PRINT("enter",("page: %ld",page)); + DBUG_PRINT("enter",("page: %ld", (long) page)); tmp=(uchar*) key_cache_read(info->s->key_cache, info->s->kfile, page, level, (byte*) buff, @@ -80,7 +80,7 @@ int _mi_write_keypage(register MI_INFO *info, register MI_KEYDEF *keyinfo, my_errno=EINVAL; DBUG_RETURN((-1)); } - DBUG_PRINT("page",("write page at: %lu",(long) page,buff)); + DBUG_PRINT("page",("write page at: %lu",(long) page)); DBUG_DUMP("buff",(byte*) buff,mi_getint(buff)); #endif diff --git a/myisam/mi_statrec.c b/myisam/mi_statrec.c index 42352f63c66..5e6ea939eca 100644 --- a/myisam/mi_statrec.c +++ b/myisam/mi_statrec.c @@ -255,8 +255,8 @@ int _mi_read_rnd_static_record(MI_INFO *info, byte *buf, if (filepos >= info->state->data_file_length) { DBUG_PRINT("test",("filepos: %ld (%ld) records: %ld del: %ld", - filepos/share->base.reclength,filepos, - info->state->records, info->state->del)); + (long) filepos/share->base.reclength, (long) filepos, + (long) info->state->records, (long) info->state->del)); fast_mi_writeinfo(info); DBUG_RETURN(my_errno=HA_ERR_END_OF_FILE); } diff --git a/myisam/mi_test2.c b/myisam/mi_test2.c index 6a6dcb971a2..0959769992c 100644 --- a/myisam/mi_test2.c +++ b/myisam/mi_test2.c @@ -813,7 +813,7 @@ end: printf("Write records: %d\nUpdate records: %d\nSame-key-read: %d\nDelete records: %d\n", write_count,update,dupp_keys,opt_delete); if (rec_pointer_size) printf("Record pointer size: %d\n",rec_pointer_size); - printf("myisam_block_size: %u\n", myisam_block_size); + printf("myisam_block_size: %lu\n", myisam_block_size); if (key_cacheing) { puts("Key cache used"); diff --git a/myisam/mi_write.c b/myisam/mi_write.c index 9ab8753f6d7..fb64ec2bb8b 100644 --- a/myisam/mi_write.c +++ b/myisam/mi_write.c @@ -339,7 +339,7 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo, my_bool was_last_key; my_off_t next_page, dupp_key_pos; DBUG_ENTER("w_search"); - DBUG_PRINT("enter",("page: %ld",page)); + DBUG_PRINT("enter",("page: %ld", (long) page)); search_key_length= (comp_flag & SEARCH_FIND) ? 
key_length : USE_WHOLE_KEY; if (!(temp_buff= (uchar*) my_alloca((uint) keyinfo->block_length+ @@ -462,7 +462,7 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *endpos, *prev_key; MI_KEY_PARAM s_temp; DBUG_ENTER("_mi_insert"); - DBUG_PRINT("enter",("key_pos: %lx",key_pos)); + DBUG_PRINT("enter",("key_pos: 0x%lx", (long) key_pos)); DBUG_EXECUTE("key",_mi_print_key(DBUG_FILE,keyinfo->seg,key,USE_WHOLE_KEY);); nod_flag=mi_test_if_nod(anc_buff); @@ -483,8 +483,8 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo, { DBUG_PRINT("test",("t_length: %d ref_len: %d", t_length,s_temp.ref_length)); - DBUG_PRINT("test",("n_ref_len: %d n_length: %d key_pos: %lx", - s_temp.n_ref_length,s_temp.n_length,s_temp.key)); + DBUG_PRINT("test",("n_ref_len: %d n_length: %d key_pos: 0x%lx", + s_temp.n_ref_length,s_temp.n_length, (long) s_temp.key)); } #endif if (t_length > 0) @@ -677,7 +677,8 @@ uchar *_mi_find_half_pos(uint nod_flag, MI_KEYDEF *keyinfo, uchar *page, } while (page < end); *return_key_length=length; *after_key=page; - DBUG_PRINT("exit",("returns: %lx page: %lx half: %lx",lastpos,page,end)); + DBUG_PRINT("exit",("returns: 0x%lx page: 0x%lx half: 0x%lx", + (long) lastpos, (long) page, (long) end)); DBUG_RETURN(lastpos); } /* _mi_find_half_pos */ @@ -732,7 +733,8 @@ static uchar *_mi_find_last_pos(MI_KEYDEF *keyinfo, uchar *page, } *return_key_length=last_length; *after_key=lastpos; - DBUG_PRINT("exit",("returns: %lx page: %lx end: %lx",prevpos,page,end)); + DBUG_PRINT("exit",("returns: 0x%lx page: 0x%lx end: 0x%lx", + (long) prevpos,(long) page,(long) end)); DBUG_RETURN(prevpos); } /* _mi_find_last_pos */ @@ -768,7 +770,7 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo, next_page= _mi_kpos(info->s->base.key_reflength, father_key_pos+father_keylength); buff=info->buff; - DBUG_PRINT("test",("use right page: %lu",next_page)); + DBUG_PRINT("test",("use right page: %lu", (ulong) next_page)); } else { @@ -777,7 +779,7 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo, next_page= _mi_kpos(info->s->base.key_reflength,father_key_pos); /* Fix that curr_buff is to left */ buff=curr_buff; curr_buff=info->buff; - DBUG_PRINT("test",("use left page: %lu",next_page)); + DBUG_PRINT("test",("use left page: %lu", (ulong) next_page)); } /* father_key_pos ptr to parting key */ if (!_mi_fetch_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,info->buff,0)) diff --git a/myisam/myisampack.c b/myisam/myisampack.c index e80a3ffacd9..79359e29a5d 100644 --- a/myisam/myisampack.c +++ b/myisam/myisampack.c @@ -2036,7 +2036,7 @@ static void write_field_info(HUFF_COUNTS *counts, uint fields, uint trees) uint huff_tree_bits; huff_tree_bits=max_bit(trees ? 
trees-1 : 0); - DBUG_PRINT("info", ("")); + DBUG_PRINT("info", (" ")); DBUG_PRINT("info", ("column types:")); DBUG_PRINT("info", ("FIELD_NORMAL 0")); DBUG_PRINT("info", ("FIELD_SKIP_ENDSPACE 1")); @@ -2048,12 +2048,12 @@ static void write_field_info(HUFF_COUNTS *counts, uint fields, uint trees) DBUG_PRINT("info", ("FIELD_ZERO 7")); DBUG_PRINT("info", ("FIELD_VARCHAR 8")); DBUG_PRINT("info", ("FIELD_CHECK 9")); - DBUG_PRINT("info", ("")); + DBUG_PRINT("info", (" ")); DBUG_PRINT("info", ("pack type as a set of flags:")); DBUG_PRINT("info", ("PACK_TYPE_SELECTED 1")); DBUG_PRINT("info", ("PACK_TYPE_SPACE_FIELDS 2")); DBUG_PRINT("info", ("PACK_TYPE_ZERO_FILL 4")); - DBUG_PRINT("info", ("")); + DBUG_PRINT("info", (" ")); if (verbose >= 2) { VOID(printf("\n")); @@ -2126,7 +2126,7 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees) return 0; } - DBUG_PRINT("info", ("")); + DBUG_PRINT("info", (" ")); if (verbose >= 2) VOID(printf("\n")); tree_no= 0; @@ -2137,7 +2137,7 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees) if (huff_tree->tree_number == 0) continue; /* Deleted tree */ tree_no++; - DBUG_PRINT("info", ("")); + DBUG_PRINT("info", (" ")); if (verbose >= 3) VOID(printf("\n")); /* Count the total number of elements (byte codes or column values). */ @@ -2329,7 +2329,7 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees) } flush_bits(); } - DBUG_PRINT("info", ("")); + DBUG_PRINT("info", (" ")); if (verbose >= 2) VOID(printf("\n")); my_afree((gptr) packed_tree); @@ -2507,7 +2507,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) end_pos-=count->max_zero_fill; field_length-=count->max_zero_fill; - switch(count->field_type) { + switch (count->field_type) { case FIELD_SKIP_ZERO: if (!memcmp((byte*) start_pos,zero_string,field_length)) { @@ -2726,6 +2726,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) break; } case FIELD_LAST: + case FIELD_enum_val_count: abort(); /* Impossible */ } start_pos+=count->max_zero_fill; diff --git a/myisammrg/myrg_extra.c b/myisammrg/myrg_extra.c index 62cf5f01aba..30bb46d27d4 100644 --- a/myisammrg/myrg_extra.c +++ b/myisammrg/myrg_extra.c @@ -28,7 +28,7 @@ int myrg_extra(MYRG_INFO *info,enum ha_extra_function function, int error,save_error=0; MYRG_TABLE *file; DBUG_ENTER("myrg_extra"); - DBUG_PRINT("info",("function: %d",(ulong) function)); + DBUG_PRINT("info",("function: %lu", (ulong) function)); if (function == HA_EXTRA_CACHE) { diff --git a/mysql-test/include/federated.inc b/mysql-test/include/federated.inc index 1c53b9ed2c5..15230f47ed8 100644 --- a/mysql-test/include/federated.inc +++ b/mysql-test/include/federated.inc @@ -5,7 +5,7 @@ source ./include/master-slave.inc; # remote table creation connection slave; ---replicate-ignore-db=federated +#--replicate-ignore-db=federated stop slave; --disable_warnings diff --git a/mysql-test/include/sp-vars.inc b/mysql-test/include/sp-vars.inc index 4bac883ee0e..c241af2fb54 100644 --- a/mysql-test/include/sp-vars.inc +++ b/mysql-test/include/sp-vars.inc @@ -1,6 +1,6 @@ delimiter |; ---------------------------------------------------------------------------- +# -------------------------------------------------------------------------- CREATE PROCEDURE sp_vars_check_dflt() BEGIN @@ -40,7 +40,7 @@ BEGIN SELECT v17, v18, v19, v20; END| ---------------------------------------------------------------------------- +# -------------------------------------------------------------------------- CREATE PROCEDURE 
sp_vars_check_assignment() BEGIN @@ -89,35 +89,35 @@ BEGIN SELECT d1, d2, d3; END| ---------------------------------------------------------------------------- +# -------------------------------------------------------------------------- CREATE FUNCTION sp_vars_check_ret1() RETURNS TINYINT BEGIN RETURN 1e200; END| ---------------------------------------------------------------------------- +# -------------------------------------------------------------------------- CREATE FUNCTION sp_vars_check_ret2() RETURNS TINYINT BEGIN RETURN 10 * 10 * 10; END| ---------------------------------------------------------------------------- +# -------------------------------------------------------------------------- CREATE FUNCTION sp_vars_check_ret3() RETURNS TINYINT BEGIN RETURN 'Hello, world'; END| ---------------------------------------------------------------------------- +# -------------------------------------------------------------------------- CREATE FUNCTION sp_vars_check_ret4() RETURNS DECIMAL(64, 2) BEGIN RETURN 12 * 10 + 34 + 0.1234; END| ---------------------------------------------------------------------------- +# -------------------------------------------------------------------------- CREATE FUNCTION sp_vars_div_zero() RETURNS INTEGER BEGIN @@ -126,6 +126,6 @@ BEGIN RETURN div_zero; END| ---------------------------------------------------------------------------- +# -------------------------------------------------------------------------- delimiter ;| diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 74ec2a02c12..c06a5a6524a 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -2045,6 +2045,12 @@ sub cleanup_stale_files () { } closedir(DIR); } + + # Remove old log files + foreach my $name (glob("r/*.reject r/*.progress r/*.log r/*.warnings")) + { + unlink($name); + } } @@ -2425,8 +2431,8 @@ sub ndbcluster_start ($$) { sub rm_ndbcluster_tables ($) { my $dir= shift; - foreach my $bin ( glob("$dir/cluster/apply_status*"), - glob("$dir/cluster/schema*") ) + foreach my $bin ( glob("$dir/mysql/apply_status*"), + glob("$dir/mysql/schema*")) { unlink($bin); } @@ -4089,12 +4095,12 @@ sub run_testcase_start_servers($) { # tables ok FIXME This is a workaround so that only one mysqld # create the tables if ( ! sleep_until_file_created( - "$master->[0]->{'path_myddir'}/cluster/apply_status.ndb", + "$master->[0]->{'path_myddir'}/mysql/apply_status.ndb", $master->[0]->{'start_timeout'}, $master->[0]->{'pid'})) { - $tinfo->{'comment'}= "Failed to create 'cluster/apply_status' table"; + $tinfo->{'comment'}= "Failed to create 'mysql/apply_status' table"; return 1; } } diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index 52e98304aca..9f53d818d2b 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -182,19 +182,14 @@ if [ -d ./sql ] ; then SOURCE_DIST=1 else BINARY_DIST=1 -fi -# ... one level for tar.gz, two levels for a RPM installation -if [ -d ./bin ] ; then - # this is not perfect: we have - # /usr/share/mysql/ # mysql-test-run is here, so this is "$MYSQL_TEST_DIR" - # /usr/bin/ # with MySQL client programs - # so the existence of "/usr/share/bin/" would make this test fail. - BASEDIR=`pwd` -else - cd .. - BASEDIR=`pwd` + # ... one level for tar.gz, two levels for a RPM installation + if [ ! -f ./bin/mysql_upgrade ] ; then + # Has to be RPM installation + cd .. 
+ fi fi +BASEDIR=`pwd` cd $MYSQL_TEST_DIR MYSQL_TEST_WINDIR=$MYSQL_TEST_DIR @@ -2098,6 +2093,9 @@ then $RM -f $MASTER_MYDDIR/log.* $RM -f $MASTER_MYDDIR"1"/log.* + # Remove old log and reject files + $RM -f r/*.reject r/*.progress r/*.log r/*.warnings + wait_for_master=$SLEEP_TIME_FOR_FIRST_MASTER wait_for_slave=$SLEEP_TIME_FOR_FIRST_SLAVE $ECHO "Installing Test Databases" diff --git a/mysql-test/r/ctype_cp1250_ch.result b/mysql-test/r/ctype_cp1250_ch.result index b55849e4e12..73f415732cd 100644 --- a/mysql-test/r/ctype_cp1250_ch.result +++ b/mysql-test/r/ctype_cp1250_ch.result @@ -1,3 +1,4 @@ +DROP TABLE IF EXISTS t1; SHOW COLLATION LIKE 'cp1250_czech_cs'; Collation Charset Id Default Compiled Sortlen cp1250_czech_cs cp1250 34 Yes 2 diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test index 91c22001b6c..eef6ec85bfa 100644 --- a/mysql-test/t/create.test +++ b/mysql-test/t/create.test @@ -550,7 +550,7 @@ create table t1 ( a varchar(112) charset utf8 collate utf8_bin not null, primary key (a) ) select 'test' as a ; ---warning 1364 +#--warning 1364 show create table t1; drop table t1; @@ -563,7 +563,7 @@ CREATE TABLE t2 ( ); insert into t2 values(111); ---warning 1364 +#--warning 1364 create table t1 ( a varchar(12) charset utf8 collate utf8_bin not null, b int not null, primary key (a) @@ -571,7 +571,7 @@ create table t1 ( show create table t1; drop table t1; ---warning 1364 +#--warning 1364 create table t1 ( a varchar(12) charset utf8 collate utf8_bin not null, b int not null, primary key (a) @@ -579,7 +579,7 @@ create table t1 ( show create table t1; drop table t1; ---warning 1364 +#--warning 1364 create table t1 ( a varchar(12) charset utf8 collate utf8_bin not null, b int null, primary key (a) @@ -587,7 +587,7 @@ create table t1 ( show create table t1; drop table t1; ---warning 1364 +#--warning 1364 create table t1 ( a varchar(12) charset utf8 collate utf8_bin not null, b int not null, primary key (a) @@ -595,7 +595,7 @@ create table t1 ( show create table t1; drop table t1; ---warning 1364 +#--warning 1364 create table t1 ( a varchar(12) charset utf8 collate utf8_bin, b int not null, primary key (a) @@ -609,7 +609,7 @@ create table t1 ( ); insert into t1 values (1,1,1, 1,1,1, 1,1,1); ---warning 1364 +#--warning 1364 create table t2 ( a1 varchar(12) charset utf8 collate utf8_bin not null, a2 int, a3 int, a4 int, a5 int, a6 int, a7 int, a8 int, a9 int, @@ -617,20 +617,20 @@ create table t2 ( ) select a1,a2,a3,a4,a5,a6,a7,a8,a9 from t1 ; drop table t2; ---warning 1364 +#--warning 1364 create table t2 ( a1 varchar(12) charset utf8 collate utf8_bin, a2 int, a3 int, a4 int, a5 int, a6 int, a7 int, a8 int, a9 int ) select a1,a2,a3,a4,a5,a6,a7,a8,a9 from t1; drop table t1, t2; ---warning 1364 +#--warning 1364 create table t1 ( a1 int, a2 int, a3 int, a4 int, a5 int, a6 int, a7 int, a8 int, a9 int ); insert into t1 values (1,1,1, 1,1,1, 1,1,1); ---warning 1364 +#--warning 1364 create table t2 ( a1 varchar(12) charset utf8 collate utf8_bin not null, a2 int, a3 int, a4 int, a5 int, a6 int, a7 int, a8 int, a9 int, diff --git a/mysql-test/t/csv.test b/mysql-test/t/csv.test index 65173cbf355..830bdaa3cf0 100644 --- a/mysql-test/t/csv.test +++ b/mysql-test/t/csv.test @@ -1360,27 +1360,27 @@ DROP TABLE bug14672; create table t1 (a int) engine=csv; insert t1 values (1); --enable_info -delete from t1; -- delete_row -delete from t1; -- delete_all_rows +delete from t1; # delete_row +delete from t1; # delete_all_rows --disable_info insert t1 values (1),(2); --enable_info -delete from t1; -- 
delete_all_rows +delete from t1; # delete_all_rows --disable_info insert t1 values (1),(2),(3); flush tables; --enable_info -delete from t1; -- delete_row +delete from t1; # delete_row --disable_info insert t1 values (1),(2),(3),(4); flush tables; select count(*) from t1; --enable_info -delete from t1; -- delete_all_rows +delete from t1; # delete_all_rows --disable_info insert t1 values (1),(2),(3),(4),(5); --enable_info -truncate table t1; -- truncate +truncate table t1; # truncate --disable_info drop table t1; diff --git a/mysql-test/t/ctype_collate.test b/mysql-test/t/ctype_collate.test index e59693680bf..aca240b46bc 100644 --- a/mysql-test/t/ctype_collate.test +++ b/mysql-test/t/ctype_collate.test @@ -59,7 +59,7 @@ INSERT INTO t1 (latin1_f) VALUES (_latin1'Z'); INSERT INTO t1 (latin1_f) VALUES (_latin1'z'); --- ORDER BY +# ORDER BY SELECT latin1_f FROM t1 ORDER BY latin1_f; SELECT latin1_f FROM t1 ORDER BY latin1_f COLLATE latin1_swedish_ci; @@ -69,9 +69,9 @@ SELECT latin1_f FROM t1 ORDER BY latin1_f COLLATE latin1_bin; --error 1253 SELECT latin1_f FROM t1 ORDER BY latin1_f COLLATE koi8r_general_ci; ---SELECT latin1_f COLLATE koi8r FROM t1 ; +# SELECT latin1_f COLLATE koi8r FROM t1 ; --- AS + ORDER BY +# AS + ORDER BY SELECT latin1_f COLLATE latin1_swedish_ci AS latin1_f_as FROM t1 ORDER BY latin1_f_as; SELECT latin1_f COLLATE latin1_german2_ci AS latin1_f_as FROM t1 ORDER BY latin1_f_as; SELECT latin1_f COLLATE latin1_general_ci AS latin1_f_as FROM t1 ORDER BY latin1_f_as; @@ -80,7 +80,7 @@ SELECT latin1_f COLLATE latin1_bin AS latin1_f_as FROM t1 ORDER BY latin1 SELECT latin1_f COLLATE koi8r_general_ci AS latin1_f_as FROM t1 ORDER BY latin1_f_as; --- GROUP BY +# GROUP BY SELECT latin1_f,count(*) FROM t1 GROUP BY latin1_f; SELECT latin1_f,count(*) FROM t1 GROUP BY latin1_f COLLATE latin1_swedish_ci; @@ -91,7 +91,7 @@ SELECT latin1_f,count(*) FROM t1 GROUP BY latin1_f COLLATE latin1_bin; SELECT latin1_f,count(*) FROM t1 GROUP BY latin1_f COLLATE koi8r_general_ci; --- DISTINCT +# DISTINCT SELECT DISTINCT latin1_f FROM t1; SELECT DISTINCT latin1_f COLLATE latin1_swedish_ci FROM t1; @@ -102,21 +102,20 @@ SELECT DISTINCT latin1_f COLLATE latin1_bin FROM t1; SELECT DISTINCT latin1_f COLLATE koi8r FROM t1; --- Aggregates ---SELECT MAX(k COLLATE latin1_german2_ci) ---FROM t1 - - --- WHERE ---SELECT * ---FROM t1 ---WHERE (_latin1'Mu"ller' COLLATE latin1_german2_ci) = k - ---HAVING ---SELECT * ---FROM t1 ---HAVING (_latin1'Mu"ller' COLLATE latin1_german2_ci) = k +# Aggregates +--disable_parsing +SELECT MAX(k COLLATE latin1_german2_ci) +FROM t1 +WHERE +SELECT * +FROM t1 +WHERE (_latin1'Mu"ller' COLLATE latin1_german2_ci) = k +HAVING +SELECT * +FROM t1 +HAVING (_latin1'Mu"ller' COLLATE latin1_german2_ci) = k; +--enable_parsing # # Check that SHOW displays COLLATE clause diff --git a/mysql-test/t/ctype_cp1250_ch.test b/mysql-test/t/ctype_cp1250_ch.test index 65550e0c193..86eb8c31d99 100644 --- a/mysql-test/t/ctype_cp1250_ch.test +++ b/mysql-test/t/ctype_cp1250_ch.test @@ -1,5 +1,9 @@ -- source include/have_cp1250_ch.inc +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + SHOW COLLATION LIKE 'cp1250_czech_cs'; # diff --git a/mysql-test/t/ctype_ucs.test b/mysql-test/t/ctype_ucs.test index 6c814368c88..4b1e0e1b059 100644 --- a/mysql-test/t/ctype_ucs.test +++ b/mysql-test/t/ctype_ucs.test @@ -298,7 +298,7 @@ INSERT INTO t1 VALUES (0xA),(0xAA),(0xAAA),(0xAAAA),(0xAAAAA); SELECT HEX(a) FROM t1; DROP TABLE t1; --- the same should be also done with enum and set +# the same should be also 
done with enum and set # diff --git a/mysql-test/t/func_sapdb.test b/mysql-test/t/func_sapdb.test index 97101fba615..77d7366afe6 100644 --- a/mysql-test/t/func_sapdb.test +++ b/mysql-test/t/func_sapdb.test @@ -35,7 +35,7 @@ SET @@SQL_MODE="ALLOW_INVALID_DATES"; select datediff("1997-11-31 23:59:59.000001","1997-12-31"); SET @@SQL_MODE=""; --- This will give a warning +# This will give a warning select datediff("1997-11-31 23:59:59.000001","1997-12-31"); select datediff("1997-11-30 23:59:59.000001",null); diff --git a/mysql-test/t/func_str.test b/mysql-test/t/func_str.test index 9622de96143..94190ff519f 100644 --- a/mysql-test/t/func_str.test +++ b/mysql-test/t/func_str.test @@ -300,18 +300,26 @@ select POSITION(_latin1'B' COLLATE latin1_general_ci IN _latin1'abcd' COLLATE la select POSITION(_latin1'B' IN _latin2'abcd'); select FIND_IN_SET(_latin1'B',_latin1'a,b,c,d'); ---fix this: ---select FIND_IN_SET(_latin1'B',_latin1'a,b,c,d' COLLATE latin1_bin); ---select FIND_IN_SET(_latin1'B' COLLATE latin1_bin,_latin1'a,b,c,d'); + +# fix this: +--disable_parsing +select FIND_IN_SET(_latin1'B',_latin1'a,b,c,d' COLLATE latin1_bin); +select FIND_IN_SET(_latin1'B' COLLATE latin1_bin,_latin1'a,b,c,d'); +--enable_parsing + --error 1267 select FIND_IN_SET(_latin1'B' COLLATE latin1_general_ci,_latin1'a,b,c,d' COLLATE latin1_bin); --error 1267 select FIND_IN_SET(_latin1'B',_latin2'a,b,c,d'); select SUBSTRING_INDEX(_latin1'abcdabcdabcd',_latin1'd',2); ---fix this: ---select SUBSTRING_INDEX(_latin1'abcdabcdabcd' COLLATE latin1_bin,_latin1'd',2); ---select SUBSTRING_INDEX(_latin1'abcdabcdabcd',_latin1'd' COLLATE latin1_bin,2); + +# fix this: +--disable_parsing +select SUBSTRING_INDEX(_latin1'abcdabcdabcd' COLLATE latin1_bin,_latin1'd',2); +select SUBSTRING_INDEX(_latin1'abcdabcdabcd',_latin1'd' COLLATE latin1_bin,2); +--enable_parsing + --error 1267 select SUBSTRING_INDEX(_latin1'abcdabcdabcd',_latin2'd',2); --error 1267 diff --git a/mysql-test/t/grant.test b/mysql-test/t/grant.test index d3781d58780..2f5e3dced22 100644 --- a/mysql-test/t/grant.test +++ b/mysql-test/t/grant.test @@ -118,7 +118,7 @@ drop table t1; # --error 1221 GRANT FILE on mysqltest.* to mysqltest_1@localhost; -select 1; -- To test that the previous command didn't cause problems +select 1; # To test that the previous command didn't cause problems # # Bug #4898: User privileges depending on ORDER BY Settings of table db diff --git a/mysql-test/t/greedy_optimizer.test b/mysql-test/t/greedy_optimizer.test index e547d85b7f3..049d0ab09f7 100644 --- a/mysql-test/t/greedy_optimizer.test +++ b/mysql-test/t/greedy_optimizer.test @@ -140,18 +140,18 @@ insert into t7 values (21,2,3,4,5,6); select @@optimizer_search_depth; select @@optimizer_prune_level; --- This value swithes back to the old implementation of 'find_best()' --- set optimizer_search_depth=63; - old (independent of the optimizer_prune_level) +# This value swithes back to the old implementation of 'find_best()' +# set optimizer_search_depth=63; - old (independent of the optimizer_prune_level) +# +# These are the values for the parameters that control the greedy optimizer +# (total 6 combinations - 3 for optimizer_search_depth, 2 for optimizer_prune_level): -- --- These are the values for the parameters that control the greedy optimizer --- (total 6 combinations - 3 for optimizer_search_depth, 2 for optimizer_prune_level): +# set optimizer_search_depth=0; - automatic +# set optimizer_search_depth=1; - min +# set optimizer_search_depth=62; - max (default) -- --- set optimizer_search_depth=0; 
- automatic --- set optimizer_search_depth=1; - min --- set optimizer_search_depth=62; - max (default) --- --- set optimizer_prune_level=0 - exhaustive; --- set optimizer_prune_level=1 - heuristic; -- default +# set optimizer_prune_level=0 - exhaustive; +# set optimizer_prune_level=1 - heuristic; # default # @@ -170,17 +170,17 @@ select @@optimizer_prune_level; set optimizer_search_depth=63; select @@optimizer_search_depth; --- 6-table join, chain +# 6-table join, chain explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, star +# 6-table join, star explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, clique +# 6-table join, clique explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; @@ -195,17 +195,17 @@ select @@optimizer_prune_level; set optimizer_search_depth=0; select @@optimizer_search_depth; --- 6-table join, chain +# 6-table join, chain explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, star +# 6-table join, star explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, clique +# 6-table join, clique explain select t1.c11 from t1, 
t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; @@ -214,17 +214,17 @@ show status like 'Last_query_cost'; set optimizer_search_depth=1; select @@optimizer_search_depth; --- 6-table join, chain +# 6-table join, chain explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, star +# 6-table join, star explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, clique +# 6-table join, clique explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; @@ -233,17 +233,17 @@ show status like 'Last_query_cost'; set optimizer_search_depth=62; select @@optimizer_search_depth; --- 6-table join, chain +# 6-table join, chain explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = 
t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, star +# 6-table join, star explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, clique +# 6-table join, clique explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; @@ -256,17 +256,17 @@ select @@optimizer_prune_level; set optimizer_search_depth=0; select @@optimizer_search_depth; --- 6-table join, chain +# 6-table join, chain explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, star +# 6-table join, star explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, clique +# 6-table join, clique explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 
and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; @@ -275,17 +275,17 @@ show status like 'Last_query_cost'; set optimizer_search_depth=1; select @@optimizer_search_depth; --- 6-table join, chain +# 6-table join, chain explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, star +# 6-table join, star explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, clique +# 6-table join, clique explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; @@ -294,17 +294,17 @@ show status like 'Last_query_cost'; set optimizer_search_depth=62; select @@optimizer_search_depth; --- 6-table join, chain +# 6-table join, chain explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, star +# 6-table join, star explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; show status like 'Last_query_cost'; --- 6-table join, clique +# 6-table join, clique explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = 
t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; show status like 'Last_query_cost'; explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; diff --git a/mysql-test/t/group_min_max.test b/mysql-test/t/group_min_max.test index 08f0f54df60..8f88c98caa7 100644 --- a/mysql-test/t/group_min_max.test +++ b/mysql-test/t/group_min_max.test @@ -57,8 +57,8 @@ create index idx_t1_1 on t1 (a1,a2,b,c); create index idx_t1_2 on t1 (a1,a2,b); analyze table t1; --- t2 is the same as t1, but with some NULLs in the MIN/MAX column, and one more --- nullable attribute +# t2 is the same as t1, but with some NULLs in the MIN/MAX column, and +# one more nullable attribute --disable_warnings drop table if exists t2; @@ -68,7 +68,7 @@ create table t2 ( a1 char(64), a2 char(64) not null, b char(16), c char(16), d char(16), dummy char(64) default ' ' ); insert into t2 select * from t1; --- add few rows with NULL's in the MIN/MAX column +# add few rows with NULL's in the MIN/MAX column insert into t2 (a1, a2, b, c, d) values ('a','a',NULL,'a777','xyz'),('a','a',NULL,'a888','xyz'),('a','a',NULL,'a999','xyz'), ('a','a','a',NULL,'xyz'), @@ -92,10 +92,10 @@ create index idx_t2_1 on t2 (a1,a2,b,c); create index idx_t2_2 on t2 (a1,a2,b); analyze table t2; --- Table t3 is the same as t1, but with smaller column lenghts. --- This allows to test different branches of the cost computation procedure --- when the number of keys per block are less than the number of keys in the --- sub-groups formed by predicates over non-group attributes. +# Table t3 is the same as t1, but with smaller column lenghts. +# This allows to test different branches of the cost computation procedure +# when the number of keys per block are less than the number of keys in the +# sub-groups formed by predicates over non-group attributes. --disable_warnings drop table if exists t3; @@ -164,11 +164,11 @@ create index idx_t3_2 on t3 (a1,a2,b); analyze table t3; --- --- Queries without a WHERE clause. These queries do not use ranges. --- +# +# Queries without a WHERE clause. These queries do not use ranges. 
+# --- plans +# plans explain select a1, min(a2) from t1 group by a1; explain select a1, max(a2) from t1 group by a1; explain select a1, min(a2), max(a2) from t1 group by a1; @@ -176,31 +176,31 @@ explain select a1, a2, b, min(c), max(c) from t1 group by a1,a2,b; explain select a1,a2,b,max(c),min(c) from t1 group by a1,a2,b; --replace_column 7 # 9 # explain select a1,a2,b,max(c),min(c) from t2 group by a1,a2,b; --- Select fields in different order +# Select fields in different order explain select min(a2), a1, max(a2), min(a2), a1 from t1 group by a1; explain select a1, b, min(c), a1, max(c), b, a2, max(c), max(c) from t1 group by a1, a2, b; explain select min(a2) from t1 group by a1; explain select a2, min(c), max(c) from t1 group by a1,a2,b; --- queries +# queries select a1, min(a2) from t1 group by a1; select a1, max(a2) from t1 group by a1; select a1, min(a2), max(a2) from t1 group by a1; select a1, a2, b, min(c), max(c) from t1 group by a1,a2,b; select a1,a2,b,max(c),min(c) from t1 group by a1,a2,b; select a1,a2,b,max(c),min(c) from t2 group by a1,a2,b; --- Select fields in different order +# Select fields in different order select min(a2), a1, max(a2), min(a2), a1 from t1 group by a1; select a1, b, min(c), a1, max(c), b, a2, max(c), max(c) from t1 group by a1, a2, b; select min(a2) from t1 group by a1; select a2, min(c), max(c) from t1 group by a1,a2,b; --- --- Queries with a where clause --- +# +# Queries with a where clause +# --- A) Preds only over the group 'A' attributes --- plans +# A) Preds only over the group 'A' attributes +# plans explain select a1,a2,b,min(c),max(c) from t1 where a1 < 'd' group by a1,a2,b; explain select a1,a2,b,min(c),max(c) from t1 where a1 >= 'b' group by a1,a2,b; explain select a1,a2,b, max(c) from t1 where a1 >= 'c' or a1 < 'b' group by a1,a2,b; @@ -238,7 +238,7 @@ explain select a1,min(c),max(c) from t2 where a1 >= 'b' group by a1,a2,b; --replace_column 9 # explain select a1, max(c) from t2 where a1 in ('a','b','d') group by a1,a2,b; --- queries +# queries select a1,a2,b,min(c),max(c) from t1 where a1 < 'd' group by a1,a2,b; select a1,a2,b,min(c),max(c) from t1 where a1 >= 'b' group by a1,a2,b; select a1,a2,b, max(c) from t1 where a1 >= 'c' or a1 < 'b' group by a1,a2,b; @@ -264,8 +264,8 @@ select a1,a2,b,min(c),max(c) from t2 where (a1 = 'b' or a1 = 'd' or a1 = 'a' or select a1,min(c),max(c) from t2 where a1 >= 'b' group by a1,a2,b; select a1, max(c) from t2 where a1 in ('a','b','d') group by a1,a2,b; --- B) Equalities only over the non-group 'B' attributes --- plans +# B) Equalities only over the non-group 'B' attributes +# plans explain select a1,a2,b,max(c),min(c) from t1 where (a2 = 'a') and (b = 'b') group by a1; explain select a1,max(c),min(c) from t1 where (a2 = 'a') and (b = 'b') group by a1; explain select a1,a2,b, max(c) from t1 where (b = 'b') group by a1,a2; @@ -278,11 +278,11 @@ explain select a1,a2,b, max(c) from t2 where (b = 'b') group by a1,a2; explain select a1,a2,b,min(c),max(c) from t2 where (b = 'b') group by a1,a2; explain select a1,a2, max(c) from t2 where (b = 'b') group by a1,a2; --- these queries test case 2) in TRP_GROUP_MIN_MAX::update_cost() +# these queries test case 2) in TRP_GROUP_MIN_MAX::update_cost() explain select a1,a2,b,max(c),min(c) from t3 where (a2 = 'a') and (b = 'b') group by a1; explain select a1,max(c),min(c) from t3 where (a2 = 'a') and (b = 'b') group by a1; --- queries +# queries select a1,a2,b,max(c),min(c) from t1 where (a2 = 'a') and (b = 'b') group by a1; select a1,max(c),min(c) from t1 where (a2 = 
'a') and (b = 'b') group by a1; select a1,a2,b, max(c) from t1 where (b = 'b') group by a1,a2; @@ -295,20 +295,20 @@ select a1,a2,b, max(c) from t2 where (b = 'b') group by a1,a2; select a1,a2,b,min(c),max(c) from t2 where (b = 'b') group by a1,a2; select a1,a2, max(c) from t2 where (b = 'b') group by a1,a2; --- these queries test case 2) in TRP_GROUP_MIN_MAX::update_cost() +# these queries test case 2) in TRP_GROUP_MIN_MAX::update_cost() select a1,a2,b,max(c),min(c) from t3 where (a2 = 'a') and (b = 'b') group by a1; select a1,max(c),min(c) from t3 where (a2 = 'a') and (b = 'b') group by a1; --- IS NULL (makes sense for t2 only) --- plans +# IS NULL (makes sense for t2 only) +# plans explain select a1,a2,b,min(c) from t2 where (a2 = 'a') and b is NULL group by a1; explain select a1,a2,b,max(c) from t2 where (a2 = 'a') and b is NULL group by a1; explain select a1,a2,b,min(c) from t2 where b is NULL group by a1,a2; explain select a1,a2,b,max(c) from t2 where b is NULL group by a1,a2; explain select a1,a2,b,min(c),max(c) from t2 where b is NULL group by a1,a2; explain select a1,a2,b,min(c),max(c) from t2 where b is NULL group by a1,a2; --- queries +# queries select a1,a2,b,min(c) from t2 where (a2 = 'a') and b is NULL group by a1; select a1,a2,b,max(c) from t2 where (a2 = 'a') and b is NULL group by a1; select a1,a2,b,min(c) from t2 where b is NULL group by a1,a2; @@ -316,8 +316,8 @@ select a1,a2,b,max(c) from t2 where b is NULL group by a1,a2; select a1,a2,b,min(c),max(c) from t2 where b is NULL group by a1,a2; select a1,a2,b,min(c),max(c) from t2 where b is NULL group by a1,a2; --- C) Range predicates for the MIN/MAX attribute --- plans +# C) Range predicates for the MIN/MAX attribute +# plans --replace_column 9 # explain select a1,a2,b, max(c) from t1 where (c > 'b1') group by a1,a2,b; explain select a1,a2,b,min(c),max(c) from t1 where (c > 'b1') group by a1,a2,b; @@ -367,7 +367,7 @@ explain select a1,a2,b,min(c),max(c) from t2 where (c < 'c5') or (c = 'g412') or --replace_column 9 # explain select a1,a2,b,min(c),max(c) from t2 where ((c > 'b111') and (c <= 'g112')) or ((c > 'd000') and (c <= 'i110')) group by a1,a2,b; --- queries +# queries select a1,a2,b, max(c) from t1 where (c > 'b1') group by a1,a2,b; select a1,a2,b,min(c),max(c) from t1 where (c > 'b1') group by a1,a2,b; select a1,a2,b, max(c) from t1 where (c > 'f123') group by a1,a2,b; @@ -401,19 +401,19 @@ select a1,a2,b,min(c),max(c) from t2 where (c > 'b111') and (c <= 'g112') group select a1,a2,b,min(c),max(c) from t2 where (c < 'c5') or (c = 'g412') or (c = 'k421') group by a1,a2,b; select a1,a2,b,min(c),max(c) from t2 where ((c > 'b111') and (c <= 'g112')) or ((c > 'd000') and (c <= 'i110')) group by a1,a2,b; --- analyze the sub-select +# analyze the sub-select explain select a1,a2,b,min(c),max(c) from t1 where exists ( select * from t2 where t2.c = t1.c ) group by a1,a2,b; --- the sub-select is unrelated to MIN/MAX +# the sub-select is unrelated to MIN/MAX explain select a1,a2,b,min(c),max(c) from t1 where exists ( select * from t2 where t2.c > 'b1' ) group by a1,a2,b; --- A,B,C) Predicates referencing mixed classes of attributes --- plans +# A,B,C) Predicates referencing mixed classes of attributes +# plans explain select a1,a2,b,min(c),max(c) from t1 where (a1 >= 'c' or a2 < 'b') and (b > 'a') group by a1,a2,b; explain select a1,a2,b,min(c),max(c) from t1 where (a1 >= 'c' or a2 < 'b') and (c > 'b111') group by a1,a2,b; explain select a1,a2,b,min(c),max(c) from t1 where (a2 >= 'b') and (b = 'a') and (c > 'b111') group by 
a1,a2,b; @@ -435,7 +435,7 @@ explain select a1,a2,b,min(c) from t2 where ((a1 > 'a') or (a1 < '9')) and ((a2 --replace_column 9 # explain select a1,a2,b,min(c) from t2 where (a1 > 'a') and (a2 > 'a') and (b = 'c') group by a1,a2,b; --- queries +# queries select a1,a2,b,min(c),max(c) from t1 where (a1 >= 'c' or a2 < 'b') and (b > 'a') group by a1,a2,b; select a1,a2,b,min(c),max(c) from t1 where (a1 >= 'c' or a2 < 'b') and (c > 'b111') group by a1,a2,b; select a1,a2,b,min(c),max(c) from t1 where (a2 >= 'b') and (b = 'a') and (c > 'b111') group by a1,a2,b; @@ -452,11 +452,11 @@ select a1,a2,b,min(c) from t2 where ((a1 > 'a') or (a1 < '9')) and ((a2 >= 'b') select a1,a2,b,min(c) from t2 where (a1 > 'a') and (a2 > 'a') and (b = 'c') group by a1,a2,b; --- --- GROUP BY queries without MIN/MAX --- +# +# GROUP BY queries without MIN/MAX +# --- plans +# plans explain select a1,a2,b from t1 where (a1 >= 'c' or a2 < 'b') and (b > 'a') group by a1,a2,b; explain select a1,a2,b from t1 where (a2 >= 'b') and (b = 'a') group by a1,a2,b; explain select a1,a2,b,c from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121') group by a1,a2,b; @@ -471,7 +471,7 @@ explain select a1,a2,b,c from t2 where (a2 >= 'b') and (b = 'a') and (c = 'i121' --replace_column 9 # explain select a1,a2,b from t2 where (a1 > 'a') and (a2 > 'a') and (b = 'c') group by a1,a2,b; --- queries +# queries select a1,a2,b from t1 where (a1 >= 'c' or a2 < 'b') and (b > 'a') group by a1,a2,b; select a1,a2,b from t1 where (a2 >= 'b') and (b = 'a') group by a1,a2,b; select a1,a2,b,c from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121') group by a1,a2,b; @@ -482,11 +482,11 @@ select a1,a2,b from t2 where (a2 >= 'b') and (b = 'a') group by a1,a2,b; select a1,a2,b,c from t2 where (a2 >= 'b') and (b = 'a') and (c = 'i121') group by a1,a2,b; select a1,a2,b from t2 where (a1 > 'a') and (a2 > 'a') and (b = 'c') group by a1,a2,b; --- --- DISTINCT queries --- +# +# DISTINCT queries +# --- plans +# plans explain select distinct a1,a2,b from t1; explain select distinct a1,a2,b from t1 where (a2 >= 'b') and (b = 'a'); explain select distinct a1,a2,b,c from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121'); @@ -502,7 +502,7 @@ explain select distinct a1,a2,b,c from t2 where (a2 >= 'b') and (b = 'a') and (c explain select distinct a1,a2,b from t2 where (a1 > 'a') and (a2 > 'a') and (b = 'c'); explain select distinct b from t2 where (a2 >= 'b') and (b = 'a'); --- queries +# queries select distinct a1,a2,b from t1; select distinct a1,a2,b from t1 where (a2 >= 'b') and (b = 'a'); select distinct a1,a2,b,c from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121'); @@ -515,22 +515,22 @@ select distinct a1,a2,b,c from t2 where (a2 >= 'b') and (b = 'a') and (c = 'i121 select distinct a1,a2,b from t2 where (a1 > 'a') and (a2 > 'a') and (b = 'c'); select distinct b from t2 where (a2 >= 'b') and (b = 'a'); --- BUG #6303 +# BUG #6303 select distinct t_00.a1 from t1 t_00 where exists ( select * from t2 where a1 = t_00.a1 ); --- BUG #8532 - SELECT DISTINCT a, a causes server to crash +# BUG #8532 - SELECT DISTINCT a, a causes server to crash select distinct a1,a1 from t1; select distinct a2,a1,a2,a1 from t1; select distinct t1.a1,t2.a1 from t1,t2; --- --- DISTINCT queries with GROUP-BY --- +# +# DISTINCT queries with GROUP-BY +# --- plans +# plans explain select distinct a1,a2,b from t1; explain select distinct a1,a2,b from t1 where (a2 >= 'b') and (b = 'a') group by a1,a2,b; explain select distinct a1,a2,b,c from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121') 
group by a1,a2,b; @@ -548,7 +548,7 @@ explain select distinct a1,a2,b from t2 where (a1 > 'a') and (a2 > 'a') and (b = --replace_column 9 # explain select distinct b from t2 where (a2 >= 'b') and (b = 'a') group by a1,a2,b; --- queries +# queries select distinct a1,a2,b from t1; select distinct a1,a2,b from t1 where (a2 >= 'b') and (b = 'a') group by a1,a2,b; select distinct a1,a2,b,c from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121') group by a1,a2,b; @@ -562,9 +562,9 @@ select distinct a1,a2,b from t2 where (a1 > 'a') and (a2 > 'a') and (b = 'c') gr select distinct b from t2 where (a2 >= 'b') and (b = 'a') group by a1,a2,b; --- --- COUNT (DISTINCT cols) queries --- +# +# COUNT (DISTINCT cols) queries +# explain select count(distinct a1,a2,b) from t1 where (a2 >= 'b') and (b = 'a'); explain select count(distinct a1,a2,b,c) from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121'); @@ -578,9 +578,9 @@ select count(distinct a1,a2,b) from t1 where (a1 > 'a') and (a2 > 'a') and (b = select count(distinct b) from t1 where (a2 >= 'b') and (b = 'a'); select ord(a1) + count(distinct a1,a2,b) from t1 where (a1 > 'a') and (a2 > 'a'); --- --- Queries with expressions in the select clause --- +# +# Queries with expressions in the select clause +# explain select a1,a2,b, concat(min(c), max(c)) from t1 where a1 < 'd' group by a1,a2,b; explain select concat(a1,min(c)),b from t1 where a1 < 'd' group by a1,a2,b; @@ -595,48 +595,48 @@ select concat(a1,a2),b,min(c),max(c) from t1 where a1 < 'd' group by a1,a2,b; select concat(ord(min(b)),ord(max(b))),min(b),max(b) from t1 group by a1,a2; --- --- Negative examples: queries that should NOT be treated as optimizable by --- QUICK_GROUP_MIN_MAX_SELECT --- +# +# Negative examples: queries that should NOT be treated as optimizable by +# QUICK_GROUP_MIN_MAX_SELECT +# --- select a non-indexed attribute +# select a non-indexed attribute explain select a1,a2,b,d,min(c),max(c) from t1 group by a1,a2,b; explain select a1,a2,b,d from t1 group by a1,a2,b; --- predicate that references an attribute that is after the MIN/MAX argument --- in the index +# predicate that references an attribute that is after the MIN/MAX argument +# in the index explain select a1,a2,min(b),max(b) from t1 where (a1 = 'b' or a1 = 'd' or a1 = 'a' or a1 = 'c') and (a2 > 'a') and (c > 'a111') group by a1,a2; --- predicate that references a non-indexed attribute +# predicate that references a non-indexed attribute explain select a1,a2,b,min(c),max(c) from t1 where (a1 = 'b' or a1 = 'd' or a1 = 'a' or a1 = 'c') and (a2 > 'a') and (d > 'xy2') group by a1,a2,b; explain select a1,a2,b,c from t1 where (a1 = 'b' or a1 = 'd' or a1 = 'a' or a1 = 'c') and (a2 > 'a') and (d > 'xy2') group by a1,a2,b,c; --- non-equality predicate for a non-group select attribute +# non-equality predicate for a non-group select attribute explain select a1,a2,b,max(c),min(c) from t2 where (a2 = 'a') and (b = 'b') or (b < 'b') group by a1; explain select a1,a2,b from t1 where (a1 = 'b' or a1 = 'd' or a1 = 'a' or a1 = 'c') and (a2 > 'a') and (c > 'a111') group by a1,a2,b; --- non-group field with an equality predicate that references a keypart after the --- MIN/MAX argument +# non-group field with an equality predicate that references a keypart after the +# MIN/MAX argument explain select a1,a2,min(b),c from t2 where (a2 = 'a') and (c = 'a111') group by a1; select a1,a2,min(b),c from t2 where (a2 = 'a') and (c = 'a111') group by a1; --- disjunction for a non-group select attribute +# disjunction for a non-group select 
attribute explain select a1,a2,b,max(c),min(c) from t2 where (a2 = 'a') and (b = 'b') or (b = 'a') group by a1; --- non-range predicate for the MIN/MAX attribute +# non-range predicate for the MIN/MAX attribute explain select a1,a2,b,min(c),max(c) from t2 where (c > 'a000') and (c <= 'd999') and (c like '_8__') group by a1,a2,b; --- not all attributes are indexed by one index +# not all attributes are indexed by one index explain select a1, a2, b, c, min(d), max(d) from t1 group by a1,a2,b,c; --- other aggregate functions than MIN/MAX +# other aggregate functions than MIN/MAX explain select a1,a2,count(a2) from t1 group by a1,a2,b; explain select a1,a2,count(a2) from t1 where (a1 > 'a') group by a1,a2,b; explain select sum(ord(a1)) from t1 where (a1 > 'a') group by a1,a2,b; @@ -765,24 +765,24 @@ INSERT INTO t4 VALUES(1); INSERT INTO t5 VALUES(1,1); INSERT INTO t6 VALUES(1); --- original bug query +# original bug query SELECT * FROM t1 NATURAL JOIN (t2 JOIN (t3 NATURAL JOIN t4, t5 NATURAL JOIN t6) ON (t3.id3 = t2.id3 AND t5.id5 = t2.id5)); --- inner join swapped +# inner join swapped SELECT * FROM t1 NATURAL JOIN (((t3 NATURAL JOIN t4) join (t5 NATURAL JOIN t6) on t3.id4 = t5.id5) JOIN t2 ON (t3.id3 = t2.id3 AND t5.id5 = t2.id5)); --- one join less, no ON cond +# one join less, no ON cond SELECT * FROM t1 NATURAL JOIN ((t3 join (t5 NATURAL JOIN t6)) JOIN t2); --- wrong error message: 'id2' - ambiguous column +# wrong error message: 'id2' - ambiguous column SELECT * FROM (t2 JOIN (t3 NATURAL JOIN t4, t5 NATURAL JOIN t6) ON (t3.id3 = t2.id3 AND t5.id5 = t2.id5)) diff --git a/mysql-test/t/innodb.test b/mysql-test/t/innodb.test index 0c083ccdfd3..4a1efc9e566 100644 --- a/mysql-test/t/innodb.test +++ b/mysql-test/t/innodb.test @@ -1482,7 +1482,7 @@ INSERT INTO t1 (id) VALUES (NULL); SELECT * FROM t1; DROP TABLE t2, t1; --- Test that foreign keys in temporary tables are not accepted (bug #12084) +# Test that foreign keys in temporary tables are not accepted (bug #12084) CREATE TABLE t1 ( id INT PRIMARY KEY diff --git a/mysql-test/t/join.test b/mysql-test/t/join.test index 27558a31d68..4a07f495a55 100644 --- a/mysql-test/t/join.test +++ b/mysql-test/t/join.test @@ -362,38 +362,38 @@ insert into t4 values (2, 3); insert into t5 values (11,4); insert into t6 values (2, 3); --- Views with simple natural join. +# Views with simple natural join. create algorithm=merge view v1a as select * from t1 natural join t2; --- as above, but column names are cross-renamed: a->c, c->b, b->a +# as above, but column names are cross-renamed: a->c, c->b, b->a create algorithm=merge view v1b(a,b,c) as select * from t1 natural join t2; --- as above, but column names are aliased: a->c, c->b, b->a +# as above, but column names are aliased: a->c, c->b, b->a create algorithm=merge view v1c as select b as a, c as b, a as c from t1 natural join t2; --- as above, but column names are cross-renamed, and aliased --- a->c->b, c->b->a, b->a->c +# as above, but column names are cross-renamed, and aliased +# a->c->b, c->b->a, b->a->c create algorithm=merge view v1d(b, a, c) as select a as c, c as b, b as a from t1 natural join t2; --- Views with JOIN ... ON +# Views with JOIN ... 
ON create algorithm=merge view v2a as select t1.c, t1.b, t2.a from t1 join (t2 join t4 on b + 1 = y) on t1.c = t4.c; create algorithm=merge view v2b as select t1.c as b, t1.b as a, t2.a as c from t1 join (t2 join t4 on b + 1 = y) on t1.c = t4.c; --- Views with bigger natural join +# Views with bigger natural join create algorithm=merge view v3a as select * from t1 natural join t2 natural join t3; create algorithm=merge view v3b as select * from t1 natural join (t2 natural join t3); --- View over views with mixed natural join and join ... on +# View over views with mixed natural join and join ... on create algorithm=merge view v4 as select * from v2a natural join v3a; --- Nested natural/using joins. +# Nested natural/using joins. select * from (t1 natural join t2) natural join (t3 natural join t4); select * from (t1 natural join t2) natural left join (t3 natural join t4); select * from (t3 natural join t4) natural right join (t1 natural join t2); @@ -402,12 +402,12 @@ select * from (t4 natural right join t3) natural right join (t2 natural right jo select * from t1 natural join t2 natural join t3 natural join t4; select * from ((t1 natural join t2) natural join t3) natural join t4; select * from t1 natural join (t2 natural join (t3 natural join t4)); --- BUG#15355: this query fails in 'prepared statements' mode --- select * from ((t3 natural join (t1 natural join t2)) natural join t4) natural join t5; --- select * from ((t3 natural left join (t1 natural left join t2)) natural left join t4) natural left join t5; +# BUG#15355: this query fails in 'prepared statements' mode +# select * from ((t3 natural join (t1 natural join t2)) natural join t4) natural join t5; +# select * from ((t3 natural left join (t1 natural left join t2)) natural left join t4) natural left join t5; select * from t5 natural right join (t4 natural right join ((t2 natural right join t1) natural right join t3)); select * from (t1 natural join t2), (t3 natural join t4); --- MySQL extension - nested comma ',' operator instead of cross join. +# MySQL extension - nested comma ',' operator instead of cross join. select * from t5 natural join ((t1 natural join t2), (t3 natural join t4)); select * from ((t1 natural join t2), (t3 natural join t4)) natural join t5; select * from t5 natural join ((t1 natural join t2) cross join (t3 natural join t4)); @@ -417,7 +417,7 @@ select * from (t1 join t2 using (b)) join (t3 join t4 using (c)) using (c); select * from (t1 join t2 using (b)) natural join (t3 join t4 using (c)); --- Other clauses refer to NJ columns. +# Other clauses refer to NJ columns. select a,b,c from (t1 natural join t2) natural join (t3 natural join t4) where b + 1 = y or b + 10 = y group by b,c,a having min(b) < max(y) order by a; select * from (t1 natural join t2) natural left join (t3 natural join t4) @@ -425,23 +425,23 @@ where b + 1 = y or b + 10 = y group by b,c,a,y having min(b) < max(y) order by a select * from (t3 natural join t4) natural right join (t1 natural join t2) where b + 1 = y or b + 10 = y group by b,c,a,y having min(b) < max(y) order by a, y; --- Qualified column references to NJ columns. +# Qualified column references to NJ columns. select * from t1 natural join t2 where t1.c > t2.a; select * from t1 natural join t2 where t1.b > t2.b; select * from t1 natural left join (t4 natural join t5) where t5.z is not NULL; --- Nested 'join ... on' - name resolution of ON conditions +# Nested 'join ... 
on' - name resolution of ON conditions select * from t1 join (t2 join t4 on b + 1 = y) on t1.c = t4.c; select * from (t2 join t4 on b + 1 = y) join t1 on t1.c = t4.c; select * from t1 natural join (t2 join t4 on b + 1 = y); select * from (t1 cross join t2) join (t3 cross join t4) on (a < y and t2.b < t3.c); --- MySQL extension - 'join ... on' over nested comma operator +# MySQL extension - 'join ... on' over nested comma operator select * from (t1, t2) join (t3, t4) on (a < y and t2.b < t3.c); select * from (t1 natural join t2) join (t3 natural join t4) on a = y; select * from ((t3 join (t1 join t2 on c > a) on t3.b < t2.a) join t4 on y > t1.c) join t5 on z = t1.b + 3; --- MySQL extension - refererence qualified coalesced columns +# MySQL extension - refererence qualified coalesced columns select * from t1 natural join t2 where t1.b > 0; select * from t1 natural join (t4 natural join t5) where t4.y > 7; select * from (t4 natural join t5) natural join t1 where t4.y > 7; @@ -449,11 +449,11 @@ select * from t1 natural left join (t4 natural join t5) where t4.y > 7; select * from (t4 natural join t5) natural right join t1 where t4.y > 7; select * from (t1 natural join t2) join (t3 natural join t4) on t1.b = t3.b; --- MySQL extension - select qualified columns of NJ columns +# MySQL extension - select qualified columns of NJ columns select t1.*, t2.* from t1 natural join t2; select t1.*, t2.*, t3.*, t4.* from (t1 natural join t2) natural join (t3 natural join t4); --- Queries over subselects in the FROM clause +# Queries over subselects in the FROM clause select * from (select * from t1 natural join t2) as t12 natural join (select * from t3 natural join t4) as t34; @@ -464,7 +464,7 @@ select * from (select * from t3 natural join t4) as t34 natural right join (select * from t1 natural join t2) as t12; --- Queries over views +# Queries over views select * from v1a; select * from v1b; select * from v1c; @@ -481,13 +481,13 @@ select * from v1c join v2a on v1c.b = v2a.c; select * from v1d join v2a on v1d.a = v2a.c; select * from v1a join (t3 natural join t4) on a = y; --- TODO: add tests with correlated subqueries for natural join/join on. --- related to BUG#15269 +# TODO: add tests with correlated subqueries for natural join/join on. 
+# related to BUG#15269 ----------------------------------------------------------------------- --- Negative tests (tests for errors) ----------------------------------------------------------------------- +#-------------------------------------------------------------------- +# Negative tests (tests for errors) +#-------------------------------------------------------------------- -- error 1052 select * from t1 natural join (t3 cross join t4); -- works in Oracle - bug -- error 1052 @@ -504,7 +504,7 @@ select * from t6 natural join ((t1 natural join t2), (t3 natural join t4)); select * from (t1 join t2 on t1.b=t2.b) natural join (t3 natural join t4); -- error 1052 select * from (t3 natural join t4) natural join (t1 join t2 on t1.b=t2.b); --- this one is OK, the next equivalent one is incorrect (bug in Oracle) +# this one is OK, the next equivalent one is incorrect (bug in Oracle) -- error 1052 select * from (t3 join (t4 natural join t5) on (b < z)) natural join @@ -578,12 +578,12 @@ insert into t3 values (2,3); insert into t4 values (1,3); insert into t5 values (1,4); --- this fails +# this fails prepare stmt1 from "select * from ((t3 natural join (t1 natural join t2)) natural join t4) natural join t5"; execute stmt1; --- this works +# this works select * from ((t3 natural join (t1 natural join t2)) natural join t4) natural join t5; drop table t1, t2, t3, t4, t5; diff --git a/mysql-test/t/limit.test b/mysql-test/t/limit.test index cf7789428b2..2eb4e6cbbb2 100644 --- a/mysql-test/t/limit.test +++ b/mysql-test/t/limit.test @@ -7,7 +7,7 @@ drop table if exists t1; --enable_warnings create table t1 (a int not null default 0 primary key, b int not null default 0); -insert into t1 () values (); -- Testing default values +insert into t1 () values (); # Testing default values insert into t1 values (1,1),(2,1),(3,1); update t1 set a=4 where b=1 limit 1; select * from t1; diff --git a/mysql-test/t/null.test b/mysql-test/t/null.test index 4aec745f3f7..65e09b006ec 100644 --- a/mysql-test/t/null.test +++ b/mysql-test/t/null.test @@ -177,7 +177,7 @@ drop table t1; # non-null string collation, i.e. case insensitively, # rather than according to NULL's collation, i.e. case sensitively # --- in field +# in field select case 'str' when 'STR' then 'str' when null then 'null' end as c01, case 'str' when null then 'null' when 'STR' then 'str' end as c02, diff --git a/mysql-test/t/select.test b/mysql-test/t/select.test index 0f096d97d25..0c82cef867f 100644 --- a/mysql-test/t/select.test +++ b/mysql-test/t/select.test @@ -2700,7 +2700,7 @@ insert into t2 values ('58013'),('58014'),('58015'),('58016'); create table t3 (a_id int(11) not null, b_id char(16) character set utf8); insert into t3 values (123,null),(123,null),(123,null),(123,null),(123,null),(123,'58013'); --- both queries are equivalent +# both queries are equivalent select count(*) from t1 inner join (t3 left join t2 on t2.id = t3.b_id) on t1.id = t3.a_id; @@ -2940,7 +2940,7 @@ create table t2 ( insert into t1 (b,c) values (0,1), (0,1); insert into t2 (b,c) values (0,1); --- Row 1 should succeed. Row 2 should fail. Both fail. +# Row 1 should succeed. Row 2 should fail. Both fail. 
select t1.a, t1.b + 0, t1.c + 0, t2.a, t2.b + 0, t2.c, t2.d from t1 left outer join t2 on t1.a = t2.c and t2.b <> 1 where t1.b <> 1 order by t1.a; diff --git a/mysql-test/t/sp-prelocking.test b/mysql-test/t/sp-prelocking.test index b94de6236d3..cc3e3b93e06 100644 --- a/mysql-test/t/sp-prelocking.test +++ b/mysql-test/t/sp-prelocking.test @@ -209,7 +209,7 @@ select f3() // call sp1() // ---------------- +# --------------- drop procedure sp1// drop function f3// diff --git a/mysql-test/t/strict.test b/mysql-test/t/strict.test index 6ebbb53ed8e..224a7422de1 100644 --- a/mysql-test/t/strict.test +++ b/mysql-test/t/strict.test @@ -327,14 +327,14 @@ INSERT INTO t1 (col2) VALUES(CAST('0000-00-00' AS DATETIME)); # SQLSTATE 22007 --error 1292 INSERT INTO t1 (col3) VALUES(CAST('0000-10-31 15:30' AS DATETIME)); --- should return OK --- We accept this to be a failure +# should return OK +# We accept this to be a failure --error 1292 INSERT INTO t1 (col3) VALUES(CAST('2004-10-0 15:30' AS DATETIME)); --error 1292 INSERT INTO t1 (col3) VALUES(CAST('2004-0-10 15:30' AS DATETIME)); --- should return SQLSTATE 22007 +# should return SQLSTATE 22007 # deactivated because of Bug#8294 # Bug#8294 Traditional: Misleading error message for invalid CAST to DATE @@ -422,8 +422,8 @@ INSERT INTO t1 (col2) VALUES(CONVERT('0000-00-00',DATETIME)); # SQLSTATE 22007 --error 1292 INSERT INTO t1 (col3) VALUES(CONVERT('0000-10-31 15:30',DATETIME)); --- should return OK --- We accept this to be a failure +# should return OK +# We accept this to be a failure --error 1292 INSERT INTO t1 (col3) VALUES(CONVERT('2004-10-0 15:30',DATETIME)); @@ -729,11 +729,11 @@ DROP TABLE t1; CREATE TABLE t1 (col1 NUMERIC(4,2)); INSERT INTO t1 VALUES (10.55),(10.5555),(0),(-10.55),(-10.5555),(11),(1e+01); --- Note that the +/-10.5555 is inserted as +/-10.55, not +/-10.56 ! +# Note that the +/-10.5555 is inserted as +/-10.55, not +/-10.56 ! 
INSERT INTO t1 VALUES ('10.55'),('10.5555'),('-10.55'),('-10.5555'),('11'),('1e+01'); --- The 2 following inserts should generate a warning, but doesn't yet --- because NUMERIC works like DECIMAL +# The 2 following inserts should generate a warning, but doesn't yet +# because NUMERIC works like DECIMAL --error 1264 INSERT INTO t1 VALUES (101.55); --error 1264 @@ -744,8 +744,8 @@ INSERT INTO t1 VALUES (-101.55); INSERT INTO t1 VALUES (1010.55); --error 1264 INSERT INTO t1 VALUES (1010); --- The 2 following inserts should generate a warning, but doesn't yet --- because NUMERIC works like DECIMAL +# The 2 following inserts should generate a warning, but doesn't yet +# because NUMERIC works like DECIMAL --error 1264 INSERT INTO t1 VALUES ('101.55'); --error 1264 diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index dee5b1e4fb0..b68cd225a08 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -108,7 +108,7 @@ select * from t3 where a in (select a,b from t2); -- error 1241 select * from t3 where a in (select * from t2); insert into t4 values (12,7),(1,7),(10,9),(9,6),(7,6),(3,9),(1,10); --- empty set +# empty set select b,max(a) as ma from t4 group by b having b < (select max(t2.a) from t2 where t2.b=t4.b); insert into t2 values (2,10); select b,max(a) as ma from t4 group by b having ma < (select max(t2.a) from t2 where t2.b=t4.b); @@ -2247,11 +2247,11 @@ drop table t1; # Bug#19700: subselect returning BIGINT always returned it as SIGNED # CREATE TABLE t1 (i BIGINT UNSIGNED); -INSERT INTO t1 VALUES (10000000000000000000); -- > MAX SIGNED BIGINT 9323372036854775807 +INSERT INTO t1 VALUES (10000000000000000000); # > MAX SIGNED BIGINT 9323372036854775807 INSERT INTO t1 VALUES (1); CREATE TABLE t2 (i BIGINT UNSIGNED); -INSERT INTO t2 VALUES (10000000000000000000); -- same as first table +INSERT INTO t2 VALUES (10000000000000000000); # same as first table INSERT INTO t2 VALUES (1); /* simple test */ diff --git a/mysql-test/t/type_newdecimal.test b/mysql-test/t/type_newdecimal.test index e4843c3b83e..398eeb5b740 100644 --- a/mysql-test/t/type_newdecimal.test +++ b/mysql-test/t/type_newdecimal.test @@ -613,7 +613,7 @@ select truncate(99.999999999999999999999999999999999999,31); #-- should return 99.9999999999999999999999999999999 # select truncate(99999999999999999999999999999999999999,-31); --- should return 90000000000000000000000000000000 +# should return 90000000000000000000000000000000 # #-- 6. Set functions (AVG, SUM, COUNT) should work. # @@ -810,7 +810,7 @@ select 1 / 0; #BUG#6048 Stored procedure causes operating system reboot #BUG#6053 DOUBLE PRECISION literal --- Tests from 'traditional' mode tests +# Tests from 'traditional' mode tests # set sql_mode='ansi,traditional'; # diff --git a/mysql-test/t/view_grant.test b/mysql-test/t/view_grant.test index 8bc34cfe148..4a3a29e3afe 100644 --- a/mysql-test/t/view_grant.test +++ b/mysql-test/t/view_grant.test @@ -800,7 +800,7 @@ DROP DATABASE mysqltest1; CREATE TABLE t1 (a INT PRIMARY KEY); INSERT INTO t1 VALUES (1), (2), (3); CREATE DEFINER = 'no-such-user'@localhost VIEW v AS SELECT a from t1; ---warning 1448 +#--warning 1448 SHOW CREATE VIEW v; --error 1449 SELECT * FROM v; diff --git a/mysys/default.c b/mysys/default.c index 540968d4ba0..d93f4135e73 100644 --- a/mysys/default.c +++ b/mysys/default.c @@ -484,7 +484,7 @@ static int search_default_file(Process_option_func opt_handler, my_bool have_ext= fn_ext(config_file)[0] != 0; const char **exts_to_use= have_ext ? 
empty_list : f_extensions; - for (ext= (char**) exts_to_use; *ext; *ext++) + for (ext= (char**) exts_to_use; *ext; ext++) { int error; if ((error= search_default_file_with_ext(opt_handler, handler_ctx, @@ -672,7 +672,7 @@ static int search_default_file_with_ext(Process_option_func opt_handler, ext= fn_ext(search_file->name); /* check extension */ - for (tmp_ext= (char**) f_extensions; *tmp_ext; *tmp_ext++) + for (tmp_ext= (char**) f_extensions; *tmp_ext; tmp_ext++) { if (!strcmp(ext, *tmp_ext)) break; @@ -861,7 +861,7 @@ void my_print_default_files(const char *conf_file) { for (dirs=default_directories ; *dirs; dirs++) { - for (ext= (char**) exts_to_use; *ext; *ext++) + for (ext= (char**) exts_to_use; *ext; ext++) { const char *pos; char *end; diff --git a/mysys/hash.c b/mysys/hash.c index 99479ef6769..9a268a7e218 100644 --- a/mysys/hash.c +++ b/mysys/hash.c @@ -53,7 +53,7 @@ _hash_init(HASH *hash,CHARSET_INFO *charset, void (*free_element)(void*),uint flags CALLER_INFO_PROTO) { DBUG_ENTER("hash_init"); - DBUG_PRINT("enter",("hash: 0x%lx size: %d",hash,size)); + DBUG_PRINT("enter",("hash: 0x%lx size: %d", (long) hash, size)); hash->records=0; if (my_init_dynamic_array_ci(&hash->array,sizeof(HASH_LINK),size,0)) @@ -109,7 +109,7 @@ static inline void hash_free_elements(HASH *hash) void hash_free(HASH *hash) { DBUG_ENTER("hash_free"); - DBUG_PRINT("enter",("hash: 0x%lxd",hash)); + DBUG_PRINT("enter",("hash: 0x%lxd", (long) hash)); hash_free_elements(hash); hash->free= 0; @@ -129,7 +129,7 @@ void hash_free(HASH *hash) void my_hash_reset(HASH *hash) { DBUG_ENTER("my_hash_reset"); - DBUG_PRINT("enter",("hash: 0x%lxd",hash)); + DBUG_PRINT("enter",("hash: 0x%lxd", (long) hash)); hash_free_elements(hash); reset_dynamic(&hash->array); @@ -644,7 +644,8 @@ my_bool hash_check(HASH *hash) if ((rec_link=hash_rec_mask(hash,hash_info,blength,records)) != i) { DBUG_PRINT("error", - ("Record in wrong link at %d: Start %d Record: 0x%lx Record-link %d", idx,i,hash_info->data,rec_link)); + ("Record in wrong link at %d: Start %d Record: 0x%lx Record-link %d", + idx, i, (long) hash_info->data, rec_link)); error=1; } else @@ -655,12 +656,12 @@ my_bool hash_check(HASH *hash) } if (found != records) { - DBUG_PRINT("error",("Found %ld of %ld records")); + DBUG_PRINT("error",("Found %u of %u records", found, records)); error=1; } if (records) DBUG_PRINT("info", - ("records: %ld seeks: %d max links: %d hitrate: %.2f", + ("records: %u seeks: %d max links: %d hitrate: %.2f", records,seek,max_links,(float) seek / (float) records)); return error; } diff --git a/mysys/list.c b/mysys/list.c index 0e55c9399f5..c4ce5b5e36f 100644 --- a/mysys/list.c +++ b/mysys/list.c @@ -28,7 +28,7 @@ LIST *list_add(LIST *root, LIST *element) { DBUG_ENTER("list_add"); - DBUG_PRINT("enter",("root: 0x%lx element: 0x%lx", root, element)); + DBUG_PRINT("enter",("root: 0x%lx element: 0x%lx", (long) root, (long) element)); if (root) { if (root->prev) /* If add in mid of list */ diff --git a/mysys/mf_iocache.c b/mysys/mf_iocache.c index 249eaf48ad2..e0962999015 100644 --- a/mysys/mf_iocache.c +++ b/mysys/mf_iocache.c @@ -594,7 +594,8 @@ void init_io_cache_share(IO_CACHE *read_cache, IO_CACHE_SHARE *cshare, DBUG_ENTER("init_io_cache_share"); DBUG_PRINT("io_cache_share", ("read_cache: 0x%lx share: 0x%lx " "write_cache: 0x%lx threads: %u", - read_cache, cshare, write_cache, num_threads)); + (long) read_cache, (long) cshare, + (long) write_cache, num_threads)); DBUG_ASSERT(num_threads > 1); DBUG_ASSERT(read_cache->type == READ_CACHE); @@ -656,7 +657,7 @@ 
void remove_io_thread(IO_CACHE *cache) pthread_mutex_lock(&cshare->mutex); DBUG_PRINT("io_cache_share", ("%s: 0x%lx", (cache == cshare->source_cache) ? - "writer" : "reader", cache)); + "writer" : "reader", (long) cache)); /* Remove from share. */ total= --cshare->total_threads; @@ -732,7 +733,7 @@ static int lock_io_cache(IO_CACHE *cache, my_off_t pos) cshare->running_threads--; DBUG_PRINT("io_cache_share", ("%s: 0x%lx pos: %lu running: %u", (cache == cshare->source_cache) ? - "writer" : "reader", cache, (ulong) pos, + "writer" : "reader", (long) cache, (ulong) pos, cshare->running_threads)); if (cshare->source_cache) @@ -871,7 +872,7 @@ static void unlock_io_cache(IO_CACHE *cache) DBUG_PRINT("io_cache_share", ("%s: 0x%lx pos: %lu running: %u", (cache == cshare->source_cache) ? "writer" : "reader", - cache, (ulong) cshare->pos_in_file, + (long) cache, (ulong) cshare->pos_in_file, cshare->total_threads)); cshare->running_threads= cshare->total_threads; diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c index 11e27bb19ad..d658e6fe055 100644 --- a/mysys/mf_keycache.c +++ b/mysys/mf_keycache.c @@ -418,9 +418,9 @@ int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, DBUG_PRINT("exit", ("disk_blocks: %d block_root: 0x%lx hash_entries: %d\ hash_root: 0x%lx hash_links: %d hash_link_root: 0x%lx", - keycache->disk_blocks, keycache->block_root, - keycache->hash_entries, keycache->hash_root, - keycache->hash_links, keycache->hash_link_root)); + keycache->disk_blocks, (long) keycache->block_root, + keycache->hash_entries, (long) keycache->hash_root, + keycache->hash_links, (long) keycache->hash_link_root)); bzero((gptr) keycache->changed_blocks, sizeof(keycache->changed_blocks[0]) * CHANGED_BLOCKS_HASH); bzero((gptr) keycache->file_blocks, @@ -633,7 +633,7 @@ void change_key_cache_param(KEY_CACHE *keycache, uint division_limit, void end_key_cache(KEY_CACHE *keycache, my_bool cleanup) { DBUG_ENTER("end_key_cache"); - DBUG_PRINT("enter", ("key_cache: 0x%lx", keycache)); + DBUG_PRINT("enter", ("key_cache: 0x%lx", (long) keycache)); if (!keycache->key_cache_inited) DBUG_VOID_RETURN; @@ -652,7 +652,7 @@ void end_key_cache(KEY_CACHE *keycache, my_bool cleanup) keycache->blocks_changed= 0; } - DBUG_PRINT("status", ("used: %d changed: %d w_requests: %lu " + DBUG_PRINT("status", ("used: %lu changed: %lu w_requests: %lu " "writes: %lu r_requests: %lu reads: %lu", keycache->blocks_used, keycache->global_blocks_changed, (ulong) keycache->global_cache_w_requests, @@ -1085,7 +1085,7 @@ static void unreg_request(KEY_CACHE *keycache, if (block->temperature == BLOCK_WARM) keycache->warm_blocks--; block->temperature= BLOCK_HOT; - KEYCACHE_DBUG_PRINT("unreg_request", ("#warm_blocks=%u", + KEYCACHE_DBUG_PRINT("unreg_request", ("#warm_blocks: %lu", keycache->warm_blocks)); } link_block(keycache, block, hot, (my_bool)at_end); @@ -1104,7 +1104,7 @@ static void unreg_request(KEY_CACHE *keycache, keycache->warm_blocks++; block->temperature= BLOCK_WARM; } - KEYCACHE_DBUG_PRINT("unreg_request", ("#warm_blocks=%u", + KEYCACHE_DBUG_PRINT("unreg_request", ("#warm_blocks: %lu", keycache->warm_blocks)); } } @@ -1352,11 +1352,11 @@ static BLOCK_LINK *find_key_block(KEY_CACHE *keycache, DBUG_ENTER("find_key_block"); KEYCACHE_THREAD_TRACE("find_key_block:begin"); - DBUG_PRINT("enter", ("fd: %u pos %lu wrmode: %lu", - (uint) file, (ulong) filepos, (uint) wrmode)); - KEYCACHE_DBUG_PRINT("find_key_block", ("fd: %u pos: %lu wrmode: %lu", - (uint) file, (ulong) filepos, - (uint) wrmode)); + DBUG_PRINT("enter", ("fd: %d 
pos: %lu wrmode: %d", + file, (ulong) filepos, wrmode)); + KEYCACHE_DBUG_PRINT("find_key_block", ("fd: %d pos: %lu wrmode: %d", + file, (ulong) filepos, + wrmode)); #if !defined(DBUG_OFF) && defined(EXTRA_DEBUG) DBUG_EXECUTE("check_keycache2", test_key_cache(keycache, "start of find_key_block", 0);); @@ -1646,8 +1646,8 @@ restart: KEYCACHE_DBUG_ASSERT(page_status != -1); *page_st=page_status; KEYCACHE_DBUG_PRINT("find_key_block", - ("fd: %u pos %lu block->status %u page_status %lu", - (uint) file, (ulong) filepos, block->status, + ("fd: %d pos: %lu block->status: %u page_status: %u", + file, (ulong) filepos, block->status, (uint) page_status)); #if !defined(DBUG_OFF) && defined(EXTRA_DEBUG) @@ -2338,7 +2338,7 @@ static int flush_key_blocks_int(KEY_CACHE *keycache, BLOCK_LINK *cache_buff[FLUSH_CACHE],**cache; int last_errno= 0; DBUG_ENTER("flush_key_blocks_int"); - DBUG_PRINT("enter",("file: %d blocks_used: %d blocks_changed: %d", + DBUG_PRINT("enter",("file: %d blocks_used: %lu blocks_changed: %lu", file, keycache->blocks_used, keycache->blocks_changed)); #if !defined(DBUG_OFF) && defined(EXTRA_DEBUG) @@ -2543,7 +2543,7 @@ int flush_key_blocks(KEY_CACHE *keycache, { int res; DBUG_ENTER("flush_key_blocks"); - DBUG_PRINT("enter", ("keycache: 0x%lx", keycache)); + DBUG_PRINT("enter", ("keycache: 0x%lx", (long) keycache)); if (keycache->disk_blocks <= 0) DBUG_RETURN(0); diff --git a/mysys/mf_keycaches.c b/mysys/mf_keycaches.c index 38fef31fdd4..e5086014a27 100644 --- a/mysys/mf_keycaches.c +++ b/mysys/mf_keycaches.c @@ -159,7 +159,7 @@ static byte *safe_hash_search(SAFE_HASH *hash, const byte *key, uint length) result= hash->default_value; else result= ((SAFE_HASH_ENTRY*) result)->data; - DBUG_PRINT("exit",("data: 0x%lx", result)); + DBUG_PRINT("exit",("data: 0x%lx", (long) result)); DBUG_RETURN(result); } @@ -190,7 +190,7 @@ static my_bool safe_hash_set(SAFE_HASH *hash, const byte *key, uint length, SAFE_HASH_ENTRY *entry; my_bool error= 0; DBUG_ENTER("safe_hash_set"); - DBUG_PRINT("enter",("key: %.*s data: 0x%lx", length, key, data)); + DBUG_PRINT("enter",("key: %.*s data: 0x%lx", length, key, (long) data)); rw_wrlock(&hash->mutex); entry= (SAFE_HASH_ENTRY*) hash_search(&hash->hash, key, length); diff --git a/mysys/my_alloc.c b/mysys/my_alloc.c index d5346d530c3..c97ae83f6bc 100644 --- a/mysys/my_alloc.c +++ b/mysys/my_alloc.c @@ -48,7 +48,8 @@ void init_alloc_root(MEM_ROOT *mem_root, uint block_size, uint pre_alloc_size __attribute__((unused))) { DBUG_ENTER("init_alloc_root"); - DBUG_PRINT("enter",("root: 0x%lx", mem_root)); + DBUG_PRINT("enter",("root: 0x%lx", (long) mem_root)); + mem_root->free= mem_root->used= mem_root->pre_alloc= 0; mem_root->min_malloc= 32; mem_root->block_size= block_size - ALLOC_ROOT_MIN_BLOCK_SIZE; @@ -146,7 +147,7 @@ gptr alloc_root(MEM_ROOT *mem_root,unsigned int Size) #if defined(HAVE_purify) && defined(EXTRA_DEBUG) reg1 USED_MEM *next; DBUG_ENTER("alloc_root"); - DBUG_PRINT("enter",("root: 0x%lx", mem_root)); + DBUG_PRINT("enter",("root: 0x%lx", (long) mem_root)); DBUG_ASSERT(alloc_root_inited(mem_root)); @@ -160,8 +161,8 @@ gptr alloc_root(MEM_ROOT *mem_root,unsigned int Size) next->next= mem_root->used; next->size= Size; mem_root->used= next; - DBUG_PRINT("exit",("ptr: 0x%lx", (((char*) next)+ - ALIGN_SIZE(sizeof(USED_MEM))))); + DBUG_PRINT("exit",("ptr: 0x%lx", (long) (((char*) next)+ + ALIGN_SIZE(sizeof(USED_MEM))))); DBUG_RETURN((gptr) (((char*) next)+ALIGN_SIZE(sizeof(USED_MEM)))); #else uint get_size, block_size; @@ -169,7 +170,7 @@ gptr 
alloc_root(MEM_ROOT *mem_root,unsigned int Size) reg1 USED_MEM *next= 0; reg2 USED_MEM **prev; DBUG_ENTER("alloc_root"); - DBUG_PRINT("enter",("root: 0x%lx", mem_root)); + DBUG_PRINT("enter",("root: 0x%lx", (long) mem_root)); DBUG_ASSERT(alloc_root_inited(mem_root)); Size= ALIGN_SIZE(Size); @@ -328,7 +329,7 @@ void free_root(MEM_ROOT *root, myf MyFlags) { reg1 USED_MEM *next,*old; DBUG_ENTER("free_root"); - DBUG_PRINT("enter",("root: 0x%lx flags: %u", root, (uint) MyFlags)); + DBUG_PRINT("enter",("root: 0x%lx flags: %u", (long) root, (uint) MyFlags)); if (!root) /* QQ: Should be deleted */ DBUG_VOID_RETURN; /* purecov: inspected */ diff --git a/mysys/my_dup.c b/mysys/my_dup.c index 4b7434e29ea..cdc15b3ebce 100644 --- a/mysys/my_dup.c +++ b/mysys/my_dup.c @@ -30,7 +30,7 @@ File my_dup(File file, myf MyFlags) File fd; const char *filename; DBUG_ENTER("my_dup"); - DBUG_PRINT("my",("file: %d MyFlags: %d", MyFlags)); + DBUG_PRINT("my",("file: %d MyFlags: %d", file, MyFlags)); fd = dup(file); filename= (((uint) file < my_file_limit) ? my_file_info[(int) file].name : "Unknown"); diff --git a/mysys/my_fopen.c b/mysys/my_fopen.c index f07beec9f39..6e81d40a2d6 100644 --- a/mysys/my_fopen.c +++ b/mysys/my_fopen.c @@ -79,7 +79,7 @@ FILE *my_fopen(const char *filename, int flags, myf MyFlags) my_stream_opened++; my_file_info[fileno(fd)].type = STREAM_BY_FOPEN; pthread_mutex_unlock(&THR_LOCK_open); - DBUG_PRINT("exit",("stream: 0x%lx",fd)); + DBUG_PRINT("exit",("stream: 0x%lx", (long) fd)); DBUG_RETURN(fd); } pthread_mutex_unlock(&THR_LOCK_open); @@ -103,7 +103,7 @@ int my_fclose(FILE *fd, myf MyFlags) { int err,file; DBUG_ENTER("my_fclose"); - DBUG_PRINT("my",("stream: 0x%lx MyFlags: %d",fd, MyFlags)); + DBUG_PRINT("my",("stream: 0x%lx MyFlags: %d", (long) fd, MyFlags)); pthread_mutex_lock(&THR_LOCK_open); file=fileno(fd); @@ -163,7 +163,7 @@ FILE *my_fdopen(File Filedes, const char *name, int Flags, myf MyFlags) pthread_mutex_unlock(&THR_LOCK_open); } - DBUG_PRINT("exit",("stream: 0x%lx",fd)); + DBUG_PRINT("exit",("stream: 0x%lx", (long) fd)); DBUG_RETURN(fd); } /* my_fdopen */ diff --git a/mysys/my_fstream.c b/mysys/my_fstream.c index 5b17e3ff51c..0f7f4cc888f 100644 --- a/mysys/my_fstream.c +++ b/mysys/my_fstream.c @@ -40,7 +40,7 @@ uint my_fread(FILE *stream, byte *Buffer, uint Count, myf MyFlags) uint readbytes; DBUG_ENTER("my_fread"); DBUG_PRINT("my",("stream: 0x%lx Buffer: 0x%lx Count: %u MyFlags: %d", - stream, Buffer, Count, MyFlags)); + (long) stream, (long) Buffer, Count, MyFlags)); if ((readbytes = (uint) fread(Buffer,sizeof(char),(size_t) Count,stream)) != Count) @@ -81,7 +81,7 @@ uint my_fwrite(FILE *stream, const byte *Buffer, uint Count, myf MyFlags) #endif DBUG_ENTER("my_fwrite"); DBUG_PRINT("my",("stream: 0x%lx Buffer: 0x%lx Count: %u MyFlags: %d", - stream, Buffer, Count, MyFlags)); + (long) stream, (long) Buffer, Count, MyFlags)); #if !defined(NO_BACKGROUND) && defined(USE_MY_STREAM) errors=0; @@ -153,7 +153,7 @@ my_off_t my_fseek(FILE *stream, my_off_t pos, int whence, { DBUG_ENTER("my_fseek"); DBUG_PRINT("my",("stream: 0x%lx pos: %lu whence: %d MyFlags: %d", - stream, pos, whence, MyFlags)); + (long) stream, (long) pos, whence, MyFlags)); DBUG_RETURN(fseek(stream, (off_t) pos, whence) ? 
MY_FILEPOS_ERROR : (my_off_t) ftell(stream)); } /* my_seek */ @@ -166,7 +166,7 @@ my_off_t my_ftell(FILE *stream, myf MyFlags __attribute__((unused))) { off_t pos; DBUG_ENTER("my_ftell"); - DBUG_PRINT("my",("stream: 0x%lx MyFlags: %d",stream, MyFlags)); + DBUG_PRINT("my",("stream: 0x%lx MyFlags: %d", (long) stream, MyFlags)); pos=ftell(stream); DBUG_PRINT("exit",("ftell: %lu",(ulong) pos)); DBUG_RETURN((my_off_t) pos); diff --git a/mysys/my_getwd.c b/mysys/my_getwd.c index 5663ceaa60e..b6b6ee610a5 100644 --- a/mysys/my_getwd.c +++ b/mysys/my_getwd.c @@ -45,7 +45,7 @@ int my_getwd(my_string buf, uint size, myf MyFlags) { my_string pos; DBUG_ENTER("my_getwd"); - DBUG_PRINT("my",("buf: 0x%lx size: %d MyFlags %d", buf,size,MyFlags)); + DBUG_PRINT("my",("buf: 0x%lx size: %d MyFlags %d", (long) buf,size,MyFlags)); #if ! defined(MSDOS) if (curr_dir[0]) /* Current pos is saved here */ diff --git a/mysys/my_handler.c b/mysys/my_handler.c index 23d28956808..46144c0dff2 100644 --- a/mysys/my_handler.c +++ b/mysys/my_handler.c @@ -505,6 +505,7 @@ HA_KEYSEG *ha_find_null(HA_KEYSEG *keyseg, uchar *a) switch ((enum ha_base_keytype) keyseg->type) { case HA_KEYTYPE_TEXT: case HA_KEYTYPE_BINARY: + case HA_KEYTYPE_BIT: if (keyseg->flag & HA_SPACE_PACK) { int a_length; @@ -516,7 +517,9 @@ HA_KEYSEG *ha_find_null(HA_KEYSEG *keyseg, uchar *a) a= end; break; case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: case HA_KEYTYPE_VARBINARY1: + case HA_KEYTYPE_VARBINARY2: { int a_length; get_key_length(a_length, a); @@ -546,6 +549,10 @@ HA_KEYSEG *ha_find_null(HA_KEYSEG *keyseg, uchar *a) case HA_KEYTYPE_DOUBLE: a= end; break; + case HA_KEYTYPE_END: /* purecov: inspected */ + /* keep compiler happy */ + DBUG_ASSERT(0); + break; } } return keyseg; diff --git a/mysys/my_lib.c b/mysys/my_lib.c index 1c5630ad14e..ae7b0baafbd 100644 --- a/mysys/my_lib.c +++ b/mysys/my_lib.c @@ -638,7 +638,7 @@ MY_STAT *my_stat(const char *path, MY_STAT *stat_area, myf my_flags) int m_used; DBUG_ENTER("my_stat"); DBUG_PRINT("my", ("path: '%s', stat_area: 0x%lx, MyFlags: %d", path, - (byte *) stat_area, my_flags)); + (long) stat_area, my_flags)); if ((m_used= (stat_area == NULL))) if (!(stat_area = (MY_STAT *) my_malloc(sizeof(MY_STAT), my_flags))) diff --git a/mysys/my_lread.c b/mysys/my_lread.c index a96febe4474..ccf795631b8 100644 --- a/mysys/my_lread.c +++ b/mysys/my_lread.c @@ -27,8 +27,8 @@ uint32 my_lread(int Filedes, byte *Buffer, uint32 Count, myf MyFlags) { uint32 readbytes; DBUG_ENTER("my_lread"); - DBUG_PRINT("my",("Fd: %d Buffer: %ld Count: %ld MyFlags: %d", - Filedes, Buffer, Count, MyFlags)); + DBUG_PRINT("my",("Fd: %d Buffer: 0x%lx Count: %lu MyFlags: %d", + Filedes, (long) Buffer, (ulong) Count, MyFlags)); DBUG_PRINT("error", ("Deprecated my_lread() function should not be used.")); diff --git a/mysys/my_lwrite.c b/mysys/my_lwrite.c index 02c56a69ba4..85f4677932e 100644 --- a/mysys/my_lwrite.c +++ b/mysys/my_lwrite.c @@ -23,8 +23,8 @@ uint32 my_lwrite(int Filedes, const byte *Buffer, uint32 Count, myf MyFlags) { uint32 writenbytes; DBUG_ENTER("my_lwrite"); - DBUG_PRINT("my",("Fd: %d Buffer: 0x%lx Count: %ld MyFlags: %d", - Filedes, Buffer, Count, MyFlags)); + DBUG_PRINT("my",("Fd: %d Buffer: 0x%lx Count: %lu MyFlags: %d", + Filedes, (long) Buffer, (ulong) Count, MyFlags)); DBUG_PRINT("error", ("Deprecated my_lwrite() function should not be used.")); diff --git a/mysys/my_malloc.c b/mysys/my_malloc.c index f33db2655c4..c6d51e29f18 100644 --- a/mysys/my_malloc.c +++ b/mysys/my_malloc.c @@ -44,7 +44,7 @@ gptr 
my_malloc(unsigned int size, myf my_flags) } else if (my_flags & MY_ZEROFILL) bzero(point,size); - DBUG_PRINT("exit",("ptr: 0x%lx",point)); + DBUG_PRINT("exit",("ptr: 0x%lx", (long) point)); DBUG_RETURN(point); } /* my_malloc */ @@ -55,7 +55,7 @@ gptr my_malloc(unsigned int size, myf my_flags) void my_no_flags_free(gptr ptr) { DBUG_ENTER("my_free"); - DBUG_PRINT("my",("ptr: 0x%lx",ptr)); + DBUG_PRINT("my",("ptr: 0x%lx", (long) ptr)); if (ptr) free(ptr); DBUG_VOID_RETURN; diff --git a/mysys/my_pread.c b/mysys/my_pread.c index f378d548731..b1b9d9da950 100644 --- a/mysys/my_pread.c +++ b/mysys/my_pread.c @@ -30,7 +30,7 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset, int error; DBUG_ENTER("my_pread"); DBUG_PRINT("my",("Fd: %d Seek: %lu Buffer: 0x%lx Count: %u MyFlags: %d", - Filedes, (ulong) offset, Buffer, Count, MyFlags)); + Filedes, (ulong) offset, (long) Buffer, Count, MyFlags)); for (;;) { @@ -49,8 +49,8 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset, if (error) { my_errno=errno; - DBUG_PRINT("warning",("Read only %ld bytes off %ld from %d, errno: %d", - readbytes,Count,Filedes,my_errno)); + DBUG_PRINT("warning",("Read only %d bytes off %u from %d, errno: %d", + (int) readbytes, Count,Filedes,my_errno)); #ifdef THREAD if ((readbytes == 0 || (int) readbytes == -1) && errno == EINTR) { @@ -87,7 +87,7 @@ uint my_pwrite(int Filedes, const byte *Buffer, uint Count, my_off_t offset, ulong written; DBUG_ENTER("my_pwrite"); DBUG_PRINT("my",("Fd: %d Seek: %lu Buffer: 0x%lx Count: %d MyFlags: %d", - Filedes, (ulong) offset,Buffer, Count, MyFlags)); + Filedes, (ulong) offset, (long) Buffer, Count, MyFlags)); errors=0; written=0L; for (;;) diff --git a/mysys/my_read.c b/mysys/my_read.c index 8b88e483fef..33eb3ddf334 100644 --- a/mysys/my_read.c +++ b/mysys/my_read.c @@ -39,7 +39,7 @@ uint my_read(File Filedes, byte *Buffer, uint Count, myf MyFlags) uint readbytes, save_count; DBUG_ENTER("my_read"); DBUG_PRINT("my",("Fd: %d Buffer: 0x%lx Count: %u MyFlags: %d", - Filedes, Buffer, Count, MyFlags)); + Filedes, (long) Buffer, Count, MyFlags)); save_count= Count; for (;;) @@ -48,8 +48,8 @@ uint my_read(File Filedes, byte *Buffer, uint Count, myf MyFlags) if ((readbytes= (uint) read(Filedes, Buffer, Count)) != Count) { my_errno= errno ? 
errno : -1; - DBUG_PRINT("warning",("Read only %ld bytes off %ld from %d, errno: %d", - readbytes, Count, Filedes, my_errno)); + DBUG_PRINT("warning",("Read only %d bytes off %u from %d, errno: %d", + (int) readbytes, Count, Filedes, my_errno)); #ifdef THREAD if ((readbytes == 0 || (int) readbytes == -1) && errno == EINTR) { diff --git a/mysys/my_realloc.c b/mysys/my_realloc.c index a385bf1e530..b521ae36b94 100644 --- a/mysys/my_realloc.c +++ b/mysys/my_realloc.c @@ -27,7 +27,7 @@ gptr my_realloc(gptr oldpoint, uint size, myf my_flags) { gptr point; DBUG_ENTER("my_realloc"); - DBUG_PRINT("my",("ptr: 0x%lx size: %u my_flags: %d",oldpoint, size, + DBUG_PRINT("my",("ptr: 0x%lx size: %u my_flags: %d", (long) oldpoint, size, my_flags)); if (!oldpoint && (my_flags & MY_ALLOW_ZERO_PTR)) @@ -60,6 +60,6 @@ gptr my_realloc(gptr oldpoint, uint size, myf my_flags) my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_WAITTANG), size); } #endif - DBUG_PRINT("exit",("ptr: 0x%lx",point)); + DBUG_PRINT("exit",("ptr: 0x%lx", (long) point)); DBUG_RETURN(point); } /* my_realloc */ diff --git a/mysys/my_seek.c b/mysys/my_seek.c index 69a24c2d3c6..e8c109acacd 100644 --- a/mysys/my_seek.c +++ b/mysys/my_seek.c @@ -61,7 +61,7 @@ my_off_t my_seek(File fd, my_off_t pos, int whence, if (newpos == (os_off_t) -1) { my_errno=errno; - DBUG_PRINT("error",("lseek: %lu, errno: %d",newpos,errno)); + DBUG_PRINT("error",("lseek: %lu, errno: %d", (ulong) newpos,errno)); DBUG_RETURN(MY_FILEPOS_ERROR); } if ((my_off_t) newpos != pos) diff --git a/mysys/my_write.c b/mysys/my_write.c index ae8cb4ab02b..26b9a4f2444 100644 --- a/mysys/my_write.c +++ b/mysys/my_write.c @@ -27,7 +27,7 @@ uint my_write(int Filedes, const byte *Buffer, uint Count, myf MyFlags) ulong written; DBUG_ENTER("my_write"); DBUG_PRINT("my",("Fd: %d Buffer: 0x%lx Count: %d MyFlags: %d", - Filedes, Buffer, Count, MyFlags)); + Filedes, (long) Buffer, Count, MyFlags)); errors=0; written=0L; for (;;) diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c index f6d6644859e..b3466e36197 100644 --- a/mysys/safemalloc.c +++ b/mysys/safemalloc.c @@ -194,7 +194,7 @@ gptr _mymalloc(uint size, const char *filename, uint lineno, myf MyFlags) if ((MyFlags & MY_ZEROFILL) || !sf_malloc_quick) bfill(data, size, (char) (MyFlags & MY_ZEROFILL ? 
0 : ALLOC_VAL)); /* Return a pointer to the real data */ - DBUG_PRINT("exit",("ptr: 0x%lx", data)); + DBUG_PRINT("exit",("ptr: 0x%lx", (long) data)); if (sf_min_adress > data) sf_min_adress= data; if (sf_max_adress < data) @@ -259,7 +259,7 @@ void _myfree(gptr ptr, const char *filename, uint lineno, myf myflags) { struct st_irem *irem; DBUG_ENTER("_myfree"); - DBUG_PRINT("enter",("ptr: 0x%lx", ptr)); + DBUG_PRINT("enter",("ptr: 0x%lx", (long) ptr)); if (!sf_malloc_quick) (void) _sanity (filename, lineno); @@ -410,7 +410,7 @@ void TERMINATE(FILE *file) } DBUG_PRINT("safe", ("%6u bytes at 0x%09lx, allocated at line %4d in '%s'", - irem->datasize, data, irem->linenum, irem->filename)); + irem->datasize, (long) data, irem->linenum, irem->filename)); irem= irem->next; } } @@ -447,7 +447,7 @@ static int _checkchunk(register struct st_irem *irem, const char *filename, fprintf(stderr, " discovered at %s:%d\n", filename, lineno); (void) fflush(stderr); DBUG_PRINT("safe",("Underrun at 0x%lx, allocated at %s:%d", - data, irem->filename, irem->linenum)); + (long) data, irem->filename, irem->linenum)); flag=1; } @@ -463,7 +463,7 @@ static int _checkchunk(register struct st_irem *irem, const char *filename, fprintf(stderr, " discovered at '%s:%d'\n", filename, lineno); (void) fflush(stderr); DBUG_PRINT("safe",("Overrun at 0x%lx, allocated at %s:%d", - data, + (long) data, irem->filename, irem->linenum)); flag=1; diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c index 51df50a4926..66848b94651 100644 --- a/mysys/thr_lock.c +++ b/mysys/thr_lock.c @@ -483,8 +483,8 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_OWNER *owner, data->owner= owner; /* Must be reset ! */ VOID(pthread_mutex_lock(&lock->mutex)); DBUG_PRINT("lock",("data: 0x%lx thread: %ld lock: 0x%lx type: %d", - data, data->owner->info->thread_id, - lock, (int) lock_type)); + (long) data, data->owner->info->thread_id, + (long) lock, (int) lock_type)); check_locks(lock,(uint) lock_type <= (uint) TL_READ_NO_INSERT ? 
"enter read_lock" : "enter write_lock",0); if ((int) lock_type <= (int) TL_READ_NO_INSERT) @@ -659,7 +659,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_OWNER *owner, goto end; } } - DBUG_PRINT("lock",("write locked by thread: %ld, type: %ld", + DBUG_PRINT("lock",("write locked by thread: %ld, type: %d", lock->read.data->owner->info->thread_id, data->type)); } wait_queue= &lock->write_wait; @@ -740,7 +740,7 @@ void thr_unlock(THR_LOCK_DATA *data) enum thr_lock_type lock_type=data->type; DBUG_ENTER("thr_unlock"); DBUG_PRINT("lock",("data: 0x%lx thread: %ld lock: 0x%lx", - data, data->owner->info->thread_id, lock)); + (long) data, data->owner->info->thread_id, (long) lock)); pthread_mutex_lock(&lock->mutex); check_locks(lock,"start of release lock",0); @@ -913,7 +913,7 @@ thr_multi_lock(THR_LOCK_DATA **data, uint count, THR_LOCK_OWNER *owner) { THR_LOCK_DATA **pos,**end; DBUG_ENTER("thr_multi_lock"); - DBUG_PRINT("lock",("data: 0x%lx count: %d",data,count)); + DBUG_PRINT("lock",("data: 0x%lx count: %d", (long) data, count)); if (count > 1) sort_locks(data,count); /* lock everything */ @@ -986,7 +986,7 @@ void thr_multi_unlock(THR_LOCK_DATA **data,uint count) { THR_LOCK_DATA **pos,**end; DBUG_ENTER("thr_multi_unlock"); - DBUG_PRINT("lock",("data: 0x%lx count: %d",data,count)); + DBUG_PRINT("lock",("data: 0x%lx count: %d", (long) data, count)); for (pos=data,end=data+count; pos < end ; pos++) { @@ -1000,7 +1000,8 @@ void thr_multi_unlock(THR_LOCK_DATA **data,uint count) else { DBUG_PRINT("lock",("Free lock: data: 0x%lx thread: %ld lock: 0x%lx", - *pos, (*pos)->owner->info->thread_id, (*pos)->lock)); + (long) *pos, (*pos)->owner->info->thread_id, + (long) (*pos)->lock)); } } DBUG_VOID_RETURN; diff --git a/mysys/tree.c b/mysys/tree.c index 0c9c04919b0..abbc99b2445 100644 --- a/mysys/tree.c +++ b/mysys/tree.c @@ -89,7 +89,7 @@ void init_tree(TREE *tree, uint default_alloc_size, uint memory_limit, tree_element_free free_element, void *custom_arg) { DBUG_ENTER("init_tree"); - DBUG_PRINT("enter",("tree: 0x%lx size: %d",tree,size)); + DBUG_PRINT("enter",("tree: 0x%lx size: %d", (long) tree, size)); if (default_alloc_size < DEFAULT_ALLOC_SIZE) default_alloc_size= DEFAULT_ALLOC_SIZE; @@ -137,7 +137,7 @@ void init_tree(TREE *tree, uint default_alloc_size, uint memory_limit, static void free_tree(TREE *tree, myf free_flags) { DBUG_ENTER("free_tree"); - DBUG_PRINT("enter",("tree: 0x%lx",tree)); + DBUG_PRINT("enter",("tree: 0x%lx", (long) tree)); if (tree->root) /* If initialized */ { diff --git a/mysys/typelib.c b/mysys/typelib.c index 90a093b0b32..d329b687668 100644 --- a/mysys/typelib.c +++ b/mysys/typelib.c @@ -49,7 +49,7 @@ int find_type(my_string x, TYPELIB *typelib, uint full_name) reg1 my_string i; reg2 const char *j; DBUG_ENTER("find_type"); - DBUG_PRINT("enter",("x: '%s' lib: 0x%lx",x,typelib)); + DBUG_PRINT("enter",("x: '%s' lib: 0x%lx", x, (long) typelib)); if (!typelib->count) { diff --git a/ndb/include/logger/LogHandler.hpp b/ndb/include/logger/LogHandler.hpp index 8b9aa43d7a9..efb87bb3104 100644 --- a/ndb/include/logger/LogHandler.hpp +++ b/ndb/include/logger/LogHandler.hpp @@ -135,7 +135,7 @@ public: * * @param str the error string. 
*/ - void setErrorStr(char* str); + void setErrorStr(const char* str); /** * Parse logstring parameters diff --git a/ndb/include/ndb_global.h.in b/ndb/include/ndb_global.h.in index 122b0edc400..f1eed73f71a 100644 --- a/ndb/include/ndb_global.h.in +++ b/ndb/include/ndb_global.h.in @@ -132,4 +132,10 @@ extern "C" { #define PATH_MAX 1024 #endif +#if defined(_lint) || defined(FORCE_INIT_OF_VARS) +#define LINT_SET_PTR = {0,0} +#else +#define LINT_SET_PTR +#endif + #endif diff --git a/ndb/include/util/InputStream.hpp b/ndb/include/util/InputStream.hpp index 4d7d06aeaa3..56c43686df1 100644 --- a/ndb/include/util/InputStream.hpp +++ b/ndb/include/util/InputStream.hpp @@ -25,6 +25,7 @@ */ class InputStream { public: + virtual ~InputStream() {} virtual char* gets(char * buf, int bufLen) = 0; }; diff --git a/ndb/include/util/OutputStream.hpp b/ndb/include/util/OutputStream.hpp index c7e009d4537..a834b577bb3 100644 --- a/ndb/include/util/OutputStream.hpp +++ b/ndb/include/util/OutputStream.hpp @@ -25,6 +25,7 @@ */ class OutputStream { public: + virtual ~OutputStream() {} virtual int print(const char * fmt, ...) = 0; virtual int println(const char * fmt, ...) = 0; virtual void flush() {}; diff --git a/ndb/include/util/SimpleProperties.hpp b/ndb/include/util/SimpleProperties.hpp index 438426fb62b..b29e65e21da 100644 --- a/ndb/include/util/SimpleProperties.hpp +++ b/ndb/include/util/SimpleProperties.hpp @@ -151,6 +151,7 @@ public: ValueType m_type; protected: Reader(); + virtual ~Reader() {} virtual void reset() = 0; virtual bool step(Uint32 len) = 0; @@ -169,6 +170,7 @@ public: bool add(Uint16 key, const char * value); bool add(Uint16 key, const void* value, int len); protected: + virtual ~Writer() {} virtual bool reset() = 0; virtual bool putWord(Uint32 val) = 0; virtual bool putWords(const Uint32 * src, Uint32 len) = 0; @@ -184,6 +186,7 @@ class SimplePropertiesLinearReader : public SimpleProperties::Reader { public: SimplePropertiesLinearReader(const Uint32 * src, Uint32 len); + virtual ~SimplePropertiesLinearReader() {} virtual void reset(); virtual bool step(Uint32 len); virtual bool getWord(Uint32 * dst); @@ -202,6 +205,7 @@ class LinearWriter : public SimpleProperties::Writer { public: LinearWriter(Uint32 * src, Uint32 len); + virtual ~LinearWriter() {} virtual bool reset(); virtual bool putWord(Uint32 val); virtual bool putWords(const Uint32 * src, Uint32 len); @@ -219,6 +223,7 @@ class UtilBufferWriter : public SimpleProperties::Writer { public: UtilBufferWriter(class UtilBuffer & buf); + virtual ~UtilBufferWriter() {} virtual bool reset(); virtual bool putWord(Uint32 val); virtual bool putWords(const Uint32 * src, Uint32 len); @@ -238,6 +243,7 @@ public: SimplePropertiesSectionReader(struct SegmentedSectionPtr &, class SectionSegmentPool &); + virtual ~SimplePropertiesSectionReader() {} virtual void reset(); virtual bool step(Uint32 len); virtual bool getWord(Uint32 * dst); @@ -270,6 +276,7 @@ class SimplePropertiesSectionWriter : public SimpleProperties::Writer { public: SimplePropertiesSectionWriter(class SectionSegmentPool &); + virtual ~SimplePropertiesSectionWriter() {} virtual bool reset(); virtual bool putWord(Uint32 val); virtual bool putWords(const Uint32 * src, Uint32 len); diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp index c6f02a7807a..8027a6bd347 100644 --- a/ndb/src/common/debugger/EventLogger.cpp +++ b/ndb/src/common/debugger/EventLogger.cpp @@ -116,8 +116,7 @@ void getTextNDBStopForced(QQQQ) { int sphase = theData[4]; int extra = 
theData[5]; getRestartAction(theData[1],action_str); - if (signal) - reason_str.appfmt(" Initiated by signal %d.", signum); + reason_str.appfmt(" Initiated by signal %d.", signum); if (error) { ndbd_exit_classification cl; diff --git a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp b/ndb/src/common/debugger/signaldata/BackupSignalData.cpp index 27fed22ac72..7410db44aa3 100644 --- a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp +++ b/ndb/src/common/debugger/signaldata/BackupSignalData.cpp @@ -124,6 +124,9 @@ printABORT_BACKUP_ORD(FILE * out, const Uint32 * data, Uint32 len, Uint16 b){ sig->backupPtr, sig->backupId); return true; break; + case AbortBackupOrd::AbortScan: + case AbortBackupOrd::IncompatibleVersions: + return false; } return false; } diff --git a/ndb/src/common/logger/LogHandler.cpp b/ndb/src/common/logger/LogHandler.cpp index c11f962d4fb..47333f81812 100644 --- a/ndb/src/common/logger/LogHandler.cpp +++ b/ndb/src/common/logger/LogHandler.cpp @@ -164,9 +164,9 @@ LogHandler::getErrorStr() } void -LogHandler::setErrorStr(char* str) +LogHandler::setErrorStr(const char* str) { - m_errorStr= str; + m_errorStr= (char*) str; } bool diff --git a/ndb/src/common/portlib/NdbMutex.c b/ndb/src/common/portlib/NdbMutex.c index 4a170d87e5c..f0a1614ba8e 100644 --- a/ndb/src/common/portlib/NdbMutex.c +++ b/ndb/src/common/portlib/NdbMutex.c @@ -28,7 +28,7 @@ NdbMutex* NdbMutex_Create(void) DBUG_ENTER("NdbMutex_Create"); pNdbMutex = (NdbMutex*)NdbMem_Allocate(sizeof(NdbMutex)); - DBUG_PRINT("info",("NdbMem_Allocate 0x%lx",pNdbMutex)); + DBUG_PRINT("info",("NdbMem_Allocate 0x%lx", (long) pNdbMutex)); if (pNdbMutex == NULL) DBUG_RETURN(NULL); @@ -50,7 +50,7 @@ int NdbMutex_Destroy(NdbMutex* p_mutex) result = pthread_mutex_destroy(p_mutex); - DBUG_PRINT("info",("NdbMem_Free 0x%lx",p_mutex)); + DBUG_PRINT("info",("NdbMem_Free 0x%lx", (long) p_mutex)); NdbMem_Free(p_mutex); DBUG_RETURN(result); diff --git a/ndb/src/common/portlib/NdbThread.c b/ndb/src/common/portlib/NdbThread.c index 48d00956ec2..67c8f6faf50 100644 --- a/ndb/src/common/portlib/NdbThread.c +++ b/ndb/src/common/portlib/NdbThread.c @@ -122,7 +122,7 @@ struct NdbThread* NdbThread_Create(NDB_THREAD_FUNC *p_thread_func, assert(result==0); pthread_attr_destroy(&thread_attr); - DBUG_PRINT("exit",("ret: %lx", tmpThread)); + DBUG_PRINT("exit",("ret: 0x%lx", (long) tmpThread)); DBUG_RETURN(tmpThread); } @@ -131,7 +131,7 @@ void NdbThread_Destroy(struct NdbThread** p_thread) { DBUG_ENTER("NdbThread_Destroy"); if (*p_thread != NULL){ - DBUG_PRINT("enter",("*p_thread: %lx", * p_thread)); + DBUG_PRINT("enter",("*p_thread: 0x%lx", (long) *p_thread)); free(* p_thread); * p_thread = 0; } diff --git a/ndb/src/common/transporter/Transporter.cpp b/ndb/src/common/transporter/Transporter.cpp index 383456f1077..b2ee75e4754 100644 --- a/ndb/src/common/transporter/Transporter.cpp +++ b/ndb/src/common/transporter/Transporter.cpp @@ -39,8 +39,8 @@ Transporter::Transporter(TransporterRegistry &t_reg, int _byteorder, bool _compression, bool _checksum, bool _signalId) : m_s_port(s_port), remoteNodeId(rNodeId), localNodeId(lNodeId), - isServer(lNodeId==serverNodeId), isMgmConnection(_isMgmConnection), - m_packer(_signalId, _checksum), + isServer(lNodeId==serverNodeId), + m_packer(_signalId, _checksum), isMgmConnection(_isMgmConnection), m_type(_type), m_transporter_registry(t_reg) { diff --git a/ndb/src/cw/cpcd/CPCD.hpp b/ndb/src/cw/cpcd/CPCD.hpp index aecc43150c4..3a69a03aa3f 100644 --- a/ndb/src/cw/cpcd/CPCD.hpp +++ b/ndb/src/cw/cpcd/CPCD.hpp @@ 
-63,6 +63,7 @@ struct CPCEvent { struct EventSubscriber { virtual void report(const CPCEvent &) = 0; + virtual ~EventSubscriber() {} }; /** diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp index b00b1169095..8f3f6ffe55c 100644 --- a/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/ndb/src/kernel/blocks/backup/Backup.cpp @@ -272,7 +272,7 @@ Backup::execCONTINUEB(Signal* signal) Uint32 tabPtr_I = Tdata2; Uint32 fragPtr_I = signal->theData[3]; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptr_I); if (tabPtr_I == RNIL) @@ -309,7 +309,7 @@ Backup::execCONTINUEB(Signal* signal) fragInfo->NoOfRecordsLow = htonl(fragPtr.p->noOfRecords & 0xFFFFFFFF); fragInfo->NoOfRecordsHigh = htonl(fragPtr.p->noOfRecords >> 32); fragInfo->FilePosLow = htonl(0 & 0xFFFFFFFF); - fragInfo->FilePosHigh = htonl(0 >> 32); + fragInfo->FilePosHigh = htonl(0); filePtr.p->operation.dataBuffer.updateWritePtr(sz); @@ -341,7 +341,7 @@ Backup::execCONTINUEB(Signal* signal) case BackupContinueB::BUFFER_UNDERFLOW: { jam(); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, Tdata1); checkFile(signal, filePtr); return; @@ -350,7 +350,7 @@ Backup::execCONTINUEB(Signal* signal) case BackupContinueB::BUFFER_FULL_SCAN: { jam(); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, Tdata1); checkScan(signal, filePtr); return; @@ -359,7 +359,7 @@ Backup::execCONTINUEB(Signal* signal) case BackupContinueB::BUFFER_FULL_FRAG_COMPLETE: { jam(); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, Tdata1); fragmentCompleted(signal, filePtr); return; @@ -368,7 +368,7 @@ Backup::execCONTINUEB(Signal* signal) case BackupContinueB::BUFFER_FULL_META: { jam(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, Tdata1); BackupFilePtr filePtr; @@ -377,7 +377,7 @@ Backup::execCONTINUEB(Signal* signal) if(buf.getFreeSize() + buf.getMinRead() < buf.getUsableSize()) { jam(); - TablePtr tabPtr; + TablePtr tabPtr LINT_SET_PTR; c_tablePool.getPtr(tabPtr, Tdata2); DEBUG_OUT("Backup - Buffer full - " << buf.getFreeSize() @@ -392,7 +392,7 @@ Backup::execCONTINUEB(Signal* signal) return; }//if - TablePtr tabPtr; + TablePtr tabPtr LINT_SET_PTR; c_tablePool.getPtr(tabPtr, Tdata2); GetTabInfoReq * req = (GetTabInfoReq *)signal->getDataPtrSend(); req->senderRef = reference(); @@ -912,6 +912,9 @@ Backup::checkNodeFail(Signal* signal, #endif Uint32 gsn, len, pos; + LINT_INIT(gsn); + LINT_INIT(len); + LINT_INIT(pos); ptr.p->nodes.bitANDC(mask); switch(ptr.p->masterData.gsn){ case GSN_DEFINE_BACKUP_REQ: @@ -1099,7 +1102,7 @@ Backup::execBACKUP_REQ(Signal* signal) void Backup::execUTIL_SEQUENCE_REF(Signal* signal) { - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; jamEntry(); UtilSequenceRef * utilRef = (UtilSequenceRef*)signal->getDataPtr(); ptr.i = utilRef->senderData; @@ -1153,7 +1156,7 @@ Backup::execUTIL_SEQUENCE_CONF(Signal* signal) return; } - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; ptr.i = conf->senderData; c_backupPool.getPtr(ptr); @@ -1194,7 +1197,7 @@ Backup::defineBackupMutex_locked(Signal* signal, Uint32 ptrI, Uint32 retVal){ jamEntry(); ndbrequire(retVal == 0); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; ptr.i = ptrI; c_backupPool.getPtr(ptr); @@ -1215,7 +1218,7 @@ Backup::dictCommitTableMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal) /** * We now have both the 
mutexes */ - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; ptr.i = ptrI; c_backupPool.getPtr(ptr); @@ -1320,7 +1323,7 @@ Backup::execDEFINE_BACKUP_REF(Signal* signal) //const Uint32 backupId = ref->backupId; const Uint32 nodeId = ref->nodeId; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->setErrorCode(ref->errorCode); @@ -1337,7 +1340,7 @@ Backup::execDEFINE_BACKUP_CONF(Signal* signal) //const Uint32 backupId = conf->backupId; const Uint32 nodeId = refToNode(signal->senderBlockRef()); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); if (ERROR_INSERTED(10024)) @@ -1508,7 +1511,7 @@ Backup::execCREATE_TRIG_REF(Signal* signal) const Uint32 ptrI = ref->getConnectionPtr(); const Uint32 tableId = ref->getTableId(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); /** @@ -1623,7 +1626,7 @@ Backup::execSTART_BACKUP_REF(Signal* signal) const Uint32 signalNo = ref->signalNo; const Uint32 nodeId = ref->nodeId; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->setErrorCode(ref->errorCode); @@ -1641,7 +1644,7 @@ Backup::execSTART_BACKUP_CONF(Signal* signal) const Uint32 signalNo = conf->signalNo; const Uint32 nodeId = refToNode(signal->senderBlockRef()); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); startBackupReply(signal, ptr, nodeId, signalNo); @@ -1671,7 +1674,7 @@ Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr, return; } - TablePtr tabPtr; + TablePtr tabPtr LINT_SET_PTR; c_tablePool.getPtr(tabPtr, ptr.p->masterData.startBackup.tablePtr); for(Uint32 i = 0; igetDataPtr(); const Uint32 ptrI = conf->getConnectionPtr(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); alterTrigReply(signal, ptr); @@ -1779,7 +1782,7 @@ Backup::execALTER_TRIG_REF(Signal* signal) AlterTrigRef* ref = (AlterTrigRef*)signal->getDataPtr(); const Uint32 ptrI = ref->getConnectionPtr(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->setErrorCode(ref->getErrorCode()); @@ -1823,7 +1826,7 @@ Backup::execWAIT_GCP_REF(Signal* signal) WaitGCPRef * ref = (WaitGCPRef*)signal->getDataPtr(); const Uint32 ptrI = ref->senderData; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ndbrequire(ptr.p->masterRef == reference()); @@ -1847,7 +1850,7 @@ Backup::execWAIT_GCP_CONF(Signal* signal){ const Uint32 ptrI = conf->senderData; const Uint32 gcp = conf->gcp; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ndbrequire(ptr.p->masterRef == reference()); @@ -2057,7 +2060,7 @@ Backup::execBACKUP_FRAGMENT_REF(Signal* signal) //const Uint32 backupId = ref->backupId; const Uint32 nodeId = ref->nodeId; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); TablePtr tabPtr; @@ -2206,7 +2209,7 @@ Backup::execDROP_TRIG_REF(Signal* signal) DropTrigRef* ref = (DropTrigRef*)signal->getDataPtr(); const Uint32 ptrI = ref->getConnectionPtr(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); //ndbrequire(ref->getErrorCode() == DropTrigRef::NoSuchTrigger); @@ -2221,7 +2224,7 @@ Backup::execDROP_TRIG_CONF(Signal* signal) DropTrigConf* conf = (DropTrigConf*)signal->getDataPtr(); const Uint32 ptrI = conf->getConnectionPtr(); - BackupRecordPtr ptr; + BackupRecordPtr ptr 
LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); dropTrigReply(signal, ptr); @@ -2261,7 +2264,7 @@ Backup::execSTOP_BACKUP_REF(Signal* signal) //const Uint32 backupId = ref->backupId; const Uint32 nodeId = ref->nodeId; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->setErrorCode(ref->errorCode); @@ -2296,7 +2299,7 @@ Backup::execSTOP_BACKUP_CONF(Signal* signal) //const Uint32 backupId = conf->backupId; const Uint32 nodeId = refToNode(signal->senderBlockRef()); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->noOfLogBytes += conf->noOfLogBytes; @@ -2485,7 +2488,7 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal) DefineBackupReq* req = (DefineBackupReq*)signal->getDataPtr(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; const Uint32 ptrI = req->backupPtr; const Uint32 backupId = req->backupId; const BlockReference senderRef = req->senderRef; @@ -2663,7 +2666,7 @@ Backup::execLIST_TABLES_CONF(Signal* signal) ListTablesConf* conf = (ListTablesConf*)signal->getDataPtr(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, conf->senderData); const Uint32 len = signal->length() - ListTablesConf::HeaderLength; @@ -2711,7 +2714,7 @@ Backup::openFiles(Signal* signal, BackupRecordPtr ptr) { jam(); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; FsOpenReq * req = (FsOpenReq *)signal->getDataPtrSend(); req->userReference = reference(); @@ -2776,10 +2779,10 @@ Backup::execFSOPENREF(Signal* signal) const Uint32 userPtr = ref->userPointer; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, userPtr); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); ptr.p->setErrorCode(ref->errorCode); openFilesReply(signal, ptr, filePtr); @@ -2795,11 +2798,11 @@ Backup::execFSOPENCONF(Signal* signal) const Uint32 userPtr = conf->userPointer; const Uint32 filePointer = conf->filePointer; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, userPtr); filePtr.p->filePointer = filePointer; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); ndbrequire(filePtr.p->fileOpened == 0); @@ -2957,7 +2960,7 @@ Backup::execGET_TABINFOREF(Signal* signal) GetTabInfoRef * ref = (GetTabInfoRef*)signal->getDataPtr(); const Uint32 senderData = ref->senderData; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, senderData); defineBackupRef(signal, ptr, ref->errorCode); @@ -2978,7 +2981,7 @@ Backup::execGET_TABINFO_CONF(Signal* signal) const Uint32 len = conf->totalLen; const Uint32 senderData = conf->senderData; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, senderData); SegmentedSectionPtr dictTabInfoPtr; @@ -3212,7 +3215,7 @@ Backup::execDI_FCOUNTCONF(Signal* signal) fragPtr.p->scanned = 0; fragPtr.p->scanning = 0; fragPtr.p->tableId = tableId; - fragPtr.p->node = RNIL; + fragPtr.p->node = 0; }//for /** @@ -3421,7 +3424,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal) /** * Get file */ - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); ndbrequire(filePtr.p->backupPtr == ptrI); @@ -3553,12 +3556,12 @@ Backup::execTRANSID_AI(Signal* signal) //const Uint32 transId2 = signal->theData[2]; const Uint32 dataLen = signal->length() - 3; - BackupFilePtr filePtr; + 
BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); OperationRecord & op = filePtr.p->operation; - TablePtr tabPtr; + TablePtr tabPtr LINT_SET_PTR; c_tablePool.getPtr(tabPtr, op.tablePtr); Table & table = * tabPtr.p; @@ -3752,7 +3755,7 @@ Backup::execSCAN_FRAGREF(Signal* signal) ScanFragRef * ref = (ScanFragRef*)signal->getDataPtr(); const Uint32 filePtrI = ref->senderData; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); filePtr.p->errorCode = ref->errorCode; @@ -3771,7 +3774,7 @@ Backup::execSCAN_FRAGCONF(Signal* signal) ScanFragConf * conf = (ScanFragConf*)signal->getDataPtr(); const Uint32 filePtrI = conf->senderData; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); OperationRecord & op = filePtr.p->operation; @@ -3812,7 +3815,7 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) filePtr.p->scanRunning = 0; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); BackupFragmentConf * conf = (BackupFragmentConf*)signal->getDataPtrSend(); @@ -3835,7 +3838,7 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) void Backup::backupFragmentRef(Signal * signal, BackupFilePtr filePtr) { - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); ptr.p->m_gsn = GSN_BACKUP_FRAGMENT_REF; @@ -3891,7 +3894,7 @@ Backup::checkScan(Signal* signal, BackupFilePtr filePtr) sendSignalWithDelay(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, 10000, ScanFragNextReq::SignalLength); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); ord->backupId = ptr.p->backupId; @@ -3922,7 +3925,8 @@ Backup::execFSAPPENDREF(Signal* signal) const Uint32 filePtrI = ref->userPointer; const Uint32 errCode = ref->errorCode; - BackupFilePtr filePtr; + + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); filePtr.p->fileRunning = 0; @@ -3942,7 +3946,7 @@ Backup::execFSAPPENDCONF(Signal* signal) const Uint32 filePtrI = signal->theData[0]; //conf->userPointer; const Uint32 bytes = signal->theData[1]; //conf->bytes; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); OperationRecord & op = filePtr.p->operation; @@ -4030,8 +4034,8 @@ Backup::execBACKUP_TRIG_REQ(Signal* signal) /* TUP asks if this trigger is to be fired on this node. */ - TriggerPtr trigPtr; - TablePtr tabPtr; + TriggerPtr trigPtr LINT_SET_PTR; + TablePtr tabPtr LINT_SET_PTR; FragmentPtr fragPtr; Uint32 trigger_id = signal->theData[0]; Uint32 frag_id = signal->theData[1]; @@ -4059,7 +4063,7 @@ Backup::execTRIG_ATTRINFO(Signal* signal) { TrigAttrInfo * trg = (TrigAttrInfo*)signal->getDataPtr(); - TriggerPtr trigPtr; + TriggerPtr trigPtr LINT_SET_PTR; c_triggerPool.getPtr(trigPtr, trg->getTriggerId()); ndbrequire(trigPtr.p->event != ILLEGAL_TRIGGER_ID); // Online... 
@@ -4090,7 +4094,7 @@ Backup::execTRIG_ATTRINFO(Signal* signal) { jam(); Uint32 save[TrigAttrInfo::StaticLength]; memcpy(save, signal->getDataPtr(), 4*TrigAttrInfo::StaticLength); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); trigPtr.p->errorCode = AbortBackupOrd::LogBufferFull; AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); @@ -4131,7 +4135,7 @@ Backup::execFIRE_TRIG_ORD(Signal* signal) const Uint32 gci = trg->getGCI(); const Uint32 trI = trg->getTriggerId(); - TriggerPtr trigPtr; + TriggerPtr trigPtr LINT_SET_PTR; c_triggerPool.getPtr(trigPtr, trI); ndbrequire(trigPtr.p->event != ILLEGAL_TRIGGER_ID); @@ -4144,7 +4148,7 @@ Backup::execFIRE_TRIG_ORD(Signal* signal) ndbrequire(trigPtr.p->logEntry != 0); Uint32 len = trigPtr.p->logEntry->Length; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); if(gci != ptr.p->currGCP) { @@ -4215,7 +4219,7 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) /** * Get backup record */ - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->slaveState.setState(STOPPING); @@ -4228,6 +4232,7 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) BackupFilePtr filePtr; ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr); Uint32 * dst; + LINT_INIT(dst); ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, 1)); * dst = 0; filePtr.p->operation.dataBuffer.updateWritePtr(1); @@ -4240,6 +4245,7 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) const Uint32 gcpSz = sizeof(BackupFormat::CtlFile::GCPEntry) >> 2; Uint32 * dst; + LINT_INIT(dst); ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, gcpSz)); BackupFormat::CtlFile::GCPEntry * gcp = @@ -4331,7 +4337,7 @@ Backup::execFSCLOSEREF(Signal* signal) FsRef * ref = (FsRef*)signal->getDataPtr(); const Uint32 filePtrI = ref->userPointer; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); BackupRecordPtr ptr; @@ -4352,7 +4358,7 @@ Backup::execFSCLOSECONF(Signal* signal) FsConf * conf = (FsConf*)signal->getDataPtr(); const Uint32 filePtrI = conf->userPointer; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); #ifdef DEBUG_ABORT @@ -4366,7 +4372,7 @@ Backup::execFSCLOSECONF(Signal* signal) filePtr.p->fileOpened = 0; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); for(ptr.p->files.first(filePtr); filePtr.i!=RNIL;ptr.p->files.next(filePtr)) { @@ -4432,7 +4438,7 @@ Backup::execABORT_BACKUP_ORD(Signal* signal) dumpUsedResources(); #endif - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; if(requestType == AbortBackupOrd::ClientAbort) { if (getOwnNodeId() != getMasterNodeId()) { jam(); @@ -4561,7 +4567,7 @@ Backup::dumpUsedResources() jam(); for(Uint32 j = 0; j<3; j++) { jam(); - TriggerPtr trigPtr; + TriggerPtr trigPtr LINT_SET_PTR; if(tabPtr.p->triggerAllocated[j]) { jam(); c_triggerPool.getPtr(trigPtr, tabPtr.p->triggerIds[j]); @@ -4600,7 +4606,7 @@ Backup::cleanup(Signal* signal, BackupRecordPtr ptr) tabPtr.p->fragments.release(); for(Uint32 j = 0; j<3; j++) { jam(); - TriggerPtr trigPtr; + TriggerPtr trigPtr LINT_SET_PTR; if(tabPtr.p->triggerAllocated[j]) { jam(); c_triggerPool.getPtr(trigPtr, tabPtr.p->triggerIds[j]); @@ -4680,7 +4686,7 @@ Backup::execFSREMOVECONF(Signal* signal){ /** * Get backup record */ - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; 
c_backupPool.getPtr(ptr, ptrI); c_backups.release(ptr); } diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index d2f9150ade0..44f9917438f 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -1234,7 +1234,7 @@ Cmvmi::execTESTSIG(Signal* signal){ fprintf(stdout, "\n"); for(i = 0; i<signal->header.m_noOfSections; i++){ - SegmentedSectionPtr ptr; + SegmentedSectionPtr ptr = {0,0,0}; ndbout_c("-- Section %d --", i); signal->getSection(ptr, i); ndbrequire(ptr.p != 0); @@ -1292,7 +1292,7 @@ Cmvmi::execTESTSIG(Signal* signal){ LinearSectionPtr ptr[3]; const Uint32 secs = signal->getNoOfSections(); for(i = 0; i<secs; i++){ - SegmentedSectionPtr sptr; + SegmentedSectionPtr sptr = {0,0,0}; signal->getSection(sptr, i); ptr[i].sz = sptr.sz; ptr[i].p = new Uint32[sptr.sz]; @@ -1341,7 +1341,7 @@ Cmvmi::execTESTSIG(Signal* signal){ LinearSectionPtr ptr[3]; const Uint32 secs = signal->getNoOfSections(); for(i = 0; i<secs; i++){ - SegmentedSectionPtr sptr; + SegmentedSectionPtr sptr = {0,0,0}; signal->getSection(sptr, i); ptr[i].sz = sptr.sz; ptr[i].p = new Uint32[sptr.sz]; @@ -1407,7 +1407,7 @@ Cmvmi::execTESTSIG(Signal* signal){ const Uint32 secs = signal->getNoOfSections(); memset(g_test, 0, sizeof(g_test)); for(i = 0; i<secs; i++){ - SegmentedSectionPtr sptr; + SegmentedSectionPtr sptr = {0,0,0}; signal->getSection(sptr, i); g_test[i].sz = sptr.sz; g_test[i].p = new Uint32[sptr.sz]; diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 7ecdf2466ee..2eca2a76c45 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -10054,8 +10054,8 @@ Dbdict::getDictLockType(Uint32 lockType) static const DictLockType lt[] = { { DictLockReq::NodeRestartLock, BS_NODE_RESTART, "NodeRestart" } }; - for (int i = 0; i < sizeof(lt)/sizeof(lt[0]); i++) { - if (lt[i].lockType == lockType) + for (unsigned int i = 0; i < sizeof(lt)/sizeof(lt[0]); i++) { + if ((Uint32) lt[i].lockType == lockType) return &lt[i]; } return NULL; @@ -10207,7 +10207,7 @@ Dbdict::execDICT_UNLOCK_ORD(Signal* signal) DictLockPtr lockPtr; c_dictLockQueue.getPtr(lockPtr, ord->lockPtr); - ndbrequire(lockPtr.p->lt->lockType == ord->lockType); + ndbrequire((Uint32) lockPtr.p->lt->lockType == ord->lockType); if (lockPtr.p->locked) { jam(); diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp index 4b5c0b791f9..da2321bdf6f 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp @@ -132,7 +132,7 @@ Dbtux::searchToAdd(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos.m_pos = hi; return true; } - if (hi < currNode.getOccup()) { + if ((uint) hi < currNode.getOccup()) { jam(); treePos.m_pos = hi; return true; diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index 7fa41cb2694..af05eeb9e9a 100644 --- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -1524,6 +1524,11 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal) break; } + case StopRecord::SR_BLOCK_GCP_START_GCP: + case StopRecord::SR_WAIT_COMPLETE_GCP: + case StopRecord::SR_UNBLOCK_GCP_START_GCP: + case StopRecord::SR_CLUSTER_SHUTDOWN: + break; } } @@ -2283,7 +2288,7 @@ Ndbcntr::StopRecord::checkNodeFail(Signal* signal){ bool allNodesStopped = true; int i ; - for( i = 0; i< NdbNodeBitmask::Size; i++ ){ + for( i = 0; i < (int) NdbNodeBitmask::Size; i++ ){ if ( stopReq.nodes[i] != 0 ){ allNodesStopped = false; break; diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 51d06537dbb..68e649d8ae0 100644 ---
a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -160,7 +160,7 @@ void Qmgr::execCONTINUEB(Signal* signal) BaseString tmp; tmp.append("Shutting down node as total restart time exceeds " " StartFailureTimeout as set in config file "); - if(c_restartFailureTimeout == ~0) + if(c_restartFailureTimeout == (Uint32) ~0) tmp.append(" 0 (inifinite)"); else tmp.appfmt(" %d", c_restartFailureTimeout); @@ -1339,7 +1339,7 @@ Qmgr::check_startup(Signal* signal) if (now < partial_timeout) { jam(); - signal->theData[1] = c_restartPartialTimeout == ~0 ? 2 : 3; + signal->theData[1] = c_restartPartialTimeout == (Uint32) ~0 ? 2 : 3; signal->theData[2] = Uint32((partial_timeout - now + 500) / 1000); report_mask.assign(wait); retVal = 0; @@ -1356,7 +1356,7 @@ Qmgr::check_startup(Signal* signal) case CheckNodeGroups::Partitioning: if (now < partitioned_timeout && result != CheckNodeGroups::Win) { - signal->theData[1] = c_restartPartionedTimeout == ~0 ? 4 : 5; + signal->theData[1] = c_restartPartionedTimeout == (Uint32) ~0 ? 4 : 5; signal->theData[2] = Uint32((partitioned_timeout - now + 500) / 1000); report_mask.assign(c_definedNodes); report_mask.bitANDC(c_start.m_starting_nodes); @@ -1403,6 +1403,7 @@ missing_nodegroup: " starting: %s (missing fs for: %s)", mask1, mask2); progError(__LINE__, NDBD_EXIT_SR_RESTARTCONFLICT, buf); + return 0; // Deadcode } void diff --git a/ndb/src/kernel/error/ErrorReporter.cpp b/ndb/src/kernel/error/ErrorReporter.cpp index 6c8bb1fe615..e95cd5c132f 100644 --- a/ndb/src/kernel/error/ErrorReporter.cpp +++ b/ndb/src/kernel/error/ErrorReporter.cpp @@ -185,6 +185,7 @@ ErrorReporter::handleAssert(const char* message, const char* file, int line, int childReportError(ec); NdbShutdown(s_errorHandlerShutdownType); + exit(1); // Deadcode } void diff --git a/ndb/src/kernel/error/ErrorReporter.hpp b/ndb/src/kernel/error/ErrorReporter.hpp index 0ec84190238..dffec14dff2 100644 --- a/ndb/src/kernel/error/ErrorReporter.hpp +++ b/ndb/src/kernel/error/ErrorReporter.hpp @@ -29,7 +29,7 @@ public: static void setErrorHandlerShutdownType(NdbShutdownType nst = NST_ErrorHandler); static void handleAssert(const char* message, const char* file, - int line, int ec = NDBD_EXIT_PRGERR); + int line, int ec = NDBD_EXIT_PRGERR) __attribute__((__noreturn__)); static void handleError(int faultID, const char* problemData, diff --git a/ndb/src/kernel/error/ndbd_exit_codes.c b/ndb/src/kernel/error/ndbd_exit_codes.c index 1853130f93c..205af85575a 100644 --- a/ndb/src/kernel/error/ndbd_exit_codes.c +++ b/ndb/src/kernel/error/ndbd_exit_codes.c @@ -14,6 +14,7 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include typedef struct ErrStruct { diff --git a/ndb/src/kernel/vm/TransporterCallback.cpp b/ndb/src/kernel/vm/TransporterCallback.cpp index f315918b871..badd2af669c 100644 --- a/ndb/src/kernel/vm/TransporterCallback.cpp +++ b/ndb/src/kernel/vm/TransporterCallback.cpp @@ -56,7 +56,7 @@ const char *lookupConnectionError(Uint32 err) { int i= 0; while ((Uint32)connectionError[i].err != err && - (Uint32)connectionError[i].err != -1) + connectionError[i].err != -1) i++; return connectionError[i].text; } diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index cab06331bbc..df69684784a 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -2565,8 +2565,8 @@ int ndb_mgm_report_event(NdbMgmHandle handle, Uint32 *data, Uint32 length) args.put("length", 
length); BaseString data_string; - for (int i = 0; i < length; i++) - data_string.appfmt(" %u", data[i]); + for (int i = 0; i < (int) length; i++) + data_string.appfmt(" %lu", (ulong) data[i]); args.put("data", data_string.c_str()); diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp index 2d096b10411..1dacd9689e8 100644 --- a/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/ndb/src/mgmclient/CommandInterpreter.cpp @@ -653,7 +653,7 @@ static const char* helpTextDebug = ; #endif -struct { +struct st_cmd_help { const char *cmd; const char * help; }help_items[]={ @@ -2466,7 +2466,7 @@ CommandInterpreter::executeEventReporting(int processId, Vector<BaseString> specs; tmp.split(specs, " "); - for (int i=0; i < specs.size(); i++) + for (int i=0; i < (int) specs.size(); i++) { Vector<BaseString> spec; specs[i].split(spec, "="); diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index a2329c67bb7..766992af0cb 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -835,7 +835,7 @@ MgmtSrvr::sendVersionReq(int v_nodeId, Uint32 &version, const char **address) case GSN_API_VERSION_CONF: { const ApiVersionConf * const conf = CAST_CONSTPTR(ApiVersionConf, signal->getDataPtr()); - assert(conf->nodeId == v_nodeId); + assert((int) conf->nodeId == v_nodeId); version = conf->version; struct in_addr in; in.s_addr= conf->inet_addr; @@ -1573,7 +1573,7 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId, NodeBitmask nodes; nodes.clear(); Uint32 max = (nodeId == 0) ? (nodeId = 1, MAX_NDB_NODES) : nodeId; - for(; nodeId <= max; nodeId++) + for(; (Uint32) nodeId <= max; nodeId++) { if (nodeTypes[nodeId] != NODE_TYPE_DB) continue; @@ -2001,8 +2001,8 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, int log_event) { DBUG_ENTER("MgmtSrvr::alloc_node_id"); - DBUG_PRINT("enter", ("nodeid=%d, type=%d, client_addr=%d", - *nodeId, type, client_addr)); + DBUG_PRINT("enter", ("nodeid: %d type: %d client_addr: 0x%ld", + *nodeId, type, (long) client_addr)); if (g_no_nodeid_checks) { if (*nodeId == 0) { error_string.appfmt("no-nodeid-checks set in management server.\n" diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index 7d59a303df2..bf16aa03d04 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -1699,7 +1699,7 @@ MgmApiSession::report_event(Parser_t::Context &ctx, BaseString tmp(data_string); Vector<BaseString> item; tmp.split(item, " "); - for (int i = 0; i < length ; i++) + for (int i = 0; (Uint32) i < length ; i++) { sscanf(item[i].c_str(), "%u", data+i); } diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp index 475561af225..8545c599341 100644 --- a/ndb/src/ndbapi/ClusterMgr.cpp +++ b/ndb/src/ndbapi/ClusterMgr.cpp @@ -213,7 +213,7 @@ ClusterMgr::forceHB() int nodeId= 0; for(int i=0; - NodeBitmask::NotFound!=(nodeId= waitForHBFromNodes.find(i)); + (int) NodeBitmask::NotFound != (nodeId= waitForHBFromNodes.find(i)); i= nodeId+1) { #ifdef DEBUG_REG diff --git a/ndb/src/ndbapi/DictCache.cpp b/ndb/src/ndbapi/DictCache.cpp index bb59c16fb7c..ba8b0799398 100644 --- a/ndb/src/ndbapi/DictCache.cpp +++ b/ndb/src/ndbapi/DictCache.cpp @@ -116,7 +116,8 @@ void GlobalDictCache::printCache() NdbElement_t<Vector<TableVersion> > * curr = m_tableHash.getNext(0); while(curr != 0){ DBUG_PRINT("curr", ("len: %d, hash: %d, lk: %d, str: %s", - curr->len, curr->hash, curr->localkey1, curr->str)); + curr->len, curr->hash, curr->localkey1, + (char*) curr->str)); if (curr->theData){ Vector<TableVersion> * vers = curr->theData; const unsigned sz = vers->size(); diff
--git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp index 9d1c78a5972..c701fbf77e0 100644 --- a/ndb/src/ndbapi/Ndb.cpp +++ b/ndb/src/ndbapi/Ndb.cpp @@ -355,8 +355,9 @@ Ndb::startTransaction(const NdbDictionary::Table *table, { NdbTransaction *trans= startTransactionLocal(0, nodeId); - DBUG_PRINT("exit",("start trans: 0x%x transid: 0x%llx", - trans, trans ? trans->getTransactionId() : 0)); + DBUG_PRINT("exit",("start trans: 0x%lx transid: 0x%lx", + (long) trans, + (long) (trans ? trans->getTransactionId() : 0))); DBUG_RETURN(trans); } } else { @@ -377,7 +378,7 @@ Ndb::hupp(NdbTransaction* pBuddyTrans) { DBUG_ENTER("Ndb::hupp"); - DBUG_PRINT("enter", ("trans: 0x%x",pBuddyTrans)); + DBUG_PRINT("enter", ("trans: 0x%lx", (long) pBuddyTrans)); Uint32 aPriority = 0; if (pBuddyTrans == NULL){ @@ -402,8 +403,9 @@ Ndb::hupp(NdbTransaction* pBuddyTrans) } pCon->setTransactionId(pBuddyTrans->getTransactionId()); pCon->setBuddyConPtr((Uint32)pBuddyTrans->getTC_ConnectPtr()); - DBUG_PRINT("exit", ("hupp trans: 0x%x transid: 0x%llx", - pCon, pCon ? pCon->getTransactionId() : 0)); + DBUG_PRINT("exit", ("hupp trans: 0x%lx transid: 0x%lx", + (long) pCon, + (long) (pCon ? pCon->getTransactionId() : 0))); DBUG_RETURN(pCon); } else { DBUG_RETURN(NULL); @@ -490,8 +492,9 @@ Ndb::closeTransaction(NdbTransaction* aConnection) tCon = theTransactionList; theRemainingStartTransactions++; - DBUG_PRINT("info",("close trans: 0x%x transid: 0x%llx", - aConnection, aConnection->getTransactionId())); + DBUG_PRINT("info",("close trans: 0x%lx transid: 0x%lx", + (long) aConnection, + (long) aConnection->getTransactionId())); DBUG_PRINT("info",("magic number: 0x%x TCConPtr: 0x%x theMyRef: 0x%x 0x%x", aConnection->theMagicNumber, aConnection->theTCConPtr, aConnection->theMyRef, getReference())); diff --git a/ndb/src/ndbapi/NdbOperationDefine.cpp b/ndb/src/ndbapi/NdbOperationDefine.cpp index 835e33dfb40..42dec161307 100644 --- a/ndb/src/ndbapi/NdbOperationDefine.cpp +++ b/ndb/src/ndbapi/NdbOperationDefine.cpp @@ -407,10 +407,10 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, const char* aValuePassed, Uint32 len) { DBUG_ENTER("NdbOperation::setValue"); - DBUG_PRINT("enter", ("col=%s op=%d val=0x%x len=%u", + DBUG_PRINT("enter", ("col: %s op: %d val: 0x%lx len: %u", tAttrInfo->m_name.c_str(), theOperationType, - aValuePassed, len)); + (long) aValuePassed, len)); if (aValuePassed != NULL) DBUG_DUMP("value", (char*)aValuePassed, len); diff --git a/ndb/src/ndbapi/NdbOperationExec.cpp b/ndb/src/ndbapi/NdbOperationExec.cpp index 58a816e3c1a..11713678478 100644 --- a/ndb/src/ndbapi/NdbOperationExec.cpp +++ b/ndb/src/ndbapi/NdbOperationExec.cpp @@ -205,7 +205,7 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId) tcKeyReq->setKeyLength(tReqInfo, tTupKeyLen); // A simple read is always ignore error - abortOption = tSimpleIndicator ? AO_IgnoreError : abortOption; + abortOption = tSimpleIndicator ? 
(Uint8) AO_IgnoreError : abortOption; tcKeyReq->setAbortOption(tReqInfo, abortOption); Uint8 tDistrKeyIndicator = theDistrKeyIndicator_; diff --git a/ndb/src/ndbapi/NdbOperationInt.cpp b/ndb/src/ndbapi/NdbOperationInt.cpp index 41e0cb1d140..6defb37467f 100644 --- a/ndb/src/ndbapi/NdbOperationInt.cpp +++ b/ndb/src/ndbapi/NdbOperationInt.cpp @@ -1015,8 +1015,8 @@ NdbOperation::branch_col(Uint32 type, bool nopad, Uint32 Label){ DBUG_ENTER("NdbOperation::branch_col"); - DBUG_PRINT("enter", ("type=%u col=%u val=0x%x len=%u label=%u", - type, ColId, val, len, Label)); + DBUG_PRINT("enter", ("type: %u col:%u val: 0x%lx len: %u label: %u", + type, ColId, (long) val, len, Label)); if (val != NULL) DBUG_DUMP("value", (char*)val, len); diff --git a/ndb/src/ndbapi/NdbOperationSearch.cpp b/ndb/src/ndbapi/NdbOperationSearch.cpp index 4be7ccb313c..ede3240e9f4 100644 --- a/ndb/src/ndbapi/NdbOperationSearch.cpp +++ b/ndb/src/ndbapi/NdbOperationSearch.cpp @@ -58,10 +58,10 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, Uint32 aVariableKeyLen) { DBUG_ENTER("NdbOperation::equal_impl"); - DBUG_PRINT("enter", ("col=%s op=%d val=0x%x len=%u", + DBUG_PRINT("enter", ("col: %s op: %d val: 0x%lx len: %u", tAttrInfo->m_name.c_str(), theOperationType, - aValuePassed, aVariableKeyLen)); + (long) aValuePassed, aVariableKeyLen)); if (aValuePassed != NULL) DBUG_DUMP("value", (char*)aValuePassed, aVariableKeyLen); diff --git a/ndb/src/ndbapi/NdbTransaction.cpp b/ndb/src/ndbapi/NdbTransaction.cpp index 28bb9aeba55..86dc92a86c1 100644 --- a/ndb/src/ndbapi/NdbTransaction.cpp +++ b/ndb/src/ndbapi/NdbTransaction.cpp @@ -530,8 +530,8 @@ NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec, AbortOption abortOption) { DBUG_ENTER("NdbTransaction::executeAsynchPrepare"); - DBUG_PRINT("enter", ("aTypeOfExec: %d, aCallback: %x, anyObject: %x", - aTypeOfExec, aCallback, anyObject)); + DBUG_PRINT("enter", ("aTypeOfExec: %d, aCallback: 0x%lx, anyObject: Ox%lx", + aTypeOfExec, (long) aCallback, (long) anyObject)); /** * Reset error.code on execute diff --git a/ndb/src/ndbapi/Ndbif.cpp b/ndb/src/ndbapi/Ndbif.cpp index 6aaf44d0168..3ab9446e011 100644 --- a/ndb/src/ndbapi/Ndbif.cpp +++ b/ndb/src/ndbapi/Ndbif.cpp @@ -193,11 +193,11 @@ void Ndb::connected(Uint32 ref) ((Uint64)tmpTheNode << 40); theFirstTransId += theFacade->m_max_trans_id; // assert(0); - DBUG_PRINT("info",("connected with ref=%x, id=%d, no_db_nodes=%d, first_trans_id=%lx", + DBUG_PRINT("info",("connected with ref=%x, id=%d, no_db_nodes=%d, first_trans_id: 0x%lx", theMyRef, tmpTheNode, theImpl->theNoOfDBnodes, - theFirstTransId)); + (long) theFirstTransId)); theCommitAckSignal = new NdbApiSignal(theMyRef); theDictionary->m_receiver.m_reference= theMyRef; diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/ndb/src/ndbapi/Ndbinit.cpp index 40cac675b21..fbabe9e6dc6 100644 --- a/ndb/src/ndbapi/Ndbinit.cpp +++ b/ndb/src/ndbapi/Ndbinit.cpp @@ -39,7 +39,7 @@ Ndb::Ndb( Ndb_cluster_connection *ndb_cluster_connection, : theImpl(NULL) { DBUG_ENTER("Ndb::Ndb()"); - DBUG_PRINT("enter",("Ndb::Ndb this=0x%x", this)); + DBUG_PRINT("enter",("Ndb::Ndb this: 0x%lx", (long) this)); setup(ndb_cluster_connection, aDataBase, aSchema); DBUG_VOID_RETURN; } @@ -115,7 +115,7 @@ void Ndb::setup(Ndb_cluster_connection *ndb_cluster_connection, Ndb::~Ndb() { DBUG_ENTER("Ndb::~Ndb()"); - DBUG_PRINT("enter",("Ndb::~Ndb this=0x%x",this)); + DBUG_PRINT("enter",("Ndb::~Ndb this: 0x%lx",(long) this)); doDisconnect(); if (TransporterFacade::instance() != NULL && theNdbBlockNumber > 0){ diff --git 
a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp index 1e53558f179..3ae84125112 100644 --- a/ndb/src/ndbapi/ndb_cluster_connection.cpp +++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp @@ -262,7 +262,7 @@ Ndb_cluster_connection_impl::Ndb_cluster_connection_impl(const char * m_name(0) { DBUG_ENTER("Ndb_cluster_connection"); - DBUG_PRINT("enter",("Ndb_cluster_connection this=0x%x", this)); + DBUG_PRINT("enter",("Ndb_cluster_connection this=0x%lx", (long) this)); g_eventLogger.createConsoleHandler(); g_eventLogger.setCategory("NdbApi"); diff --git a/ndb/tools/drop_index.cpp b/ndb/tools/drop_index.cpp index aa207212dbe..c10211a9108 100644 --- a/ndb/tools/drop_index.cpp +++ b/ndb/tools/drop_index.cpp @@ -51,9 +51,6 @@ int main(int argc, char** argv){ NDB_INIT(argv[0]); load_defaults("my",load_default_groups,&argc,&argv); int ho_error; -#ifndef DBUG_OFF - "d:t:O,/tmp/ndb_drop_index.trace"; -#endif if ((ho_error=handle_options(&argc, &argv, my_long_options, ndb_std_get_one_option))) return NDBT_ProgramExit(NDBT_WRONGARGS); diff --git a/ndb/tools/drop_tab.cpp b/ndb/tools/drop_tab.cpp index d14c60a2c6d..61df4ee9b34 100644 --- a/ndb/tools/drop_tab.cpp +++ b/ndb/tools/drop_tab.cpp @@ -51,9 +51,6 @@ int main(int argc, char** argv){ NDB_INIT(argv[0]); load_defaults("my",load_default_groups,&argc,&argv); int ho_error; -#ifndef DBUG_OFF - "d:t:O,/tmp/ndb_drop_table.trace"; -#endif if ((ho_error=handle_options(&argc, &argv, my_long_options, ndb_std_get_one_option))) return NDBT_ProgramExit(NDBT_WRONGARGS); diff --git a/ndb/tools/ndb_config.cpp b/ndb/tools/ndb_config.cpp index 8b862391c8e..049e4599447 100644 --- a/ndb/tools/ndb_config.cpp +++ b/ndb/tools/ndb_config.cpp @@ -114,6 +114,7 @@ struct Match int m_key; BaseString m_value; virtual int eval(const Iter&); + virtual ~Match() {} }; struct HostMatch : public Match @@ -127,6 +128,7 @@ struct Apply Apply(int val) { m_key = val;} int m_key; virtual int apply(const Iter&); + virtual ~Apply() {} }; struct NodeTypeApply : public Apply diff --git a/regex/regexec.c b/regex/regexec.c index 88bcc02323d..338c1bfa7fe 100644 --- a/regex/regexec.c +++ b/regex/regexec.c @@ -15,7 +15,8 @@ #include "utils.h" #include "regex2.h" -static int nope = 0; /* for use in asserts; shuts lint up */ +/* for use in asserts */ +#define nope 0 /* macros for manipulating states, small version */ #define states long diff --git a/server-tools/instance-manager/mysql_connection.cc b/server-tools/instance-manager/mysql_connection.cc index bf39c843f0a..a19914dfdd2 100644 --- a/server-tools/instance-manager/mysql_connection.cc +++ b/server-tools/instance-manager/mysql_connection.cc @@ -163,7 +163,7 @@ Mysql_connection_thread::~Mysql_connection_thread() void Mysql_connection_thread::run() { - log_info("accepted mysql connection %d", connection_id); + log_info("accepted mysql connection %lu", connection_id); my_thread_init(); @@ -173,7 +173,7 @@ void Mysql_connection_thread::run() return; } - log_info("connection %d is checked successfully", connection_id); + log_info("connection %lu is checked successfully", connection_id); vio_keepalive(vio, TRUE); @@ -312,7 +312,7 @@ int Mysql_connection_thread::do_command() packet= (char*) net.read_pos; enum enum_server_command command= (enum enum_server_command) (uchar) *packet; - log_info("connection %d: packet_length=%d, command=%d", + log_info("connection %lu: packet_length=%lu, command=%d", connection_id, packet_length, command); return dispatch_command(command, packet + 1, packet_length - 1); } @@ -323,27 +323,27 
@@ int Mysql_connection_thread::dispatch_command(enum enum_server_command command, { switch (command) { case COM_QUIT: // client exit - log_info("query for connection %d received quit command", connection_id); + log_info("query for connection %lu received quit command", connection_id); return 1; case COM_PING: - log_info("query for connection %d received ping command", connection_id); + log_info("query for connection %lu received ping command", connection_id); net_send_ok(&net, connection_id, NULL); break; case COM_QUERY: { - log_info("query for connection %d : ----\n%s\n-------------------------", + log_info("query for connection %lu : ----\n%s\n-------------------------", connection_id,packet); if (Command *command= parse_command(&instance_map, packet)) { int res= 0; - log_info("query for connection %d successefully parsed",connection_id); + log_info("query for connection %lu successefully parsed",connection_id); res= command->execute(&net, connection_id); delete command; if (!res) - log_info("query for connection %d executed ok",connection_id); + log_info("query for connection %lu executed ok",connection_id); else { - log_info("query for connection %d executed err=%d",connection_id,res); + log_info("query for connection %lu executed err=%d",connection_id,res); net_send_error(&net, res); return 0; } @@ -356,7 +356,7 @@ int Mysql_connection_thread::dispatch_command(enum enum_server_command command, break; } default: - log_info("query for connection %d received unknown command",connection_id); + log_info("query for connection %lu received unknown command",connection_id); net_send_error(&net, ER_UNKNOWN_COM_ERROR); break; } diff --git a/sql-common/client.c b/sql-common/client.c index fc913f3f3cf..0354cffec32 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -598,7 +598,7 @@ cli_safe_read(MYSQL *mysql) if (len == packet_error || len == 0) { - DBUG_PRINT("error",("Wrong connection or packet. fd: %s len: %d", + DBUG_PRINT("error",("Wrong connection or packet. 
fd: %s len: %lu", vio_description(net->vio),len)); #ifdef MYSQL_SERVER if (net->vio && vio_was_interrupted(net->vio)) @@ -870,7 +870,7 @@ void STDCALL mysql_free_result(MYSQL_RES *result) { DBUG_ENTER("mysql_free_result"); - DBUG_PRINT("enter",("mysql_res: %lx",result)); + DBUG_PRINT("enter",("mysql_res: 0x%lx", (long) result)); if (result) { MYSQL *mysql= result->handle; @@ -1366,7 +1366,7 @@ MYSQL_DATA *cli_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields, DBUG_PRINT("info",("status: %u warning_count: %u", mysql->server_status, mysql->warning_count)); } - DBUG_PRINT("exit",("Got %d rows",result->rows)); + DBUG_PRINT("exit", ("Got %lu rows", (ulong) result->rows)); DBUG_RETURN(result); } @@ -2324,7 +2324,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, goto error; #endif - DBUG_PRINT("exit",("Mysql handler: %lx",mysql)); + DBUG_PRINT("exit", ("Mysql handler: 0x%lx", (long) mysql)); reset_sigpipe(mysql); DBUG_RETURN(mysql); @@ -2697,7 +2697,7 @@ int STDCALL mysql_real_query(MYSQL *mysql, const char *query, ulong length) { DBUG_ENTER("mysql_real_query"); - DBUG_PRINT("enter",("handle: %lx",mysql)); + DBUG_PRINT("enter",("handle: 0x%lx", (long) mysql)); DBUG_PRINT("query",("Query = '%-.4096s'",query)); if (mysql_send_query(mysql,query,length)) diff --git a/sql-common/my_time.c b/sql-common/my_time.c index 2dd40c112de..ced2a805ae5 100644 --- a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -963,7 +963,7 @@ my_system_gmt_sec(const MYSQL_TIME *t_src, long *my_timezone, */ if ((tmp < TIMESTAMP_MIN_VALUE) || (tmp > TIMESTAMP_MAX_VALUE)) tmp= 0; -end: + return (my_time_t) tmp; } /* my_system_gmt_sec */ diff --git a/sql/Makefile.am b/sql/Makefile.am index d9cea960915..6c685ba67c6 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -149,6 +149,8 @@ sql_yacc.cc: sql_yacc.yy sql_yacc.h: sql_yacc.yy sql_yacc.o: sql_yacc.cc sql_yacc.h $(HEADERS) + @SED@ -e 's/__attribute__ ((__unused__))//' sql_yacc.cc > sql_yacc.cc-new + @MV@ sql_yacc.cc-new sql_yacc.cc @echo "Note: The following compile may take a long time." @echo "If it fails, re-run configure with --with-low-memory" $(CXXCOMPILE) $(LM_CFLAGS) -c $< diff --git a/sql/filesort.cc b/sql/filesort.cc index b063b416191..7e98845372e 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -296,7 +296,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, DBUG_POP(); /* Ok to DBUG */ #endif memcpy(&table->sort, &table_sort, sizeof(FILESORT_INFO)); - DBUG_PRINT("exit",("records: %ld",records)); + DBUG_PRINT("exit",("records: %ld", (long) records)); DBUG_RETURN(error ? 
HA_POS_ERROR : records); } /* filesort */ diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc index 189c5facfab..113008c4885 100644 --- a/sql/ha_archive.cc +++ b/sql/ha_archive.cc @@ -682,7 +682,8 @@ int ha_archive::real_write_row(byte *buf, gzFile writer) } share->approx_file_size+= total_row_length; written= gzwrite(writer, buf, table->s->reclength); - DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d", written, table->s->reclength)); + DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %lu", (int) written, + table->s->reclength)); if (!delayed_insert || !bulk_insert) share->dirty= TRUE; @@ -822,7 +823,8 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf) DBUG_ENTER("ha_archive::get_row"); read= gzread(file_to_read, buf, table->s->reclength); - DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->s->reclength)); + DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %lu", (int) read, + table->s->reclength)); if (read == Z_STREAM_ERROR) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index 8be1e40f42d..6328803c743 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -557,8 +557,8 @@ static int parse_url_error(FEDERATED_SHARE *share, TABLE *table, int error_num) if (share->scheme) { DBUG_PRINT("info", - ("error: parse_url. Returning error code %d \ - freeing share->scheme %lx", error_num, share->scheme)); + ("error: parse_url. Returning error code %d freeing share->scheme 0x%lx", + error_num, (long) share->scheme)); my_free((gptr) share->scheme, MYF(0)); share->scheme= 0; } @@ -624,7 +624,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, MYF(0)); share->connect_string_length= table->s->connect_string.length; - DBUG_PRINT("info",("parse_url alloced share->scheme %lx", share->scheme)); + DBUG_PRINT("info",("parse_url alloced share->scheme 0x%lx", (long) share->scheme)); /* remove addition of null terminator and store length @@ -1664,7 +1664,7 @@ void ha_federated::update_auto_increment(void) DBUG_ENTER("ha_federated::update_auto_increment"); thd->insert_id(mysql->last_used_con->insert_id); - DBUG_PRINT("info",("last_insert_id %d", auto_increment_value)); + DBUG_PRINT("info",("last_insert_id: %ld", (long) auto_increment_value)); DBUG_VOID_RETURN; } @@ -1917,8 +1917,8 @@ int ha_federated::delete_row(const byte *buf) deleted+= mysql->affected_rows; records-= mysql->affected_rows; DBUG_PRINT("info", - ("rows deleted %d rows deleted for all time %d", - int(mysql->affected_rows), deleted)); + ("rows deleted %ld rows deleted for all time %ld", + (long) mysql->affected_rows, (long) deleted)); DBUG_RETURN(0); } diff --git a/sql/ha_federated.h b/sql/ha_federated.h index 11e5a4f634f..c04ce09e75d 100644 --- a/sql/ha_federated.h +++ b/sql/ha_federated.h @@ -232,8 +232,7 @@ public: */ double scan_time() { - DBUG_PRINT("info", - ("records %d", records)); + DBUG_PRINT("info", ("records %ld", (long) records)); return (double)(records*1000); } /* diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index e9ccc0ccede..a1619d8e1a1 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -4215,7 +4215,7 @@ ha_innobase::rnd_pos( } if (error) { - DBUG_PRINT("error", ("Got error: %ld", error)); + DBUG_PRINT("error", ("Got error: %d", error)); DBUG_RETURN(error); } @@ -4225,7 +4225,7 @@ ha_innobase::rnd_pos( error = index_read(buf, pos, ref_length, HA_READ_KEY_EXACT); if (error) { - DBUG_PRINT("error", ("Got error: %ld", error)); + DBUG_PRINT("error", ("Got error: %d", error)); } 
change_active_index(keynr); diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 8e40105780b..19ec1b29da3 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -1557,7 +1557,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, } } DBUG_PRINT("loop",("found: 0x%lx recpos: %d minpos: %d length: %d", - found,recpos,minpos,length)); + (long) found, recpos, minpos, length)); if (recpos != minpos) { // Reserved space (Null bits?) bzero((char*) recinfo_pos,sizeof(*recinfo_pos)); diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc index 33da88bbdd4..5e613a63303 100644 --- a/sql/ha_myisammrg.cc +++ b/sql/ha_myisammrg.cc @@ -108,7 +108,7 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked) if (table->s->reclength != mean_rec_length && mean_rec_length) { - DBUG_PRINT("error",("reclength: %d mean_rec_length: %d", + DBUG_PRINT("error",("reclength: %lu mean_rec_length: %lu", table->s->reclength, mean_rec_length)); goto err; } diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 957a3055e70..739fae79565 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1774,7 +1774,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) all pending update or delete operations should be sent to NDB */ - DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending)); + DBUG_PRINT("info", ("ops_pending: %ld", (long) m_ops_pending)); if (m_ops_pending) { if (m_transaction_on) @@ -2976,7 +2976,7 @@ int ha_ndbcluster::close_scan() Take over any pending transactions to the deleteing/updating transaction before closing the scan */ - DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending)); + DBUG_PRINT("info", ("ops_pending: %ld", (long) m_ops_pending)); if (execute_no_commit(this,trans,false) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); @@ -3542,8 +3542,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) Thd_ndb *thd_ndb= get_thd_ndb(thd); Ndb *ndb= thd_ndb->ndb; - DBUG_PRINT("enter", ("thd: %x, thd_ndb: %x, thd_ndb->lock_count: %d", - thd, thd_ndb, thd_ndb->lock_count)); + DBUG_PRINT("enter", ("thd: 0x%lx thd_ndb: 0x%lx thd_ndb->lock_count: %d", + (long) thd, (long) thd_ndb, thd_ndb->lock_count)); if (lock_type != F_UNLCK) { @@ -3847,7 +3847,8 @@ int ndbcluster_commit(THD *thd, bool all) while ((share= it++)) { pthread_mutex_lock(&share->mutex); - DBUG_PRINT("info", ("Invalidate commit_count for %s, share->commit_count: %d ", share->table_name, share->commit_count)); + DBUG_PRINT("info", ("Invalidate commit_count for %s, share->commit_count: %lu", + share->table_name, (ulong) share->commit_count)); share->commit_count= 0; share->commit_count_lock++; pthread_mutex_unlock(&share->mutex); @@ -4281,7 +4282,7 @@ int ha_ndbcluster::create(const char *name, if (packfrm(data, length, &pack_data, &pack_length)) DBUG_RETURN(2); - DBUG_PRINT("info", ("setFrm data=%x, len=%d", pack_data, pack_length)); + DBUG_PRINT("info", ("setFrm data: 0x%lx len: %d", (long) pack_data, pack_length)); tab.setFrm(pack_data, pack_length); my_free((char*)data, MYF(0)); my_free((char*)pack_data, MYF(0)); @@ -5237,7 +5238,7 @@ bool ndbcluster_init() } { char buf[128]; - my_snprintf(buf, sizeof(buf), "mysqld --server-id=%d", server_id); + my_snprintf(buf, sizeof(buf), "mysqld --server-id=%lu", server_id); g_ndb_cluster_connection->set_name(buf); } g_ndb_cluster_connection->set_optimized_node_selection @@ -5813,9 +5814,9 @@ static NDB_SHARE* get_share(const char *table_name) share->use_count++; DBUG_PRINT("share", - ("table_name: %s, length: %d, 
use_count: %d, commit_count: %d", + ("table_name: %s length: %d use_count: %d commit_count: %lu", share->table_name, share->table_name_length, share->use_count, - share->commit_count)); + (ulong) share->commit_count)); pthread_mutex_unlock(&ndbcluster_mutex); return share; } @@ -5862,14 +5863,14 @@ static int packfrm(const void *data, uint len, uint blob_len; frm_blob_struct* blob; DBUG_ENTER("packfrm"); - DBUG_PRINT("enter", ("data: %x, len: %d", data, len)); + DBUG_PRINT("enter", ("data: 0x%lx, len: %d", (long) data, len)); error= 1; org_len= len; if (my_compress((byte*)data, &org_len, &comp_len)) goto err; - DBUG_PRINT("info", ("org_len: %d, comp_len: %d", org_len, comp_len)); + DBUG_PRINT("info", ("org_len: %lu comp_len: %lu", org_len, comp_len)); DBUG_DUMP("compressed", (char*)data, org_len); error= 2; @@ -5889,7 +5890,7 @@ static int packfrm(const void *data, uint len, *pack_len= blob_len; error= 0; - DBUG_PRINT("exit", ("pack_data: %x, pack_len: %d", *pack_data, *pack_len)); + DBUG_PRINT("exit", ("pack_data: 0x%lx, pack_len: %d", (long) *pack_data, *pack_len)); err: DBUG_RETURN(error); @@ -5903,13 +5904,13 @@ static int unpackfrm(const void **unpack_data, uint *unpack_len, byte *data; ulong complen, orglen, ver; DBUG_ENTER("unpackfrm"); - DBUG_PRINT("enter", ("pack_data: %x", pack_data)); + DBUG_PRINT("enter", ("pack_data: 0x%lx", (long) pack_data)); complen= uint4korr((char*)&blob->head.complen); orglen= uint4korr((char*)&blob->head.orglen); ver= uint4korr((char*)&blob->head.ver); - DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d", + DBUG_PRINT("blob",("ver: %lu complen: %lu orglen: %lu", ver,complen,orglen)); DBUG_DUMP("blob->data", (char*) blob->data, complen); @@ -5928,7 +5929,7 @@ static int unpackfrm(const void **unpack_data, uint *unpack_len, *unpack_data= data; *unpack_len= complen; - DBUG_PRINT("exit", ("frmdata: %x, len: %d", *unpack_data, *unpack_len)); + DBUG_PRINT("exit", ("frmdata: 0x%lx, len: %d", (long) *unpack_data, *unpack_len)); DBUG_RETURN(0); } @@ -6521,7 +6522,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) my_thread_init(); DBUG_ENTER("ndb_util_thread"); - DBUG_PRINT("enter", ("ndb_cache_check_time: %d", ndb_cache_check_time)); + DBUG_PRINT("enter", ("ndb_cache_check_time: %lu", ndb_cache_check_time)); thd= new THD; /* note that contructor of THD uses DBUG_ */ THD_CHECK_SENTRY(thd); @@ -6550,7 +6551,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) &abstime); pthread_mutex_unlock(&LOCK_ndb_util_thread); - DBUG_PRINT("ndb_util_thread", ("Started, ndb_cache_check_time: %d", + DBUG_PRINT("ndb_util_thread", ("Started, ndb_cache_check_time: %lu", ndb_cache_check_time)); if (abort_loop) @@ -7447,7 +7448,7 @@ void ndb_serialize_cond(const Item *item, void *arg) if (context->expecting(Item::INT_ITEM)) { Item_int *int_item= (Item_int *) item; - DBUG_PRINT("info", ("value %d", int_item->value)); + DBUG_PRINT("info", ("value %ld", (long) int_item->value)); NDB_ITEM_QUALIFICATION q; q.value_type= Item::INT_ITEM; curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); @@ -7470,7 +7471,7 @@ void ndb_serialize_cond(const Item *item, void *arg) context->supported= FALSE; break; case Item::REAL_ITEM: - DBUG_PRINT("info", ("REAL_ITEM %s")); + DBUG_PRINT("info", ("REAL_ITEM")); if (context->expecting(Item::REAL_ITEM)) { Item_float *float_item= (Item_float *) item; @@ -7518,7 +7519,7 @@ void ndb_serialize_cond(const Item *item, void *arg) context->supported= FALSE; break; case Item::DECIMAL_ITEM: - DBUG_PRINT("info", 
("DECIMAL_ITEM %s")); + DBUG_PRINT("info", ("DECIMAL_ITEM")); if (context->expecting(Item::DECIMAL_ITEM)) { Item_decimal *decimal_item= (Item_decimal *) item; diff --git a/sql/handler.cc b/sql/handler.cc index cff8213edec..5bc9deb59e3 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -1533,7 +1533,7 @@ prev_insert_id(ulonglong nr, struct system_variables *variables) */ DBUG_PRINT("info",("auto_increment: nr: %lu cannot honour " "auto_increment_offset: %lu", - nr, variables->auto_increment_offset)); + (ulong) nr, variables->auto_increment_offset)); return nr; } if (variables->auto_increment_increment == 1) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 9a400d60ae6..221fc750e85 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -2196,7 +2196,7 @@ cmp_item* cmp_item_row::make_same() cmp_item_row::~cmp_item_row() { DBUG_ENTER("~cmp_item_row"); - DBUG_PRINT("enter",("this: 0x%lx", this)); + DBUG_PRINT("enter",("this: 0x%lx", (long) this)); if (comparators) { for (uint i= 0; i < n; i++) @@ -2971,7 +2971,7 @@ longlong Item_is_not_null_test::val_int() if (!used_tables_cache) { owner->was_null|= (!cached_value); - DBUG_PRINT("info", ("cached :%d", cached_value)); + DBUG_PRINT("info", ("cached :%ld", (long) cached_value)); DBUG_RETURN(cached_value); } if (args[0]->is_null()) diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 0ad517609c9..b43f70447c4 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -54,7 +54,7 @@ void Item_subselect::init(st_select_lex *select_lex, { DBUG_ENTER("Item_subselect::init"); - DBUG_PRINT("enter", ("select_lex: 0x%x", (ulong) select_lex)); + DBUG_PRINT("enter", ("select_lex: 0x%lx", (long) select_lex)); unit= select_lex->master_unit(); if (unit->item) diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index a776d8a5ff7..403d41ce716 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -2498,7 +2498,8 @@ String *Item_char_typecast::val_str(String *str) { // Safe even if const arg char char_type[40]; my_snprintf(char_type, sizeof(char_type), "%s(%lu)", - cast_cs == &my_charset_bin ? "BINARY" : "CHAR", length); + cast_cs == &my_charset_bin ? 
"BINARY" : "CHAR", + (ulong) length); if (!res->alloced_length()) { // Don't change const str diff --git a/sql/log_event.cc b/sql/log_event.cc index 271658d8054..0a68d89dfb8 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -727,7 +727,7 @@ Log_event* Log_event::read_log_event(IO_CACHE* file, LOG_EVENT_MINIMAL_HEADER_LEN); LOCK_MUTEX; - DBUG_PRINT("info", ("my_b_tell=%lu", my_b_tell(file))); + DBUG_PRINT("info", ("my_b_tell: %lu", (ulong) my_b_tell(file))); if (my_b_read(file, (byte *) head, header_size)) { DBUG_PRINT("info", ("Log_event::read_log_event(IO_CACHE*,Format_desc*) \ @@ -1297,7 +1297,8 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, } else time_zone_len= 0; - DBUG_PRINT("info",("Query_log_event has flags2=%lu sql_mode=%lu",flags2,sql_mode)); + DBUG_PRINT("info",("Query_log_event has flags2: %lu sql_mode: %lu", + (ulong) flags2, sql_mode)); } #endif /* MYSQL_CLIENT */ @@ -1345,7 +1346,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, common_header_len= description_event->common_header_len; post_header_len= description_event->post_header_len[event_type-1]; - DBUG_PRINT("info",("event_len=%ld, common_header_len=%d, post_header_len=%d", + DBUG_PRINT("info",("event_len: %u common_header_len: %d post_header_len: %d", event_len, common_header_len, post_header_len)); /* @@ -1393,7 +1394,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, case Q_FLAGS2_CODE: flags2_inited= 1; flags2= uint4korr(pos); - DBUG_PRINT("info",("In Query_log_event, read flags2: %lu", flags2)); + DBUG_PRINT("info",("In Query_log_event, read flags2: %lu", (ulong) flags2)); pos+= 4; break; case Q_SQL_MODE_CODE: @@ -3137,8 +3138,8 @@ Rotate_log_event::Rotate_log_event(THD* thd_arg, #ifndef DBUG_OFF char buff[22]; DBUG_ENTER("Rotate_log_event::Rotate_log_event(THD*,...)"); - DBUG_PRINT("enter",("new_log_ident %s pos %s flags %lu", new_log_ident_arg, - llstr(pos_arg, buff), flags)); + DBUG_PRINT("enter",("new_log_ident: %s pos: %s flags: %lu", new_log_ident_arg, + llstr(pos_arg, buff), (ulong) flags)); #endif if (flags & DUP_NAME) new_log_ident= my_strdup_with_length(new_log_ident_arg, @@ -3912,7 +3913,7 @@ Slave_log_event::Slave_log_event(THD* thd_arg, memcpy(master_log, rli->group_master_log_name, master_log_len + 1); master_port = mi->port; master_pos = rli->group_master_log_pos; - DBUG_PRINT("info", ("master_log: %s pos: %d", master_log, + DBUG_PRINT("info", ("master_log: %s pos: %lu", master_log, (ulong) master_pos)); } else diff --git a/sql/mysqld.cc b/sql/mysqld.cc index bb102aa76d8..36a5605b507 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1531,7 +1531,7 @@ static void network_init(void) if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1)) { - sql_print_error("The socket file path is too long (> %lu): %s", + sql_print_error("The socket file path is too long (> %u): %s", sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port); unireg_abort(1); } @@ -3378,8 +3378,8 @@ int main(int argc, char **argv) if (stack_size && stack_size < thread_stack) { if (global_system_variables.log_warnings) - sql_print_warning("Asked for %ld thread stack, but got %ld", - thread_stack, stack_size); + sql_print_warning("Asked for %lu thread stack, but got %ld", + thread_stack, (long) stack_size); #if defined(__ia64__) || defined(__ia64) thread_stack= stack_size*2; #else @@ -3913,7 +3913,7 @@ static void create_new_thread(THD *thd) int error; thread_created++; threads.append(thd); - DBUG_PRINT("info",(("creating thread %d"), thd->thread_id)); + 
DBUG_PRINT("info",(("creating thread %lu"), thd->thread_id)); thd->connect_time = time(NULL); if ((error=pthread_create(&thd->real_id,&connection_attrib, handle_one_connection, @@ -5130,7 +5130,7 @@ master-ssl", (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"merge", OPT_MERGE, "Enable Merge storage engine. Disable with \ --skip-merge.", - (gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0}, + (gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"myisam-recover", OPT_MYISAM_RECOVER, "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.", (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0, diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 1601f7e5177..e84b2266e82 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -810,7 +810,7 @@ my_real_read(NET *net, ulong *complen) { my_bool interrupted = vio_should_retry(net->vio); - DBUG_PRINT("info",("vio_read returned %d, errno: %d", + DBUG_PRINT("info",("vio_read returned %ld, errno: %d", length, vio_errno(net->vio))); #if (!defined(__WIN__) && !defined(__EMX__) && !defined(OS2)) || defined(MYSQL_SERVER) /* diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 96239315026..ef755d868d9 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -878,7 +878,7 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT() file->extra(HA_EXTRA_NO_KEYREAD); if (free_file) { - DBUG_PRINT("info", ("Freeing separate handler %p (free=%d)", file, + DBUG_PRINT("info", ("Freeing separate handler 0x%lx (free: %d)", (long) file, free_file)); file->reset(); file->external_lock(current_thd, F_UNLCK); @@ -1836,9 +1836,9 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, double scan_time; DBUG_ENTER("SQL_SELECT::test_quick_select"); DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu", - keys_to_use.to_ulonglong(), (ulong) prev_tables, + (ulong) keys_to_use.to_ulonglong(), (ulong) prev_tables, (ulong) const_tables)); - DBUG_PRINT("info", ("records=%lu", (ulong)head->file->records)); + DBUG_PRINT("info", ("records: %lu", (ulong) head->file->records)); delete quick; quick=0; needed_reg.clear_all(); @@ -2102,7 +2102,7 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records) n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, rows2double(records))); if (busy_blocks < 1.0) busy_blocks= 1.0; - DBUG_PRINT("info",("sweep: nblocks=%g, busy_blocks=%g", n_blocks, + DBUG_PRINT("info",("sweep: nblocks: %g, busy_blocks: %g", n_blocks, busy_blocks)); /* Disabled: Bail out if # of blocks to read is bigger than # of blocks in @@ -2126,7 +2126,7 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records) result= busy_blocks; } } - DBUG_PRINT("info",("returning cost=%g", result)); + DBUG_PRINT("return",("cost: %g", result)); DBUG_RETURN(result); } @@ -2220,7 +2220,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, ha_rows roru_total_records; double roru_intersect_part= 1.0; DBUG_ENTER("get_best_disjunct_quick"); - DBUG_PRINT("info", ("Full table scan cost =%g", read_time)); + DBUG_PRINT("info", ("Full table scan cost: %g", read_time)); if (!(range_scans= (TRP_RANGE**)alloc_root(param->mem_root, sizeof(TRP_RANGE*)* @@ -2264,7 +2264,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, non_cpk_scan_records += (*cur_child)->records; } - DBUG_PRINT("info", ("index_merge scans cost=%g", imerge_cost)); + DBUG_PRINT("info", ("index_merge scans cost %g", imerge_cost)); 
if (imerge_too_expensive || (imerge_cost > read_time) || (non_cpk_scan_records+cpk_scan_records >= param->table->file->records) && read_time != DBL_MAX) @@ -2877,7 +2877,7 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info, DBUG_PRINT("info", ("Current out_rows= %g", info->out_rows)); DBUG_PRINT("info", ("Adding scan on %s", info->param->table->key_info[ror_scan->keynr].name)); - DBUG_PRINT("info", ("is_cpk_scan=%d",is_cpk_scan)); + DBUG_PRINT("info", ("is_cpk_scan: %d",is_cpk_scan)); selectivity_mult = ror_scan_selectivity(info, ror_scan); if (selectivity_mult == 1.0) @@ -8280,8 +8280,8 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, *records= num_groups; DBUG_PRINT("info", - ("table rows=%u, keys/block=%u, keys/group=%u, result rows=%u, blocks=%u", - table_records, keys_per_block, keys_per_group, *records, + ("table rows: %u keys/block: %u keys/group: %u result rows: %lu blocks: %u", + table_records, keys_per_block, keys_per_group, (ulong) *records, num_blocks)); DBUG_VOID_RETURN; } diff --git a/sql/slave.cc b/sql/slave.cc index e3497a4f0ac..84e28a7be52 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -247,7 +247,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, bool look_for_description_event) { DBUG_ENTER("init_relay_log_pos"); - DBUG_PRINT("info", ("pos=%lu", pos)); + DBUG_PRINT("info", ("pos: %lu", (long) pos)); *errmsg=0; pthread_mutex_t *log_lock=rli->relay_log.get_log_lock(); @@ -3080,7 +3080,7 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) return packet_error; } - DBUG_PRINT("info",( "len=%u, net->read_pos[4] = %d\n", + DBUG_PRINT("info",( "len: %lu net->read_pos[4]: %d\n", len, mysql->net.read_pos[4])); return len - 1; } @@ -4116,7 +4116,7 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev) /* Safe copy as 'rev' has been "sanitized" in Rotate_log_event's ctor */ memcpy(mi->master_log_name, rev->new_log_ident, rev->ident_len+1); mi->master_log_pos= rev->pos; - DBUG_PRINT("info", ("master_log_pos: '%s' %d", + DBUG_PRINT("info", ("master_log_pos: '%s' %lu", mi->master_log_name, (ulong) mi->master_log_pos)); #ifndef DBUG_OFF /* @@ -4233,7 +4233,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, int error = process_io_create_file(mi,(Create_file_log_event*)ev); delete ev; mi->master_log_pos += inc_pos; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); pthread_mutex_unlock(&mi->data_lock); my_free((char*)tmp_buf, MYF(0)); DBUG_RETURN(error); @@ -4260,7 +4260,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, } delete ev; mi->master_log_pos+= inc_pos; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); pthread_mutex_unlock(&mi->data_lock); DBUG_RETURN(0); } @@ -4316,7 +4316,7 @@ static int queue_binlog_ver_3_event(MASTER_INFO *mi, const char *buf, delete ev; mi->master_log_pos+= inc_pos; err: - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); pthread_mutex_unlock(&mi->data_lock); DBUG_RETURN(0); } @@ -4486,7 +4486,8 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) rli->ign_master_log_pos_end= mi->master_log_pos; } rli->relay_log.signal_update(); // the slave SQL thread needs to re-check - DBUG_PRINT("info", ("master_log_pos: 
%d, event originating from the same server, ignored", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu event originating from the same server, ignored", + (ulong) mi->master_log_pos)); } else { @@ -4494,7 +4495,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) if (likely(!(rli->relay_log.appendv(buf,event_len,0)))) { mi->master_log_pos+= inc_pos; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); rli->relay_log.harvest_bytes_written(&rli->log_space_total); } else diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 724cf88d373..d91da405c36 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -3726,7 +3726,7 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant, GRANT_COLUMN *grant_column; ulong want_access= grant->want_privilege & ~grant->privilege; DBUG_ENTER("check_grant_column"); - DBUG_PRINT("enter", ("table: %s want_access: %u", table_name, want_access)); + DBUG_PRINT("enter", ("table: %s want_access: %lu", table_name, want_access)); if (!want_access) DBUG_RETURN(0); // Already checked diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index ff033b69f98..5902374dff0 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -527,7 +527,8 @@ void Query_cache_query::init_n_lock() my_rwlock_init(&lock, NULL); lock_writing(); DBUG_PRINT("qcache", ("inited & locked query for block 0x%lx", - ((byte*) this)-ALIGN_SIZE(sizeof(Query_cache_block)))); + (long) (((byte*) this) - + ALIGN_SIZE(sizeof(Query_cache_block))))); DBUG_VOID_RETURN; } @@ -536,7 +537,8 @@ void Query_cache_query::unlock_n_destroy() { DBUG_ENTER("Query_cache_query::unlock_n_destroy"); DBUG_PRINT("qcache", ("destroyed & unlocked query for block 0x%lx", - ((byte*)this)-ALIGN_SIZE(sizeof(Query_cache_block)))); + (long) (((byte*) this) - + ALIGN_SIZE(sizeof(Query_cache_block))))); /* The following call is not needed on system where one can destroy an active semaphore @@ -702,6 +704,7 @@ void query_cache_abort(NET *net) void query_cache_end_of_result(THD *thd) { + Query_cache_block *query_block; DBUG_ENTER("query_cache_end_of_result"); /* See the comment on double-check locking usage above. 
*/ @@ -717,13 +720,9 @@ void query_cache_end_of_result(THD *thd) if (unlikely(query_cache.query_cache_size == 0 || query_cache.flush_in_progress)) - { - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); - DBUG_VOID_RETURN; - } + goto end; - Query_cache_block *query_block= ((Query_cache_block*) - thd->net.query_cache_query); + query_block= ((Query_cache_block*) thd->net.query_cache_query); if (query_block) { DUMP(&query_cache); @@ -742,27 +741,21 @@ void query_cache_end_of_result(THD *thd) header->query())); query_cache.wreck(__LINE__, ""); - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); - - DBUG_VOID_RETURN; + BLOCK_UNLOCK_WR(query_block); + goto end; } #endif header->found_rows(current_thd->limit_found_rows); header->result()->type= Query_cache_block::RESULT; header->writer(0); thd->net.query_cache_query= 0; + BLOCK_UNLOCK_WR(query_block); DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1);); - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); - - BLOCK_UNLOCK_WR(query_block); - } - else - { - // Cache was flushed or resized and query was deleted => do nothing - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); } +end: + STRUCT_UNLOCK(&query_cache.structure_guard_mutex); DBUG_VOID_RETURN; } @@ -879,8 +872,8 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu", flags.character_set_client_num, flags.character_set_results_num, flags.collation_connection_num, - flags.limit, - (ulong)flags.time_zone, + (ulong) flags.limit, + (ulong) flags.time_zone, flags.sql_mode, flags.max_sort_length, flags.group_concat_max_len)); @@ -1122,8 +1115,8 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu", flags.character_set_client_num, flags.character_set_results_num, flags.collation_connection_num, - flags.limit, - (ulong)flags.time_zone, + (ulong) flags.limit, + (ulong) flags.time_zone, flags.sql_mode, flags.max_sort_length, flags.group_concat_max_len)); @@ -1260,7 +1253,7 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu", #ifndef EMBEDDED_LIBRARY do { - DBUG_PRINT("qcache", ("Results (len %lu, used %lu, headers %lu)", + DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %u)", result_block->length, result_block->used, result_block->headers_len()+ ALIGN_SIZE(sizeof(Query_cache_result)))); @@ -2037,7 +2030,7 @@ Query_cache::append_result_data(Query_cache_block **current_block, { DBUG_ENTER("Query_cache::append_result_data"); DBUG_PRINT("qcache", ("append %lu bytes to 0x%lx query", - data_len, query_block)); + data_len, (long) query_block)); if (query_block->query()->add(data_len) > query_cache_limit) { @@ -3044,10 +3037,10 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex, OPTION_TO_QUERY_CACHE))) && lex->safe_to_cache_query) { - DBUG_PRINT("qcache", ("options %lx %lx, type %u", - OPTION_TO_QUERY_CACHE, - lex->select_lex.options, - (int) thd->variables.query_cache_type)); + DBUG_PRINT("qcache", ("options: %lx %lx type: %u", + OPTION_TO_QUERY_CACHE, + (long) lex->select_lex.options, + (int) thd->variables.query_cache_type)); if (!(table_count= process_and_count_tables(tables_used, tables_type))) DBUG_RETURN(0); @@ -3063,10 +3056,10 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex, } DBUG_PRINT("qcache", - ("not interesting query: %d or not cacheable, options %lx %lx, type %u", + ("not interesting query: %d or not cacheable, options %lx %lx type: %u", (int) lex->sql_command, OPTION_TO_QUERY_CACHE, - lex->select_lex.options, + (long) lex->select_lex.options, (int) thd->variables.query_cache_type)); DBUG_RETURN(0); } @@ 
-3655,7 +3648,8 @@ void Query_cache::queries_dump() DBUG_PRINT("qcache", ("F:%u C:%u L:%lu T:'%s' (%u) '%s' '%s'", flags.client_long_flag, flags.character_set_client_num, - (ulong)flags.limit, flags.time_zone->get_name(), + (ulong)flags.limit, + flags.time_zone->get_name()->ptr(), len, str, strend(str)+1)); DBUG_PRINT("qcache", ("-b- 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx", (ulong) block, (ulong) block->next, (ulong) block->prev, @@ -3875,9 +3869,8 @@ my_bool Query_cache::check_integrity(bool locked) break; } default: - DBUG_PRINT("error", - ("block 0x%lx have incorrect type %u", - block, block->type)); + DBUG_PRINT("error", ("block 0x%lx have incorrect type %u", + (long) block, block->type)); result = 1; } @@ -3975,8 +3968,8 @@ my_bool Query_cache::check_integrity(bool locked) } while (block != bins[i].free_blocks); if (count != bins[i].number) { - DBUG_PRINT("error", ("bin[%d].number is %d, but bin have %d blocks", - bins[i].number, count)); + DBUG_PRINT("error", ("bins[%d].number = %d, but bin have %d blocks", + i, bins[i].number, count)); result = 1; } } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index ba2f525a4a4..d2f1e9ed0d9 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -712,7 +712,7 @@ void THD::add_changed_table(const char *key, long key_length) { list_include(prev_changed, curr, changed_table_dup(key, key_length)); DBUG_PRINT("info", - ("key_length %u %u", key_length, (*prev_changed)->key_length)); + ("key_length %ld %u", key_length, (*prev_changed)->key_length)); DBUG_VOID_RETURN; } else if (cmp == 0) @@ -722,7 +722,7 @@ void THD::add_changed_table(const char *key, long key_length) { list_include(prev_changed, curr, changed_table_dup(key, key_length)); DBUG_PRINT("info", - ("key_length %u %u", key_length, + ("key_length %ld %u", key_length, (*prev_changed)->key_length)); DBUG_VOID_RETURN; } @@ -734,7 +734,7 @@ void THD::add_changed_table(const char *key, long key_length) } } *prev_changed = changed_table_dup(key, key_length); - DBUG_PRINT("info", ("key_length %u %u", key_length, + DBUG_PRINT("info", ("key_length %ld %u", key_length, (*prev_changed)->key_length)); DBUG_VOID_RETURN; } diff --git a/sql/sql_class.h b/sql/sql_class.h index c7bdfbd7ea7..2467385b679 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2084,7 +2084,7 @@ public: inline bool unique_add(void *ptr) { DBUG_ENTER("unique_add"); - DBUG_PRINT("info", ("tree %u - %u", tree.elements_in_tree, max_elements)); + DBUG_PRINT("info", ("tree %u - %lu", tree.elements_in_tree, max_elements)); if (tree.elements_in_tree > max_elements && flush()) DBUG_RETURN(1); DBUG_RETURN(!tree_insert(&tree, ptr, 0, tree.custom_arg)); diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index e13e7728708..38c12562fe3 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -326,7 +326,7 @@ cleanup: { thd->row_count_func= deleted; send_ok(thd,deleted); - DBUG_PRINT("info",("%d records deleted",deleted)); + DBUG_PRINT("info",("%ld records deleted",(long) deleted)); } DBUG_RETURN(error >= 0 || thd->net.report_error); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 3a107c2296c..e5181193b9c 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -381,9 +381,9 @@ int check_user(THD *thd, enum enum_server_command command, NO_ACCESS)) // authentication is OK { DBUG_PRINT("info", - ("Capabilities: %d packet_length: %ld Host: '%s' " + ("Capabilities: %lu packet_length: %ld Host: '%s' " "Login user: '%s' Priv_user: '%s' Using password: %s " - "Access: %u db: '%s'", + "Access: %lu db: '%s'", thd->client_capabilities, 
thd->max_client_packet_length, thd->main_security_ctx.host_or_ip, @@ -956,7 +956,7 @@ static int check_connection(THD *thd) if (thd->client_capabilities & CLIENT_IGNORE_SPACE) thd->variables.sql_mode|= MODE_IGNORE_SPACE; #ifdef HAVE_OPENSSL - DBUG_PRINT("info", ("client capabilities: %d", thd->client_capabilities)); + DBUG_PRINT("info", ("client capabilities: %lu", thd->client_capabilities)); if (thd->client_capabilities & CLIENT_SSL) { /* Do the SSL layering. */ @@ -1112,7 +1112,7 @@ pthread_handler_t handle_one_connection(void *arg) of handle_one_connection, which is thd. We need to know the start of the stack so that we could check for stack overruns. */ - DBUG_PRINT("info", ("handle_one_connection called by thread %d\n", + DBUG_PRINT("info", ("handle_one_connection called by thread %lu\n", thd->thread_id)); /* now that we've called my_thread_init(), it is safe to call DBUG_* */ @@ -1764,7 +1764,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (alloc_query(thd, packet, packet_length)) break; // fatal error is set char *packet_end= thd->query + thd->query_length; - mysql_log.write(thd,command, "%.*b", thd->query_length, thd->query); + /* 'b' stands for 'buffer' parameter', special for 'my_snprintf' */ + const char *format= "%.*b"; + mysql_log.write(thd,command, format, thd->query_length, thd->query); DBUG_PRINT("query",("%-.4096s",thd->query)); if (!(specialflag & SPECIAL_NO_PRIOR)) diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 6517afa5432..1e7601c0951 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1664,7 +1664,7 @@ static bool check_prepared_statement(Prepared_statement *stmt, enum enum_sql_command sql_command= lex->sql_command; int res= 0; DBUG_ENTER("check_prepared_statement"); - DBUG_PRINT("enter",("command: %d, param_count: %ld", + DBUG_PRINT("enter",("command: %d, param_count: %u", sql_command, stmt->param_count)); lex->first_lists_tables_same(); @@ -1877,9 +1877,12 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length) thd->stmt_map.erase(stmt); } else - mysql_log.write(thd, COM_STMT_PREPARE, "[%lu] %.*b", stmt->id, + { + const char *format= "[%lu] %.*b"; + mysql_log.write(thd, COM_STMT_PREPARE, format, stmt->id, stmt->query_length, stmt->query); + } /* check_prepared_statemnt sends the metadata packet in case of success */ DBUG_VOID_RETURN; } @@ -2261,8 +2264,11 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length) if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(), WAIT_PRIOR); if (error == 0) - mysql_log.write(thd, COM_STMT_EXECUTE, "[%lu] %.*b", stmt->id, + { + const char *format= "[%lu] %.*b"; + mysql_log.write(thd, COM_STMT_EXECUTE, format, stmt->id, thd->query_length, thd->query); + } DBUG_VOID_RETURN; diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index e1933d42f9e..126efb35cec 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1101,7 +1101,7 @@ bool change_master(THD* thd, MASTER_INFO* mi) { mi->master_log_pos= lex_mi->pos; } - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); if (lex_mi->host) strmake(mi->host, lex_mi->host, sizeof(mi->host)-1); @@ -1218,7 +1218,7 @@ bool change_master(THD* thd, MASTER_INFO* mi) } } mi->rli.group_master_log_pos = mi->master_log_pos; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); /* Coordinates in rli were spoilt by the 'if 
(need_relay_log_purge)' block, diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 75cfff4cbb6..386d12e9a8a 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -9965,7 +9965,7 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) if (join->result->send_eof()) rc= 1; // Don't send error } - DBUG_PRINT("info",("%ld records output",join->send_records)); + DBUG_PRINT("info",("%ld records output", (long) join->send_records)); } else rc= -1; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index d431b671f18..dabda39d6b7 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -569,7 +569,7 @@ int mysql_update(THD *thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated; send_ok(thd, (ulong) thd->row_count_func, thd->insert_id_used ? thd->last_insert_id : 0L,buff); - DBUG_PRINT("info",("%d records updated",updated)); + DBUG_PRINT("info",("%ld records updated", (long) updated)); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */ thd->abort_on_warning= 0; @@ -667,7 +667,7 @@ static table_map get_table_map(List *items) while ((item= (Item_field *) item_it++)) map|= item->used_tables(); - DBUG_PRINT("info",("table_map: 0x%08x", map)); + DBUG_PRINT("info", ("table_map: 0x%08lx", (long) map)); return map; } diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 52b6c2c38c0..c0cdaf59712 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -563,7 +563,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, LEX_STRING *name; int i; - for (i= 0; name= names++; i++) + for (i= 0; (name= names++); i++) { buff.append(i ? ", " : "("); append_identifier(thd, &buff, name->str, name->length); diff --git a/sql/strfunc.cc b/sql/strfunc.cc index c822d10af46..d03d88ee051 100644 --- a/sql/strfunc.cc +++ b/sql/strfunc.cc @@ -150,7 +150,7 @@ uint find_type2(TYPELIB *typelib, const char *x, uint length, CHARSET_INFO *cs) int pos; const char *j; DBUG_ENTER("find_type2"); - DBUG_PRINT("enter",("x: '%.*s' lib: 0x%lx", length, x, typelib)); + DBUG_PRINT("enter",("x: '%.*s' lib: 0x%lx", length, x, (long) typelib)); if (!typelib->count) { diff --git a/sql/table.cc b/sql/table.cc index d72379efb32..5a2281775b1 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -88,7 +88,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, MEM_ROOT **root_ptr, *old_root; TABLE_SHARE *share; DBUG_ENTER("openfrm"); - DBUG_PRINT("enter",("name: '%s' form: 0x%lx",name,outparam)); + DBUG_PRINT("enter",("name: '%s' form: 0x%lx", name, (long) outparam)); error= 1; disk_buff= NULL; diff --git a/sql/tztime.cc b/sql/tztime.cc index bd8e43075c4..fe23954bbb2 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -949,13 +949,12 @@ TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, */ if (shift) { - if (local_t > (TIMESTAMP_MAX_VALUE - shift*86400L + - sp->revtis[i].rt_offset - saved_seconds)) + if (local_t > (my_time_t) (TIMESTAMP_MAX_VALUE - shift*86400L + + sp->revtis[i].rt_offset - saved_seconds)) { DBUG_RETURN(0); /* my_time_t overflow */ } - else - local_t+= shift*86400L; + local_t+= shift*86400L; } if (sp->revtis[i].rt_type) diff --git a/sql/unireg.cc b/sql/unireg.cc index 768a288ca19..8568b09e498 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -394,16 +394,16 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo, pos[6]=pos[7]=0; // For the future pos+=8; key_parts+=key->key_parts; - DBUG_PRINT("loop",("flags: %d key_parts: %d at 0x%lx", - key->flags,key->key_parts, - key->key_part)); + DBUG_PRINT("loop", ("flags: %d key_parts: %d at 0x%lx", + 
key->flags, key->key_parts, + (long) key->key_part)); for (key_part=key->key_part,key_part_end=key_part+key->key_parts ; key_part != key_part_end ; key_part++) { uint offset; - DBUG_PRINT("loop",("field: %d startpos: %lu length: %ld", + DBUG_PRINT("loop",("field: %d startpos: %lu length: %d", key_part->fieldnr, key_part->offset + data_offset, key_part->length)); int2store(pos,key_part->fieldnr+1+FIELD_NAME_USED); diff --git a/strings/decimal.c b/strings/decimal.c index 5a0bc0968b6..3b51d089c22 100644 --- a/strings/decimal.c +++ b/strings/decimal.c @@ -1348,7 +1348,7 @@ int bin2decimal(char *from, decimal_t *to, int precision, int scale) } from+=i; *buf=x ^ mask; - if (((uint32)*buf) >= powers10[intg0x+1]) + if (((ulonglong)*buf) >= (ulonglong) powers10[intg0x+1]) goto err; if (buf > to->buf || *buf != 0) buf++; diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index da7fde469ad..aa993230a7f 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -13686,7 +13686,8 @@ static void test_bug11172() hired.year, hired.month, hired.day); } DIE_UNLESS(rc == MYSQL_NO_DATA); - mysql_stmt_free_result(stmt) || mysql_stmt_reset(stmt); + if (!mysql_stmt_free_result(stmt)) + mysql_stmt_reset(stmt); } mysql_stmt_close(stmt); mysql_rollback(mysql); @@ -14828,6 +14829,8 @@ static void test_opt_reconnect() } +#ifndef EMBEDDED_LIBRARY + static void test_bug12744() { MYSQL_STMT *prep_stmt = NULL; @@ -14859,6 +14862,8 @@ static void test_bug12744() client_connect(0); } +#endif /* EMBEDDED_LIBRARY */ + /* Bug #16143: mysql_stmt_sqlstate returns an empty string instead of '00000' */ static void test_bug16143() diff --git a/vio/viosocket.c b/vio/viosocket.c index 21b3dae906a..cae0cf70db4 100644 --- a/vio/viosocket.c +++ b/vio/viosocket.c @@ -33,7 +33,7 @@ int vio_read(Vio * vio, gptr buf, int size) { int r; DBUG_ENTER("vio_read"); - DBUG_PRINT("enter", ("sd: %d, buf: 0x%lx, size: %d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d buf: 0x%lx size: %d", vio->sd, (long) buf, size)); /* Ensure nobody uses vio_read_buff and vio_read simultaneously */ DBUG_ASSERT(vio->read_end == vio->read_pos); @@ -64,7 +64,7 @@ int vio_read_buff(Vio *vio, gptr buf, int size) int rc; #define VIO_UNBUFFERED_READ_MIN_SIZE 2048 DBUG_ENTER("vio_read_buff"); - DBUG_PRINT("enter", ("sd: %d, buf: 0x%lx, size: %d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d buf: 0x%lx size: %d", vio->sd, (long) buf, size)); if (vio->read_pos < vio->read_end) { @@ -102,7 +102,7 @@ int vio_write(Vio * vio, const gptr buf, int size) { int r; DBUG_ENTER("vio_write"); - DBUG_PRINT("enter", ("sd: %d, buf: 0x%lx, size: %d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d buf: 0x%lx size: %d", vio->sd, (long) buf, size)); #ifdef __WIN__ r = send(vio->sd, buf, size,0); #else @@ -227,7 +227,7 @@ int vio_keepalive(Vio* vio, my_bool set_keep_alive) int r=0; uint opt = 0; DBUG_ENTER("vio_keepalive"); - DBUG_PRINT("enter", ("sd: %d, set_keep_alive: %d", vio->sd, (int) + DBUG_PRINT("enter", ("sd: %d set_keep_alive: %d", vio->sd, (int) set_keep_alive)); if (vio->type != VIO_TYPE_NAMEDPIPE) { @@ -411,7 +411,7 @@ int vio_read_pipe(Vio * vio, gptr buf, int size) { DWORD length; DBUG_ENTER("vio_read_pipe"); - DBUG_PRINT("enter", ("sd: %d, buf: 0x%lx, size: %d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d buf: 0x%lx size: %d", vio->sd, buf, size)); if (!ReadFile(vio->hPipe, buf, size, &length, NULL)) DBUG_RETURN(-1); @@ -425,7 +425,7 @@ int vio_write_pipe(Vio * vio, const gptr buf, int size) { DWORD length; 
DBUG_ENTER("vio_write_pipe"); - DBUG_PRINT("enter", ("sd: %d, buf: 0x%lx, size: %d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d buf: 0x%lx size: %d", vio->sd, buf, size)); if (!WriteFile(vio->hPipe, (char*) buf, size, &length, NULL)) DBUG_RETURN(-1); @@ -470,7 +470,7 @@ int vio_read_shared_memory(Vio * vio, gptr buf, int size) char *current_postion; DBUG_ENTER("vio_read_shared_memory"); - DBUG_PRINT("enter", ("sd: %d, buf: 0x%lx, size: %d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d buf: 0x%lx size: %d", vio->sd, buf, size)); remain_local = size; current_postion=buf; @@ -531,7 +531,7 @@ int vio_write_shared_memory(Vio * vio, const gptr buf, int size) char *current_postion; DBUG_ENTER("vio_write_shared_memory"); - DBUG_PRINT("enter", ("sd: %d, buf: 0x%lx, size: %d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d buf: 0x%lx size: %d", vio->sd, buf, size)); remain = size; current_postion = buf; diff --git a/vio/viossl.c b/vio/viossl.c index b5fd0e11c02..f436262a3ce 100644 --- a/vio/viossl.c +++ b/vio/viossl.c @@ -87,8 +87,8 @@ int vio_ssl_read(Vio *vio, gptr buf, int size) { int r; DBUG_ENTER("vio_ssl_read"); - DBUG_PRINT("enter", ("sd: %d, buf: 0x%lx, size: %d, ssl_: 0x%lx", - vio->sd, buf, size, vio->ssl_arg)); + DBUG_PRINT("enter", ("sd: %d buf: 0x%lx size: %d ssl: 0x%lx", + vio->sd, (long) buf, size, (long) vio->ssl_arg)); r= SSL_read((SSL*) vio->ssl_arg, buf, size); #ifndef DBUG_OFF @@ -104,7 +104,7 @@ int vio_ssl_write(Vio *vio, const gptr buf, int size) { int r; DBUG_ENTER("vio_ssl_write"); - DBUG_PRINT("enter", ("sd: %d, buf: 0x%lx, size: %d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d buf: 0x%lx size: %d", vio->sd, (long) buf, size)); r= SSL_write((SSL*) vio->ssl_arg, buf, size); #ifndef DBUG_OFF @@ -133,7 +133,7 @@ int vio_ssl_close(Vio *vio) break; /* Fallthrough */ default: /* Shutdown failed */ - DBUG_PRINT("vio_error", ("SSL_shutdown() failed, error: %s", + DBUG_PRINT("vio_error", ("SSL_shutdown() failed, error: %d", SSL_get_error(ssl, r))); break; } @@ -151,8 +151,8 @@ int sslaccept(struct st_VioSSLFd *ptr, Vio *vio, long timeout) my_bool net_blocking; enum enum_vio_type old_type; DBUG_ENTER("sslaccept"); - DBUG_PRINT("enter", ("sd: %d ptr: %p, timeout: %d", - vio->sd, ptr, timeout)); + DBUG_PRINT("enter", ("sd: %d ptr: 0x%lx, timeout: %ld", + vio->sd, (long) ptr, timeout)); old_type= vio->type; net_blocking= vio_is_blocking(vio); @@ -168,7 +168,7 @@ int sslaccept(struct st_VioSSLFd *ptr, Vio *vio, long timeout) DBUG_RETURN(1); } vio->ssl_arg= (void*)ssl; - DBUG_PRINT("info", ("ssl_: %p timeout: %ld", ssl, timeout)); + DBUG_PRINT("info", ("ssl: 0x%lx timeout: %ld", (long) ssl, timeout)); SSL_clear(ssl); SSL_SESSION_set_timeout(SSL_get_session(ssl), timeout); SSL_set_fd(ssl, vio->sd); @@ -226,8 +226,8 @@ int sslconnect(struct st_VioSSLFd *ptr, Vio *vio, long timeout) enum enum_vio_type old_type; DBUG_ENTER("sslconnect"); - DBUG_PRINT("enter", ("sd: %d, ptr: %p, ctx: %p", - vio->sd, ptr, ptr->ssl_context)); + DBUG_PRINT("enter", ("sd: %d ptr: 0x%lx ctx: 0x%lx", + vio->sd, (long) ptr, (long) ptr->ssl_context)); old_type= vio->type; net_blocking= vio_is_blocking(vio); @@ -242,7 +242,7 @@ int sslconnect(struct st_VioSSLFd *ptr, Vio *vio, long timeout) DBUG_RETURN(1); } vio->ssl_arg= (void*)ssl; - DBUG_PRINT("info", ("ssl: %p, timeout: %ld", ssl, timeout)); + DBUG_PRINT("info", ("ssl: 0x%lx timeout: %ld", (long) ssl, timeout)); SSL_clear(ssl); SSL_SESSION_set_timeout(SSL_get_session(ssl), timeout); SSL_set_fd(ssl, vio->sd); diff --git 
a/vio/viosslfactories.c b/vio/viosslfactories.c index 34ce1fefaa9..014ce25d754 100644 --- a/vio/viosslfactories.c +++ b/vio/viosslfactories.c @@ -79,8 +79,8 @@ static int vio_set_cert_stuff(SSL_CTX *ctx, const char *cert_file, const char *key_file) { DBUG_ENTER("vio_set_cert_stuff"); - DBUG_PRINT("enter", ("ctx: %p, cert_file: %s, key_file: %s", - ctx, cert_file, key_file)); + DBUG_PRINT("enter", ("ctx: 0x%lx cert_file: %s key_file: %s", + (long) ctx, cert_file, key_file)); if (cert_file) { if (SSL_CTX_use_certificate_file(ctx, cert_file, SSL_FILETYPE_PEM) <= 0) @@ -128,7 +128,7 @@ vio_verify_callback(int ok, X509_STORE_CTX *ctx) X509 *err_cert; DBUG_ENTER("vio_verify_callback"); - DBUG_PRINT("enter", ("ok: %d, ctx: %p", ok, ctx)); + DBUG_PRINT("enter", ("ok: %d ctx: 0x%lx", ok, (long) ctx)); err_cert= X509_STORE_CTX_get_current_cert(ctx); X509_NAME_oneline(X509_get_subject_name(err_cert), buf, sizeof(buf)); @@ -139,7 +139,7 @@ vio_verify_callback(int ok, X509_STORE_CTX *ctx) err= X509_STORE_CTX_get_error(ctx); depth= X509_STORE_CTX_get_error_depth(ctx); - DBUG_PRINT("error",("verify error: %d, '%s'",err, + DBUG_PRINT("error",("verify error: %d '%s'",err, X509_verify_cert_error_string(err))); /* Approve cert if depth is greater then "verify_depth", currently From 1ae9f3b985c59f0c94922cb2bc3c186abae5c619 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Nov 2006 13:45:01 +0400 Subject: [PATCH 46/57] Bug#22413 EXPLAIN SELECT FROM view with ORDER BY yield server crash disable filling of I_S tables for EXPLAIN mysql-test/r/information_schema.result: Bug#22413 EXPLAIN SELECT FROM view with ORDER BY yield server crash test case mysql-test/t/information_schema.test: Bug#22413 EXPLAIN SELECT FROM view with ORDER BY yield server crash test case --- mysql-test/r/information_schema.result | 15 +++++++++++++++ mysql-test/t/information_schema.test | 14 ++++++++++++++ sql/sql_select.cc | 2 ++ 3 files changed, 31 insertions(+) diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 3fffce73aa9..43eedc19f12 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -1254,3 +1254,18 @@ COLUMN_NAME MD5(COLUMN_DEFAULT) LENGTH(COLUMN_DEFAULT) COLUMN_DEFAULT=get_value( fld1 7cf7a6782be951a1f2464a350da926a5 65532 1 DROP TABLE bug23037; DROP FUNCTION get_value; +create view v1 as +select table_schema as object_schema, +table_name as object_name, +table_type as object_type +from information_schema.tables +order by object_schema; +explain select * from v1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY system NULL NULL NULL NULL 0 const row not found +2 DERIVED tables ALL NULL NULL NULL NULL 2 Using filesort +explain select * from (select table_name from information_schema.tables) as a; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY system NULL NULL NULL NULL 0 const row not found +2 DERIVED tables ALL NULL NULL NULL NULL 2 +drop view v1; diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test index 27007bbe16a..dd203add344 100644 --- a/mysql-test/t/information_schema.test +++ b/mysql-test/t/information_schema.test @@ -973,4 +973,18 @@ DROP FUNCTION get_value; + +# +# Bug#22413: EXPLAIN SELECT FROM view with ORDER BY yield server crash +# +create view v1 as +select table_schema as object_schema, + table_name as object_name, + table_type as object_type +from information_schema.tables +order by object_schema; +explain select * from v1; 
+explain select * from (select table_name from information_schema.tables) as a; +drop view v1; + # End of 5.0 tests. diff --git a/sql/sql_select.cc b/sql/sql_select.cc index adb57e3e9f4..4d664edd5af 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1471,6 +1471,7 @@ JOIN::exec() curr_join->examined_rows= 0; if ((curr_join->select_lex->options & OPTION_SCHEMA_TABLE) && + !thd->lex->describe && get_schema_tables_result(curr_join)) { DBUG_VOID_RETURN; @@ -12278,6 +12279,7 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, /* Fill schema tables with data before filesort if it's necessary */ if ((join->select_lex->options & OPTION_SCHEMA_TABLE) && + !thd->lex->describe && get_schema_tables_result(join)) goto err; From c0c319395160ec07c609a9628c6f66485852a33a Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Nov 2006 16:45:05 +0400 Subject: [PATCH 47/57] after merge fix --- mysql-test/r/information_schema.result | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 2caa11f245b..9db11883db4 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -1338,7 +1338,7 @@ order by object_schema; explain select * from v1; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY system NULL NULL NULL NULL 0 const row not found -2 DERIVED tables ALL NULL NULL NULL NULL 2 Using filesort +2 DERIVED tables ALL NULL NULL NULL NULL 0 Using filesort explain select * from (select table_name from information_schema.tables) as a; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY system NULL NULL NULL NULL 0 const row not found From c0f63d8564fdd1aacd001a882271dd896721d93a Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Nov 2006 21:52:39 +0300 Subject: [PATCH 48/57] Fix typo in comment --- sql/filesort.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/filesort.cc b/sql/filesort.cc index 12b17846fe4..6156d364b2b 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -974,7 +974,7 @@ uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, Put all room used by freed buffer to use in adjacent buffer. Note, that we can't simply distribute memory evenly between all buffers, because new areas must not overlap with old ones. - SYNOPSYS + SYNOPSIS reuse_freed_buff() queue IN list of non-empty buffers, without freed buffer reuse IN empty buffer From 85a590a26cb2e8d60a630678ae9fbf55fb97f4da Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Nov 2006 22:32:58 +0200 Subject: [PATCH 49/57] Added --debug-info to most clients to detect memory leaks in mysql-test-run Moved .progress files into the log directory Moved 'cluster' database tables into the MySQL database, to not have 'cluster' being a reserved database name Fixed bug where mysqld got a core dump when trying to use a table created by MySQL 3.23 Fixed some compiler warnings Fixed small memory leak in libmysql Note that this changeset doesn't include the new mysqldump.c code required to run some tests.
This will be added when I merge 5.0 to 5.1 client/client_priv.h: Added OPT_DEBUG_INFO and OPT_COLUMN_TYPES client/mysql.cc: Split --debug-info into --debug-info and --column-type-info client/mysql_upgrade.c: Give only error info at end if using --debug-info client/mysqladmin.cc: Added --debug-info to detect memory leaks in mysqltest client/mysqlbinlog.cc: Added --debug-info to detect memory leaks in mysqltest client/mysqlcheck.c: Added --debug-info to detect memory leaks in mysqltest client/mysqlimport.c: Added --debug-info to detect memory leaks in mysqltest client/mysqlshow.c: Added --debug-info to detect memory leaks in mysqltest client/mysqltest.c: Added --debug-info to detect memory leaks in mysqltest Added option --logdir to force .progress files in a specific directory libmysql/libmysql.c: Fixed memory leak mysql-test/include/ndb_setup_slave.inc: Moved cluster tables under 'mysql' mysql-test/include/query_cache.inc: Added more tests for query cache mysql-test/lib/init_db.sql: Move cluster.binlog_index -> mysql.binlog_index mysql-test/lib/mtr_report.pl: Find memory leaks mysql-test/mysql-test-run-shell.sh: Added --debug-info to programs to detect memory leaks mysql-test/mysql-test-run.pl: Added --debug-info to programs to detect memory leaks Force log files to var/log cluster tables moved under 'mysql' mysql-test/r/cache_innodb.result: New query cache test with innodb mysql-test/r/connect.result: binlog_index is now in mysql mysql-test/r/drop.result: Removed 'cluster' database mysql-test/r/information_schema.result: Removed 'cluster' database mysql-test/r/mysqlcheck.result: cluster.binlog_index -> mysql.binlog_index mysql-test/r/ndb_binlog_basic.result: cluster.binlog_index -> mysql.binlog_index mysql-test/r/ndb_binlog_ddl_multi.result: cluster -> mysql mysql-test/r/ndb_binlog_discover.result: cluster -> mysql mysql-test/r/ndb_binlog_multi.result: cluster -> mysql mysql-test/r/ndb_restore_compat.result: cluster -> mysql mysql-test/r/ps_1general.result: Removed cluster database mysql-test/r/rpl_create_database.result: Removed cluster database mysql-test/r/rpl_load_from_master.result: Removed cluster database mysql-test/r/rpl_loaddata_m.result: Removed cluster database mysql-test/r/rpl_ndb_bank.result: cluster -> mysql mysql-test/r/rpl_ndb_dd_advance.result: cluster -> mysql mysql-test/r/rpl_ndb_dd_basic.result: cluster -> mysql mysql-test/r/rpl_ndb_idempotent.result: cluster -> mysql mysql-test/r/rpl_ndb_log.result: cluster -> mysql mysql-test/r/rpl_ndb_multi.result: cluster -> mysql mysql-test/r/rpl_ndb_sync.result: cluster -> mysql mysql-test/r/rpl_row_basic_11bugs.result: Removed cluster database mysql-test/r/rpl_truncate_7ndb.result: Position have changed mysql-test/r/rpl_truncate_7ndb_2.result: cluster -> mysql mysql-test/r/schema.result: Removed cluster database mysql-test/r/show_check.result: Removed cluster database mysql-test/r/system_mysql_db.result: binlog_index moved under mysql mysql-test/r/upgrade.result: More tests mysql-test/t/information_schema.test: cluster -> mysql mysql-test/t/mysqldump.test: More test to detect memory leaks mysql-test/t/ndb_binlog_basic.test: cluster -> mysql mysql-test/t/ndb_binlog_multi.test: cluster -> mysql mysql-test/t/ndb_restore_compat.test: cluster -> mysql mysql-test/t/rpl_ndb_bank.test: cluster -> mysql mysql-test/t/rpl_ndb_dd_advance.test: cluster -> mysql mysql-test/t/rpl_ndb_idempotent.test: cluster -> mysql mysql-test/t/rpl_ndb_multi.test: cluster -> mysql mysql-test/t/rpl_ndb_sync.test: cluster -> mysql 
mysql-test/t/system_mysql_db_fix.test: Drop also binlog_index mysql-test/t/upgrade.test: Check that tables created with 3.23 works mysys/my_error.c: Remove all registered errors (to avoid memory leaks) mysys/my_init.c: call my_error_unregister_all() to avoid some possible memory leaks mysys/mysys_priv.h: Added prototype scripts/mysql_create_system_tables.sh: cluster -> mysql scripts/mysql_fix_privilege_tables.sql: Changed -- to # Create binlog_index table if it dosesn't exist sql/field.cc: Made offset() inline sql/field.h: Changed prototype of offset() to not make it depending on TABLE structure sql/field_conv.cc: Removed some usage of current_thd sql/ha_ndbcluster_tables.h: cluster -> mysql sql/item_sum.cc: Usage of new 'offset' parameters sql/key.cc: Changed 'find_ref_key' to not be dependent on field->table (This fixed a crash when using a table created in MySQL 3.23) sql/mysql_priv.h: Changed find_ref_key() to not use field->table sql/mysqld.cc: Fix that plugin_init() works properly on bootstrap (Previously we got warnings from plugin_init) sql/sql_class.cc: new 'field->offset()' sql/sql_select.cc: New field->offset() sql/table.cc: We don't need field->table to be set in find_ref_key() or find_field() anymore. storage/archive/ha_archive.cc: New field->offset() storage/federated/ha_federated.cc: New field->offset() storage/innobase/handler/ha_innodb.cc: Fixed compiler warnings storage/myisam/ha_myisam.cc: Fixed compiler warnings (Wrong paramter to sprintf()) New field->offset() storage/myisam/ha_myisam.h: Fixed wrong type of table_flags storage/ndb/tools/restore/Restore.cpp: Fixed compiler warning mysql-test/r/1st.result: New BitKeeper file ``mysql-test/r/1st.result'' mysql-test/std_data/old_table-323.frm: New BitKeeper file ``mysql-test/std_data/old_table-323.frm'' mysql-test/t/1st.test: New BitKeeper file ``mysql-test/t/1st.test'' --- client/client_priv.h | 3 +- client/mysql.cc | 23 +++--- client/mysql_upgrade.c | 2 +- client/mysqladmin.cc | 8 +- client/mysqlbinlog.cc | 10 ++- client/mysqlcheck.c | 10 ++- client/mysqlimport.c | 8 +- client/mysqlshow.c | 13 ++-- client/mysqltest.c | 23 ++++-- libmysql/libmysql.c | 5 +- mysql-test/include/ndb_setup_slave.inc | 4 +- mysql-test/include/query_cache.inc | 80 +++++++++++++++++++- mysql-test/lib/init_db.sql | 3 +- mysql-test/lib/mtr_report.pl | 3 +- mysql-test/mysql-test-run-shell.sh | 16 ++-- mysql-test/mysql-test-run.pl | 35 ++++----- mysql-test/r/1st.result | 29 ++++++++ mysql-test/r/cache_innodb.result | 91 +++++++++++++++++++++++ mysql-test/r/connect.result | 3 + mysql-test/r/drop.result | 2 - mysql-test/r/information_schema.result | 7 +- mysql-test/r/mysqlcheck.result | 3 +- mysql-test/r/ndb_binlog_basic.result | 14 ++-- mysql-test/r/ndb_binlog_ddl_multi.result | 6 +- mysql-test/r/ndb_binlog_discover.result | 2 +- mysql-test/r/ndb_binlog_multi.result | 18 ++--- mysql-test/r/ndb_restore_compat.result | 4 +- mysql-test/r/ps_1general.result | 1 - mysql-test/r/rpl_create_database.result | 4 - mysql-test/r/rpl_load_from_master.result | 3 - mysql-test/r/rpl_loaddata_m.result | 1 - mysql-test/r/rpl_ndb_bank.result | 12 +-- mysql-test/r/rpl_ndb_dd_advance.result | 10 +-- mysql-test/r/rpl_ndb_dd_basic.result | 2 +- mysql-test/r/rpl_ndb_idempotent.result | 4 +- mysql-test/r/rpl_ndb_log.result | 24 +++--- mysql-test/r/rpl_ndb_multi.result | 4 +- mysql-test/r/rpl_ndb_sync.result | 8 +- mysql-test/r/rpl_row_basic_11bugs.result | 2 - mysql-test/r/rpl_truncate_7ndb.result | 52 ++++++------- mysql-test/r/rpl_truncate_7ndb_2.result | 8 +- 
mysql-test/r/schema.result | 1 - mysql-test/r/show_check.result | 1 - mysql-test/r/system_mysql_db.result | 1 + mysql-test/r/upgrade.result | 2 + mysql-test/std_data/old_table-323.frm | Bin 0 -> 8606 bytes mysql-test/t/1st.test | 5 ++ mysql-test/t/information_schema.test | 4 +- mysql-test/t/mysqldump.test | 3 + mysql-test/t/ndb_binlog_basic.test | 14 ++-- mysql-test/t/ndb_binlog_multi.test | 10 +-- mysql-test/t/ndb_restore_compat.test | 4 +- mysql-test/t/rpl_ndb_bank.test | 8 +- mysql-test/t/rpl_ndb_dd_advance.test | 10 +-- mysql-test/t/rpl_ndb_idempotent.test | 6 +- mysql-test/t/rpl_ndb_multi.test | 6 +- mysql-test/t/rpl_ndb_sync.test | 6 +- mysql-test/t/system_mysql_db_fix.test | 2 +- mysql-test/t/upgrade.test | 9 +++ mysys/my_error.c | 12 +++ mysys/my_init.c | 4 +- mysys/mysys_priv.h | 2 + scripts/mysql_create_system_tables.sh | 3 +- scripts/mysql_fix_privilege_tables.sql | 86 ++++++++++----------- sql/field.cc | 6 -- sql/field.h | 5 +- sql/field_conv.cc | 12 +-- sql/ha_ndbcluster_tables.h | 2 +- sql/item_sum.cc | 11 ++- sql/key.cc | 49 ++++++++---- sql/mysql_priv.h | 3 +- sql/mysqld.cc | 2 +- sql/sql_class.cc | 4 +- sql/sql_select.cc | 9 ++- sql/table.cc | 20 ++--- storage/archive/ha_archive.cc | 18 +++-- storage/federated/ha_federated.cc | 3 +- storage/innobase/handler/ha_innodb.cc | 4 +- storage/myisam/ha_myisam.cc | 14 ++-- storage/myisam/ha_myisam.h | 2 +- storage/ndb/tools/restore/Restore.cpp | 2 +- 81 files changed, 596 insertions(+), 329 deletions(-) create mode 100644 mysql-test/r/1st.result create mode 100644 mysql-test/std_data/old_table-323.frm create mode 100644 mysql-test/t/1st.test diff --git a/client/client_priv.h b/client/client_priv.h index bcaa74d3228..78457a4977d 100644 --- a/client/client_priv.h +++ b/client/client_priv.h @@ -58,5 +58,6 @@ enum options_client OPT_IGNORE_TABLE,OPT_INSERT_IGNORE,OPT_SHOW_WARNINGS,OPT_DROP_DATABASE, OPT_TZ_UTC, OPT_AUTO_CLOSE, OPT_CREATE_SLAP_SCHEMA, OPT_MYSQL_REPLACE_INTO, OPT_BASE64_OUTPUT, OPT_SERVER_ID, - OPT_FIX_TABLE_NAMES, OPT_FIX_DB_NAMES, OPT_SSL_VERIFY_SERVER_CERT + OPT_FIX_TABLE_NAMES, OPT_FIX_DB_NAMES, OPT_SSL_VERIFY_SERVER_CERT, + OPT_DEBUG_INFO, OPT_COLUMN_TYPES }; diff --git a/client/mysql.cc b/client/mysql.cc index 1dbdeb8be97..687894d2a4c 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -44,7 +44,7 @@ #include #endif -const char *VER= "14.12"; +const char *VER= "14.13"; /* Don't try to make a nice table if the data is too big */ #define MAX_COLUMN_LENGTH 1024 @@ -140,6 +140,7 @@ static my_bool info_flag=0,ignore_errors=0,wait_flag=0,quick=0, default_charset_used= 0, opt_secure_auth= 0, default_pager_set= 0, opt_sigint_ignore= 0, show_warnings= 0, executing_query= 0, interrupted_query= 0; +static my_bool column_types_flag; static ulong opt_max_allowed_packet, opt_net_buffer_length; static uint verbose=0,opt_silent=0,opt_mysql_port=0, opt_local_infile=0; static my_string opt_mysql_unix_port=0; @@ -530,7 +531,7 @@ sig_handler mysql_end(int sig) my_free(current_prompt,MYF(MY_ALLOW_ZERO_PTR)); mysql_server_end(); free_defaults(defaults_argv); - my_end(info_flag ? MY_CHECK_ERROR | MY_GIVE_INFO : 0); + my_end(info_flag ? 
MY_CHECK_ERROR : 0); exit(status.exit_status); } @@ -585,12 +586,13 @@ static struct my_option my_long_options[] = {"character-sets-dir", OPT_CHARSETS_DIR, "Directory where character sets are.", (gptr*) &charsets_dir, (gptr*) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"default-character-set", OPT_DEFAULT_CHARSET, - "Set the default character set.", (gptr*) &default_charset, - (gptr*) &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"column-type-info", OPT_COLUMN_TYPES, "Display column type information.", + (gptr*) &column_types_flag, (gptr*) &column_types_flag, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"compress", 'C', "Use compression in server/client protocol.", (gptr*) &opt_compress, (gptr*) &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + #ifdef DBUG_OFF {"debug", '#', "This is a non-debug version. Catch this and exit", 0,0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0}, @@ -598,8 +600,13 @@ static struct my_option my_long_options[] = {"debug", '#', "Output debug log", (gptr*) &default_dbug_option, (gptr*) &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif + {"debug-info", 'T', "Print some debug info at exit.", (gptr*) &info_flag, + (gptr*) &info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"database", 'D', "Database to use.", (gptr*) ¤t_db, (gptr*) ¤t_db, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"default-character-set", OPT_DEFAULT_CHARSET, + "Set the default character set.", (gptr*) &default_charset, + (gptr*) &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"delimiter", OPT_DELIMITER, "Delimiter to be used.", (gptr*) &delimiter_str, (gptr*) &delimiter_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"execute", 'e', "Execute command and quit. (Disables --force and history file)", 0, @@ -696,8 +703,6 @@ static struct my_option my_long_options[] = #include "sslopt-longopts.h" {"table", 't', "Output in table format.", (gptr*) &output_tables, (gptr*) &output_tables, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"debug-info", 'T', "Print some debug info at exit.", (gptr*) &info_flag, - (gptr*) &info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"tee", OPT_TEE, "Append everything into outfile. See interactive help (\\h) also. Does not work in batch mode. Disable with --disable-tee. This option is disabled by default.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -2070,7 +2075,7 @@ com_go(String *buffer,char *line __attribute__((unused))) time_buff[0]=0; if (result) { - if (!mysql_num_rows(result) && ! quick && !info_flag) + if (!mysql_num_rows(result) && ! quick && !column_types_flag) { strmov(buff, "Empty set"); } @@ -2307,7 +2312,7 @@ print_table_data(MYSQL_RES *result) bool *num_flag; num_flag=(bool*) my_alloca(sizeof(bool)*mysql_num_fields(result)); - if (info_flag) + if (column_types_flag) { print_field_types(result); if (!mysql_num_rows(result)) diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c index 6ec361392c8..b3146ab75cc 100644 --- a/client/mysql_upgrade.c +++ b/client/mysql_upgrade.c @@ -418,6 +418,6 @@ fix_priv_tables: err_exit: if (upgrade_defaults_created) my_delete(upgrade_defaults_path, MYF(0)); - my_end(info_flag ? MY_CHECK_ERROR | MY_GIVE_INFO : 0); + my_end(info_flag ? 
MY_CHECK_ERROR : 0); return error; } /* main */ diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc index 57ab4e071fb..bde0a5fa143 100644 --- a/client/mysqladmin.cc +++ b/client/mysqladmin.cc @@ -28,7 +28,7 @@ #include "../ndb/src/mgmclient/ndb_mgmclient.h" #endif -#define ADMIN_VERSION "8.41" +#define ADMIN_VERSION "8.42" #define MAX_MYSQL_VAR 256 #define SHUTDOWN_DEF_TIMEOUT 3600 /* Wait for shutdown */ #define MAX_TRUNC_LENGTH 3 @@ -41,7 +41,7 @@ ulonglong last_values[MAX_MYSQL_VAR]; static int interval=0; static my_bool option_force=0,interrupted=0,new_line=0, opt_compress=0, opt_relative=0, opt_verbose=0, opt_vertical=0, - tty_password=0; + tty_password= 0, info_flag= 0; static uint tcp_port = 0, option_wait = 0, option_silent=0, nr_iterations, opt_count_iterations= 0; static ulong opt_connect_timeout, opt_shutdown_timeout; @@ -136,6 +136,8 @@ static struct my_option my_long_options[] = REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", (gptr*) &info_flag, + (gptr*) &info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"force", 'f', "Don't ask for confirmation on drop database; with multiple commands, continue even if an error occurs.", (gptr*) &option_force, (gptr*) &option_force, 0, GET_BOOL, NO_ARG, 0, 0, @@ -412,7 +414,7 @@ int main(int argc,char *argv[]) my_free(shared_memory_base_name,MYF(MY_ALLOW_ZERO_PTR)); #endif free_defaults(save_argv); - my_end(0); + my_end(info_flag ? MY_CHECK_ERROR : 0); exit(error ? 1 : 0); return 0; } diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index ab94c415db7..6305923bb0e 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -66,7 +66,7 @@ static bool one_database=0, to_last_remote_log= 0, disable_log_bin= 0; static bool opt_hexdump= 0; static bool opt_base64_output= 0; static const char* database= 0; -static my_bool force_opt= 0, short_form= 0, remote_opt= 0; +static my_bool force_opt= 0, short_form= 0, remote_opt= 0, info_flag; static ulonglong offset = 0; static const char* host = 0; static int port= 0; @@ -716,6 +716,8 @@ static struct my_option my_long_options[] = {"debug", '#', "Output debug log.", (gptr*) &default_dbug_option, (gptr*) &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif + {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", (gptr*) &info_flag, + (gptr*) &info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"disable-log-bin", 'D', "Disable binary log. This is useful, if you " "enabled --to-last-log and are sending the output to the same MySQL server. " "This way you could avoid an endless loop. You would also like to use it " @@ -844,7 +846,7 @@ static void die(const char* fmt, ...) va_end(args); cleanup(); /* We cannot free DBUG, it is used in global destructors after exit(). */ - my_end(MY_DONT_FREE_DBUG); + my_end((info_flag ? MY_CHECK_ERROR : 0) | MY_DONT_FREE_DBUG); exit(1); } @@ -852,7 +854,7 @@ static void die(const char* fmt, ...) static void print_version() { - printf("%s Ver 3.1 for %s at %s\n", my_progname, SYSTEM_TYPE, MACHINE_TYPE); + printf("%s Ver 3.2 for %s at %s\n", my_progname, SYSTEM_TYPE, MACHINE_TYPE); NETWARE_SET_SCREEN_MODE(1); } @@ -1530,7 +1532,7 @@ int main(int argc, char** argv) free_defaults(defaults_argv); my_free_open_file_info(); /* We cannot free DBUG, it is used in global destructors after exit(). */ - my_end(MY_DONT_FREE_DBUG); + my_end((info_flag ? 
MY_CHECK_ERROR : 0) | MY_DONT_FREE_DBUG); exit(exit_value); DBUG_RETURN(exit_value); // Keep compilers happy } diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c index fdfd9fc36fb..09ddaadf233 100644 --- a/client/mysqlcheck.c +++ b/client/mysqlcheck.c @@ -16,7 +16,7 @@ /* By Jani Tolonen, 2001-04-20, MySQL Development Team */ -#define CHECK_VERSION "2.4.4" +#define CHECK_VERSION "2.4.5" #include "client_priv.h" #include @@ -34,7 +34,7 @@ static my_bool opt_alldbs = 0, opt_check_only_changed = 0, opt_extended = 0, opt_compress = 0, opt_databases = 0, opt_fast = 0, opt_medium_check = 0, opt_quick = 0, opt_all_in_1 = 0, opt_silent = 0, opt_auto_repair = 0, ignore_errors = 0, - tty_password = 0, opt_frm = 0, + tty_password= 0, opt_frm= 0, info_flag= 0, opt_fix_table_names= 0, opt_fix_db_names= 0, opt_upgrade= 0; static uint verbose = 0, opt_mysql_port=0; static my_string opt_mysql_unix_port = 0; @@ -96,6 +96,8 @@ static struct my_option my_long_options[] = {"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif + {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", (gptr*) &info_flag, + (gptr*) &info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"default-character-set", OPT_DEFAULT_CHARSET, "Set the default character set.", (gptr*) &default_charset, (gptr*) &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -761,7 +763,7 @@ int main(int argc, char **argv) */ if (get_options(&argc, &argv)) { - my_end(0); + my_end(info_flag ? MY_CHECK_ERROR : 0); exit(EX_USAGE); } if (dbConnect(current_host, current_user, opt_password)) @@ -803,6 +805,6 @@ int main(int argc, char **argv) #ifdef HAVE_SMEM my_free(shared_memory_base_name,MYF(MY_ALLOW_ZERO_PTR)); #endif - my_end(0); + my_end(info_flag ? MY_CHECK_ERROR : 0); return(first_error!=0); } /* main */ diff --git a/client/mysqlimport.c b/client/mysqlimport.c index 2ef08c9a504..447c3322de7 100644 --- a/client/mysqlimport.c +++ b/client/mysqlimport.c @@ -25,7 +25,7 @@ ** * * ** ************************* */ -#define IMPORT_VERSION "3.5" +#define IMPORT_VERSION "3.6" #include "client_priv.h" #include "mysql_version.h" @@ -50,7 +50,7 @@ static char *add_load_option(char *ptr,const char *object, static my_bool verbose=0,lock_tables=0,ignore_errors=0,opt_delete=0, replace=0,silent=0,ignore=0,opt_compress=0, opt_low_priority= 0, tty_password= 0; -static my_bool opt_use_threads= 0; +static my_bool opt_use_threads= 0, info_flag= 0; static uint opt_local_file=0; static char *opt_password=0, *current_user=0, *current_host=0, *current_db=0, *fields_terminated=0, @@ -88,6 +88,8 @@ static struct my_option my_long_options[] = 0, 0, 0}, {"debug",'#', "Output debug log. Often this is 'd:t:o,filename'.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", (gptr*) &info_flag, + (gptr*) &info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"delete", 'd', "First delete all rows from table.", (gptr*) &opt_delete, (gptr*) &opt_delete, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"fields-terminated-by", OPT_FTB, @@ -663,6 +665,6 @@ int main(int argc, char **argv) my_free(shared_memory_base_name,MYF(MY_ALLOW_ZERO_PTR)); #endif free_defaults(argv_to_free); - my_end(0); + my_end(info_flag ? 
MY_CHECK_ERROR : 0); return(exitcode); } diff --git a/client/mysqlshow.c b/client/mysqlshow.c index 40405c53565..153bddc48d7 100644 --- a/client/mysqlshow.c +++ b/client/mysqlshow.c @@ -16,7 +16,7 @@ /* Show databases, tables or columns */ -#define SHOW_VERSION "9.5" +#define SHOW_VERSION "9.6" #include "client_priv.h" #include @@ -28,8 +28,8 @@ #include static my_string host=0,opt_password=0,user=0; -static my_bool opt_show_keys= 0, opt_compress= 0, opt_count=0, opt_status= 0, - tty_password= 0, opt_table_type= 0; +static my_bool opt_show_keys= 0, opt_compress= 0, opt_count=0, opt_status= 0; +static my_bool tty_password= 0, opt_table_type= 0, info_flag= 0; static uint opt_verbose=0; static char *default_charset= (char*) MYSQL_DEFAULT_CHARSET_NAME; @@ -129,8 +129,7 @@ int main(int argc, char **argv) } mysql.reconnect= 1; - switch (argc) - { + switch (argc) { case 0: error=list_dbs(&mysql,wild); break; case 1: if (opt_status) @@ -151,7 +150,7 @@ int main(int argc, char **argv) #ifdef HAVE_SMEM my_free(shared_memory_base_name,MYF(MY_ALLOW_ZERO_PTR)); #endif - my_end(0); + my_end(info_flag ? MY_CHECK_ERROR : 0); exit(error ? 1 : 0); return 0; /* No compiler warnings */ } @@ -177,6 +176,8 @@ static struct my_option my_long_options[] = 0, 0, 0}, {"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", (gptr*) &info_flag, + (gptr*) &info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"help", '?', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host.", (gptr*) &host, (gptr*) &host, 0, GET_STR, diff --git a/client/mysqltest.c b/client/mysqltest.c index f9c4ae617fd..c0d3d378563 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -97,7 +97,7 @@ enum {OPT_SKIP_SAFEMALLOC=256, OPT_SSL_SSL, OPT_SSL_KEY, OPT_SSL_CERT, OPT_SSL_CA, OPT_SSL_CAPATH, OPT_SSL_CIPHER, OPT_PS_PROTOCOL, OPT_SP_PROTOCOL, OPT_CURSOR_PROTOCOL, OPT_VIEW_PROTOCOL, OPT_SSL_VERIFY_SERVER_CERT, OPT_MAX_CONNECT_RETRIES, - OPT_MARK_PROGRESS}; + OPT_MARK_PROGRESS, OPT_LOG_DIR, OPT_DEBUG_INFO}; /* ************************************************************************ */ /* @@ -145,6 +145,7 @@ static uint global_expected_errors; static int record= 0, opt_sleep= -1; static char *db = 0, *pass=0; const char *user = 0, *host = 0, *unix_sock = 0, *opt_basedir="./"; +const char *opt_logdir= ""; const char *opt_include= 0; static int port = 0; static int opt_max_connect_retries; @@ -155,7 +156,7 @@ static my_bool ps_protocol= 0, ps_protocol_enabled= 0; static my_bool sp_protocol= 0, sp_protocol_enabled= 0; static my_bool view_protocol= 0, view_protocol_enabled= 0; static my_bool cursor_protocol= 0, cursor_protocol_enabled= 0; -static my_bool opt_valgrind_test= 0; +static my_bool opt_valgrind_test= 0, info_flag; static int parsing_disabled= 0; static char **default_argv; @@ -658,7 +659,7 @@ static void die(const char *fmt, ...) /* Clean up and exit */ free_used_memory(); - my_end(MY_CHECK_ERROR); + my_end(info_flag ? MY_CHECK_ERROR | MY_GIVE_INFO : MY_CHECK_ERROR); if (!silent) printf("not ok\n"); @@ -698,7 +699,7 @@ static void abort_not_supported_test(const char *fmt, ...) /* Clean up and exit */ free_used_memory(); - my_end(MY_CHECK_ERROR); + my_end(info_flag ? MY_CHECK_ERROR | MY_GIVE_INFO : MY_CHECK_ERROR); if (!silent) printf("skipped\n"); @@ -3295,12 +3296,16 @@ static struct my_option my_long_options[] = {"debug", '#', "Output debug log. 
Often this is 'd:t:o,filename'.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif + {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", (gptr*) &info_flag, + (gptr*) &info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host.", (gptr*) &host, (gptr*) &host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"include", 'i', "Include SQL before each test case.", (gptr*) &opt_include, (gptr*) &opt_include, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"logdir", OPT_LOG_DIR, "Directory for log files", (gptr*) &opt_logdir, + (gptr*) &opt_logdir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"mark-progress", OPT_MARK_PROGRESS, - "Write linenumber and elapsed time to .progress ", + "Write linenumber and elapsed time to .progress", (gptr*) &opt_mark_progress, (gptr*) &opt_mark_progress, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"max-connect-retries", OPT_MAX_CONNECT_RETRIES, @@ -3538,7 +3543,8 @@ void dump_result_to_reject_file(const char *record_file, char *buf, int size) void dump_result_to_log_file(const char *record_file, char *buf, int size) { char log_file[FN_REFLEN]; - str_to_file(fn_format(log_file, record_file, "", ".log", + str_to_file(fn_format(log_file, record_file, opt_logdir, ".log", + *opt_logdir ? MY_REPLACE_DIR | MY_REPLACE_EXT: MY_REPLACE_EXT), buf, size); } @@ -3546,7 +3552,8 @@ void dump_result_to_log_file(const char *record_file, char *buf, int size) void dump_progress(const char *record_file) { char log_file[FN_REFLEN]; - str_to_file(fn_format(log_file, record_file, "", ".progress", + str_to_file(fn_format(log_file, record_file, opt_logdir, ".progress", + *opt_logdir ? MY_REPLACE_DIR | MY_REPLACE_EXT: MY_REPLACE_EXT), ds_progress.str, ds_progress.length); } @@ -5600,7 +5607,7 @@ int main(int argc, char **argv) if (!got_end_timer) timer_output(); /* No end_timer cmd, end it */ free_used_memory(); - my_end(MY_CHECK_ERROR); + my_end(info_flag ? MY_CHECK_ERROR | MY_GIVE_INFO : MY_CHECK_ERROR); /* Yes, if we got this far the test has suceeded! Sakila smiles */ if (!silent) diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index e80b142e419..64f8f1be1e0 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -176,16 +176,15 @@ void STDCALL mysql_server_end() end_embedded_server(); #endif /* If library called my_init(), free memory allocated by it */ + finish_client_errs(); if (!org_my_init_done) { my_end(MY_DONT_FREE_DBUG); - /* Remove TRACING, if enabled by mysql_debug() */ + /* Remove TRACING, if enabled by mysql_debug() */ DBUG_POP(); } else mysql_thread_end(); - finish_client_errs(); - free_charsets(); vio_end(); mysql_client_init= org_my_init_done= 0; #ifdef EMBEDDED_SERVER diff --git a/mysql-test/include/ndb_setup_slave.inc b/mysql-test/include/ndb_setup_slave.inc index b1efeded90b..3cda48755b9 100644 --- a/mysql-test/include/ndb_setup_slave.inc +++ b/mysql-test/include/ndb_setup_slave.inc @@ -7,7 +7,7 @@ # 1. --connection slave --replace_column 1 -SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status; +SELECT @the_epoch:=MAX(epoch) FROM mysql.apply_status; --let $the_epoch= `select @the_epoch` # 2. 
@@ -15,7 +15,7 @@ SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status; --replace_result $the_epoch --replace_column 1 eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) - FROM cluster.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1; + FROM mysql.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1; --let $the_pos= `SELECT @the_pos` --let $the_file= `SELECT @the_file` diff --git a/mysql-test/include/query_cache.inc b/mysql-test/include/query_cache.inc index 3b63167a737..70249a9a5aa 100644 --- a/mysql-test/include/query_cache.inc +++ b/mysql-test/include/query_cache.inc @@ -100,4 +100,82 @@ eval set GLOBAL query_cache_size=$save_query_cache_size; --enable_query_log } -# End of 4.1 tests +# +# Test query cache with two interleaving transactions +# + +# Establish connection1 +connect (connection1,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; +SET @@autocommit=1; + +connection default; +--echo connection default +-- This should be 'YES'. +SHOW VARIABLES LIKE 'have_query_cache'; + +SET GLOBAL query_cache_size = 200000; +flush status; +SET @@autocommit=1; +eval SET SESSION STORAGE_ENGINE = $engine_type; +CREATE TABLE t2 (s1 int, s2 varchar(1000), key(s1)); +INSERT INTO t2 VALUES (1,repeat('a',10)),(2,repeat('a',10)),(3,repeat('a',10)),(4,repeat('a',10)); +COMMIT; +START TRANSACTION; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +UPDATE t2 SET s2 = 'w' WHERE s1 = 3; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +show status like "Qcache_queries_in_cache"; + +connection connection1; +--echo connection connection1 +START TRANSACTION; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +INSERT INTO t2 VALUES (5,'w'); +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +COMMIT; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; + +show status like "Qcache_queries_in_cache"; + +connection default; +--echo connection default +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +COMMIT; + +show status like "Qcache_queries_in_cache"; + +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +show status like "Qcache_queries_in_cache"; + +connection connection1; +--echo connection connection1 +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; + +START TRANSACTION; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +INSERT INTO t2 VALUES (6,'w'); +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; + +connection default; +--echo connection default +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +START TRANSACTION; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +DELETE from t2 WHERE s1=3; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +COMMIT; + +connection connection1; +--echo connection connection1 + +COMMIT; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; + +show status like "Qcache_queries_in_cache"; +show status like "Qcache_hits"; + +# Final cleanup +connection default; +drop table t2; +disconnect connection1; diff --git a/mysql-test/lib/init_db.sql b/mysql-test/lib/init_db.sql index a5736ed4b9b..b7618b3ab46 100644 --- a/mysql-test/lib/init_db.sql +++ b/mysql-test/lib/init_db.sql @@ -634,5 +634,4 @@ CREATE TABLE event ( PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events'; -CREATE DATABASE IF NOT EXISTS cluster; -CREATE TABLE IF NOT EXISTS cluster.binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, 
PRIMARY KEY(epoch)) ENGINE=MYISAM; +CREATE TABLE IF NOT EXISTS mysql.binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM; diff --git a/mysql-test/lib/mtr_report.pl b/mysql-test/lib/mtr_report.pl index 6e3796133f2..1ef5787ba3e 100644 --- a/mysql-test/lib/mtr_report.pl +++ b/mysql-test/lib/mtr_report.pl @@ -38,7 +38,7 @@ sub mtr_show_failed_diff ($) { my $reject_file= "r/$tname.reject"; my $result_file= "r/$tname.result"; - my $log_file= "r/$tname.log"; + my $log_file= "$::opt_vardir/log/$tname.log"; my $eval_file= "r/$tname.eval"; if ( $::opt_suite ne "main" ) @@ -251,6 +251,7 @@ sub mtr_report_stats ($) { foreach my $pattern ( "^Warning:", "^Error:", "^==.* at 0x", "InnoDB: Warning", "missing DBUG_RETURN", "mysqld: Warning", + "allocated at line", "Attempting backtrace", "Assertion .* failed" ) { foreach my $errlog ( sort glob("$::opt_vardir/log/*.err") ) diff --git a/mysql-test/mysql-test-run-shell.sh b/mysql-test/mysql-test-run-shell.sh index f2200c4be07..56ca7f3ec74 100644 --- a/mysql-test/mysql-test-run-shell.sh +++ b/mysql-test/mysql-test-run-shell.sh @@ -882,15 +882,15 @@ fi # Save path and name of mysqldump MYSQL_DUMP_DIR="$MYSQL_DUMP" export MYSQL_DUMP_DIR -MYSQL_CHECK="$MYSQL_CHECK --no-defaults -uroot --socket=$MASTER_MYSOCK --password=$DBPASSWD $EXTRA_MYSQLCHECK_OPT" -MYSQL_DUMP="$MYSQL_DUMP --no-defaults -uroot --socket=$MASTER_MYSOCK --password=$DBPASSWD $EXTRA_MYSQLDUMP_OPT" +MYSQL_CHECK="$MYSQL_CHECK --no-defaults --debug-info -uroot --socket=$MASTER_MYSOCK --password=$DBPASSWD $EXTRA_MYSQLCHECK_OPT" +MYSQL_DUMP="$MYSQL_DUMP --no-defaults --debug-info -uroot --socket=$MASTER_MYSOCK --password=$DBPASSWD $EXTRA_MYSQLDUMP_OPT" MYSQL_SLAP="$MYSQL_SLAP -uroot --socket=$MASTER_MYSOCK --password=$DBPASSWD $EXTRA_MYSQLSLAP_OPT" MYSQL_DUMP_SLAVE="$MYSQL_DUMP_DIR --no-defaults -uroot --socket=$SLAVE_MYSOCK --password=$DBPASSWD $EXTRA_MYSQLDUMP_OPT" -MYSQL_SHOW="$MYSQL_SHOW -uroot --socket=$MASTER_MYSOCK --password=$DBPASSWD $EXTRA_MYSQLSHOW_OPT" -MYSQL_BINLOG="$MYSQL_BINLOG --no-defaults --local-load=$MYSQL_TMP_DIR --character-sets-dir=$CHARSETSDIR $EXTRA_MYSQLBINLOG_OPT" -MYSQL_IMPORT="$MYSQL_IMPORT -uroot --socket=$MASTER_MYSOCK --password=$DBPASSWD $EXTRA_MYSQLDUMP_OPT" +MYSQL_SHOW="$MYSQL_SHOW --no-defaults --debug-info -uroot --socket=$MASTER_MYSOCK --password=$DBPASSWD $EXTRA_MYSQLSHOW_OPT" +MYSQL_BINLOG="$MYSQL_BINLOG --debug-info --no-defaults --local-load=$MYSQL_TMP_DIR --character-sets-dir=$CHARSETSDIR $EXTRA_MYSQLBINLOG_OPT" +MYSQL_IMPORT="$MYSQL_IMPORT --debug-info -uroot --socket=$MASTER_MYSOCK --password=$DBPASSWD $EXTRA_MYSQLDUMP_OPT" MYSQL_FIX_SYSTEM_TABLES="$MYSQL_FIX_SYSTEM_TABLES --no-defaults --host=localhost --port=$MASTER_MYPORT --socket=$MASTER_MYSOCK --user=root --password=$DBPASSWD --basedir=$BASEDIR --bindir=$CLIENT_BINDIR --verbose" -MYSQL="$MYSQL --no-defaults --host=localhost --port=$MASTER_MYPORT --socket=$MASTER_MYSOCK --user=root --password=$DBPASSWD" +MYSQL="$MYSQL --no-defaults --debug-info --host=localhost --port=$MASTER_MYPORT --socket=$MASTER_MYSOCK --user=root --password=$DBPASSWD" export MYSQL MYSQL_CHECK MYSQL_DUMP MYSQL_DUMP_SLAVE MYSQL_SHOW MYSQL_BINLOG MYSQL_FIX_SYSTEM_TABLES MYSQL_IMPORT export CLIENT_BINDIR MYSQL_CLIENT_TEST CHARSETSDIR MYSQL_MY_PRINT_DEFAULTS export MYSQL_SLAP @@ -1263,8 +1263,8 @@ 
start_ndbcluster() rm_ndbcluster_tables() { - $RM -f $1/cluster/apply_status* - $RM -f $1/cluster/schema* + $RM -f $1/mysql/apply_status* + $RM -f $1/mysql/schema* } stop_ndbcluster() diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index e152ec2d208..f9ed18b2a04 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -1330,7 +1330,7 @@ sub executable_setup () { sub generate_cmdline_mysqldump ($) { my($mysqld) = @_; return - "$exe_mysqldump --no-defaults -uroot " . + "$exe_mysqldump --no-defaults --debug-info -uroot " . "--port=$mysqld->{'port'} " . "--socket=$mysqld->{'path_sock'} --password="; } @@ -1468,7 +1468,7 @@ sub environment_setup () { # Setup env so childs can execute mysqlcheck # ---------------------------------------------------- my $cmdline_mysqlcheck= - "$exe_mysqlcheck --no-defaults -uroot " . + "$exe_mysqlcheck --no-defaults --debug-info -uroot " . "--port=$master->[0]->{'port'} " . "--socket=$master->[0]->{'path_sock'} --password="; @@ -1519,7 +1519,7 @@ sub environment_setup () { # Setup env so childs can execute mysqlimport # ---------------------------------------------------- my $cmdline_mysqlimport= - "$exe_mysqlimport -uroot " . + "$exe_mysqlimport --debug-info -uroot " . "--port=$master->[0]->{'port'} " . "--socket=$master->[0]->{'path_sock'} --password="; @@ -1535,7 +1535,7 @@ sub environment_setup () { # Setup env so childs can execute mysqlshow # ---------------------------------------------------- my $cmdline_mysqlshow= - "$exe_mysqlshow -uroot " . + "$exe_mysqlshow --debug-info -uroot " . "--port=$master->[0]->{'port'} " . "--socket=$master->[0]->{'path_sock'} --password="; @@ -1551,7 +1551,7 @@ sub environment_setup () { # ---------------------------------------------------- my $cmdline_mysqlbinlog= "$exe_mysqlbinlog" . - " --no-defaults --local-load=$opt_tmpdir" . + " --no-defaults --debug-info --local-load=$opt_tmpdir" . " --character-sets-dir=$path_charsetsdir"; if ( $opt_debug ) @@ -1565,7 +1565,7 @@ sub environment_setup () { # Setup env so childs can execute mysql # ---------------------------------------------------- my $cmdline_mysql= - "$exe_mysql --no-defaults --host=localhost --user=root --password= " . + "$exe_mysql --no-defaults --debug-info --host=localhost --user=root --password= " . "--port=$master->[0]->{'port'} " . "--socket=$master->[0]->{'path_sock'} ". 
"--character-sets-dir=$path_charsetsdir"; @@ -2116,8 +2116,8 @@ sub ndbcluster_start ($$) { sub rm_ndbcluster_tables ($) { my $dir= shift; - foreach my $bin ( glob("$dir/cluster/apply_status*"), - glob("$dir/cluster/schema*") ) + foreach my $bin ( glob("$dir/mysql/apply_status*"), + glob("$dir/mysql/schema*") ) { unlink($bin); } @@ -2616,11 +2616,11 @@ sub do_after_run_mysqltest($) my $tname= $tinfo->{'name'}; #MASV cleanup - # Save info from this testcase run to mysqltest.log - my $testcase_log= mtr_fromfile($path_timefile) if -f $path_timefile; - mtr_tofile($path_mysqltest_log,"CURRENT TEST $tname\n"); - mtr_tofile($path_mysqltest_log, $testcase_log); - } + # Save info from this testcase run to mysqltest.log + my $testcase_log= mtr_fromfile($path_timefile) if -f $path_timefile; + mtr_tofile($path_mysqltest_log,"CURRENT TEST $tname\n"); + mtr_tofile($path_mysqltest_log, $testcase_log); +} ############################################################################## @@ -2670,6 +2670,8 @@ sub run_testcase ($) { my $res= run_mysqltest($tinfo); mtr_report_test_name($tinfo); + do_after_run_mysqltest($tinfo); + if ( $res == 0 ) { mtr_report_test_passed($tinfo); @@ -2702,8 +2704,6 @@ sub run_testcase ($) { report_failure_and_restart($tinfo); } - - do_after_run_mysqltest($tinfo); } # ---------------------------------------------------------------------- @@ -3643,11 +3643,11 @@ sub run_testcase_start_servers($) { # First wait for first mysql server to have created ndb system tables ok # FIXME This is a workaround so that only one mysqld creates the tables if ( ! sleep_until_file_created( - "$master->[0]->{'path_myddir'}/cluster/apply_status.ndb", + "$master->[0]->{'path_myddir'}/mysql/apply_status.ndb", $master->[0]->{'start_timeout'}, $master->[0]->{'pid'})) { - mtr_report("Failed to create 'cluster/apply_status' table"); + mtr_report("Failed to create 'mysql/apply_status' table"); report_failure_and_restart($tinfo); return; } @@ -3808,6 +3808,7 @@ sub run_mysqltest ($) { mtr_add_arg($args, "-v"); mtr_add_arg($args, "--skip-safemalloc"); mtr_add_arg($args, "--tmpdir=%s", $opt_tmpdir); + mtr_add_arg($args, "--logdir=%s/log", $opt_vardir); if ($tinfo->{'component_id'} eq 'im') { diff --git a/mysql-test/r/1st.result b/mysql-test/r/1st.result new file mode 100644 index 00000000000..7e35f1a7ce6 --- /dev/null +++ b/mysql-test/r/1st.result @@ -0,0 +1,29 @@ +show databases; +Database +information_schema +mysql +test +show tables in mysql; +Tables_in_mysql +binlog_index +columns_priv +db +event +func +general_log +help_category +help_keyword +help_relation +help_topic +host +plugin +proc +procs_priv +slow_log +tables_priv +time_zone +time_zone_leap_second +time_zone_name +time_zone_transition +time_zone_transition_type +user diff --git a/mysql-test/r/cache_innodb.result b/mysql-test/r/cache_innodb.result index 7f9b3e279a9..17cfcd69ec3 100644 --- a/mysql-test/r/cache_innodb.result +++ b/mysql-test/r/cache_innodb.result @@ -128,3 +128,94 @@ select t1.* from t1, t2, t3 where t3.state & 1 = 0 and t3.t1_id = t1.id and t3.t id a 1 me drop table t3,t2,t1; +SET SESSION STORAGE_ENGINE = InnoDB; +SET @@autocommit=1; +connection default +SHOW VARIABLES LIKE 'have_query_cache'; +Variable_name Value +have_query_cache YES +SET GLOBAL query_cache_size = 200000; +flush status; +SET @@autocommit=1; +SET SESSION STORAGE_ENGINE = InnoDB; +CREATE TABLE t2 (s1 int, s2 varchar(1000), key(s1)); +INSERT INTO t2 VALUES (1,repeat('a',10)),(2,repeat('a',10)),(3,repeat('a',10)),(4,repeat('a',10)); +COMMIT; +START TRANSACTION; 
+SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +0 +UPDATE t2 SET s2 = 'w' WHERE s1 = 3; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +1 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +connection connection1 +START TRANSACTION; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +0 +INSERT INTO t2 VALUES (5,'w'); +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +1 +COMMIT; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +1 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +connection default +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +1 +COMMIT; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +2 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +connection connection1 +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +2 +START TRANSACTION; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +2 +INSERT INTO t2 VALUES (6,'w'); +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +3 +connection default +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +2 +START TRANSACTION; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +2 +DELETE from t2 WHERE s1=3; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +1 +COMMIT; +connection connection1 +COMMIT; +SELECT sql_cache count(*) FROM t2 WHERE s2 = 'w'; +count(*) +2 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 2 +drop table t2; diff --git a/mysql-test/r/connect.result b/mysql-test/r/connect.result index 862260346f5..08710217afc 100644 --- a/mysql-test/r/connect.result +++ b/mysql-test/r/connect.result @@ -1,6 +1,7 @@ drop table if exists t1,t2; show tables; Tables_in_mysql +binlog_index columns_priv db event @@ -32,6 +33,7 @@ grant ALL on *.* to test@localhost identified by "gambling"; grant ALL on *.* to test@127.0.0.1 identified by "gambling"; show tables; Tables_in_mysql +binlog_index columns_priv db event @@ -71,6 +73,7 @@ ERROR HY000: Password hash should be a 41-digit hexadecimal number set password=old_password('gambling3'); show tables; Tables_in_mysql +binlog_index columns_priv db event diff --git a/mysql-test/r/drop.result b/mysql-test/r/drop.result index 53b2ea8c84a..d122dabc4ec 100644 --- a/mysql-test/r/drop.result +++ b/mysql-test/r/drop.result @@ -47,7 +47,6 @@ create database mysqltest; show databases; Database information_schema -cluster mysql mysqltest test @@ -59,7 +58,6 @@ drop database mysqltest; show databases; Database information_schema -cluster mysql test drop database mysqltest; diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index b73b59a433a..2502d900158 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -14,7 +14,6 @@ NULL test latin1 latin1_swedish_ci NULL select schema_name from information_schema.schemata; schema_name information_schema -cluster mysql test show databases like 't%'; @@ -23,7 +22,6 @@ test show databases; Database information_schema -cluster mysql test show databases where `database` = 't%'; @@ -35,7 +33,7 @@ create table t3(a int, KEY a_data (a)); create table mysqltest.t4(a int); create table t5 (id int auto_increment 
primary key); insert into t5 values (10); -create view v1 (c) as select table_name from information_schema.TABLES where table_schema!='cluster'; +create view v1 (c) as select table_name from information_schema.TABLES where table_name<>'binlog_index' AND table_name<>'apply_status'; select * from v1; c CHARACTER_SETS @@ -352,7 +350,6 @@ create view v0 (c) as select schema_name from information_schema.schemata; select * from v0; c information_schema -cluster mysql test explain select * from v0; @@ -851,7 +848,7 @@ VIEWS TABLE_NAME select delete from mysql.user where user='mysqltest_4'; delete from mysql.db where user='mysqltest_4'; flush privileges; -SELECT table_schema, count(*) FROM information_schema.TABLES where TABLE_SCHEMA!='cluster' GROUP BY TABLE_SCHEMA; +SELECT table_schema, count(*) FROM information_schema.TABLES where table_name<>'binlog_index' AND table_name<>'apply_status' GROUP BY TABLE_SCHEMA; table_schema count(*) information_schema 27 mysql 21 diff --git a/mysql-test/r/mysqlcheck.result b/mysql-test/r/mysqlcheck.result index c34aa995b2b..d6149981f37 100644 --- a/mysql-test/r/mysqlcheck.result +++ b/mysql-test/r/mysqlcheck.result @@ -2,7 +2,7 @@ drop database if exists client_test_db; DROP SCHEMA test; CREATE SCHEMA test; use test; -cluster.binlog_index OK +mysql.binlog_index OK mysql.columns_priv OK mysql.db OK mysql.event OK @@ -26,6 +26,7 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +mysql.binlog_index OK mysql.columns_priv OK mysql.db OK mysql.event OK diff --git a/mysql-test/r/ndb_binlog_basic.result b/mysql-test/r/ndb_binlog_basic.result index a8f88c2192e..43c19278d2c 100644 --- a/mysql-test/r/ndb_binlog_basic.result +++ b/mysql-test/r/ndb_binlog_basic.result @@ -6,7 +6,7 @@ drop database mysqltest; use test; create table t1 (a int primary key) engine=ndb; insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -select @max_epoch:=max(epoch)-1 from cluster.binlog_index; +select @max_epoch:=max(epoch)-1 from mysql.binlog_index; @max_epoch:=max(epoch)-1 # delete from t1; @@ -19,19 +19,19 @@ update t2 set b=1 where a=3; delete from t2 where a=4; commit; drop table t2; -select inserts from cluster.binlog_index where epoch > @max_epoch and inserts > 5; +select inserts from mysql.binlog_index where epoch > @max_epoch and inserts > 5; inserts 10 -select deletes from cluster.binlog_index where epoch > @max_epoch and deletes > 5; +select deletes from mysql.binlog_index where epoch > @max_epoch and deletes > 5; deletes 10 select inserts,updates,deletes from -cluster.binlog_index where epoch > @max_epoch and updates > 0; +mysql.binlog_index where epoch > @max_epoch and updates > 0; inserts updates deletes 2 1 1 flush logs; purge master logs before now(); -select count(*) from cluster.binlog_index; +select count(*) from mysql.binlog_index; count(*) 0 create table t1 (a int primary key, b int) engine=ndb; @@ -40,12 +40,12 @@ use mysqltest; create table t1 (c int, d int primary key) engine=ndb; use test; insert into mysqltest.t1 values (2,1),(2,2); -select @max_epoch:=max(epoch)-1 from cluster.binlog_index; +select @max_epoch:=max(epoch)-1 from mysql.binlog_index; @max_epoch:=max(epoch)-1 # drop table t1; drop database mysqltest; select inserts,updates,deletes from -cluster.binlog_index where epoch > @max_epoch and inserts > 0; +mysql.binlog_index where epoch > @max_epoch and inserts > 0; inserts updates deletes 2 0 0 diff --git a/mysql-test/r/ndb_binlog_ddl_multi.result b/mysql-test/r/ndb_binlog_ddl_multi.result 
index 19414cf75c5..b278bb25d25 100644 --- a/mysql-test/r/ndb_binlog_ddl_multi.result +++ b/mysql-test/r/ndb_binlog_ddl_multi.result @@ -44,7 +44,7 @@ show binlog events from ; Log_name Pos Event_type Server_id End_log_pos Info master-bin1.000001 # Query # # BEGIN master-bin1.000001 # Table_map # # table_id: # (test.t2) -master-bin1.000001 # Table_map # # table_id: # (cluster.apply_status) +master-bin1.000001 # Table_map # # table_id: # (mysql.apply_status) master-bin1.000001 # Write_rows # # table_id: # master-bin1.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin1.000001 # Query # # COMMIT @@ -180,14 +180,14 @@ Log_name Pos Event_type Server_id End_log_pos Info master-bin1.000001 # Query # # use `test`; create table t1 (a int key) engine=ndb master-bin1.000001 # Query # # BEGIN master-bin1.000001 # Table_map # # table_id: # (test.t1) -master-bin1.000001 # Table_map # # table_id: # (cluster.apply_status) +master-bin1.000001 # Table_map # # table_id: # (mysql.apply_status) master-bin1.000001 # Write_rows # # table_id: # master-bin1.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin1.000001 # Query # # COMMIT master-bin1.000001 # Query # # use `test`; rename table `test.t1` to `test.t2` master-bin1.000001 # Query # # BEGIN master-bin1.000001 # Table_map # # table_id: # (test.t2) -master-bin1.000001 # Table_map # # table_id: # (cluster.apply_status) +master-bin1.000001 # Table_map # # table_id: # (mysql.apply_status) master-bin1.000001 # Write_rows # # table_id: # master-bin1.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin1.000001 # Query # # COMMIT diff --git a/mysql-test/r/ndb_binlog_discover.result b/mysql-test/r/ndb_binlog_discover.result index 01e15dc1c39..e81d5cfc6f3 100644 --- a/mysql-test/r/ndb_binlog_discover.result +++ b/mysql-test/r/ndb_binlog_discover.result @@ -5,7 +5,7 @@ show binlog events from ; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query # # BEGIN master-bin.000001 # Table_map # # table_id: # (test.t1) -master-bin.000001 # Table_map # # table_id: # (cluster.apply_status) +master-bin.000001 # Table_map # # table_id: # (mysql.apply_status) master-bin.000001 # Write_rows # # table_id: # master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin.000001 # Query # # COMMIT diff --git a/mysql-test/r/ndb_binlog_multi.result b/mysql-test/r/ndb_binlog_multi.result index 119174039f9..ffd0b44484b 100644 --- a/mysql-test/r/ndb_binlog_multi.result +++ b/mysql-test/r/ndb_binlog_multi.result @@ -11,7 +11,7 @@ Log_name Pos Event_type Server_id End_log_pos Info master-bin1.000001 # Query # # use `test`; CREATE TABLE t2 (a INT PRIMARY KEY, b int) ENGINE = NDB master-bin1.000001 # Query # # BEGIN master-bin1.000001 # Table_map # # table_id: # (test.t2) -master-bin1.000001 # Table_map # # table_id: # (cluster.apply_status) +master-bin1.000001 # Table_map # # table_id: # (mysql.apply_status) master-bin1.000001 # Write_rows # # table_id: # master-bin1.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin1.000001 # Query # # COMMIT @@ -20,7 +20,7 @@ a b 1 1 2 2 SELECT @the_epoch:=epoch,inserts,updates,deletes,schemaops FROM -cluster.binlog_index ORDER BY epoch DESC LIMIT 1; +mysql.binlog_index ORDER BY epoch DESC LIMIT 1; @the_epoch:=epoch inserts updates deletes schemaops 2 0 0 0 SELECT * FROM t2 ORDER BY a; @@ -33,13 +33,13 @@ Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query # # use `test`; CREATE TABLE t2 (a INT PRIMARY KEY, b int) ENGINE = NDB master-bin.000001 # 
Query # # BEGIN master-bin.000001 # Table_map # # table_id: # (test.t2) -master-bin.000001 # Table_map # # table_id: # (cluster.apply_status) +master-bin.000001 # Table_map # # table_id: # (mysql.apply_status) master-bin.000001 # Write_rows # # table_id: # master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin.000001 # Query # # COMMIT master-bin.000001 # Query # # use `test`; DROP TABLE t2 SELECT inserts,updates,deletes,schemaops FROM -cluster.binlog_index WHERE epoch=; +mysql.binlog_index WHERE epoch=; inserts updates deletes schemaops 2 0 0 0 reset master; @@ -51,16 +51,16 @@ Log_name Pos Event_type Server_id End_log_pos Info master-bin1.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE = NDB master-bin1.000001 # Query # # BEGIN master-bin1.000001 # Table_map # # table_id: # (test.t1) -master-bin1.000001 # Table_map # # table_id: # (cluster.apply_status) +master-bin1.000001 # Table_map # # table_id: # (mysql.apply_status) master-bin1.000001 # Write_rows # # table_id: # master-bin1.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin1.000001 # Query # # COMMIT SELECT @the_epoch2:=epoch,inserts,updates,deletes,schemaops FROM -cluster.binlog_index ORDER BY epoch DESC LIMIT 1; +mysql.binlog_index ORDER BY epoch DESC LIMIT 1; @the_epoch2:=epoch inserts updates deletes schemaops 2 0 0 0 SELECT inserts,updates,deletes,schemaops FROM -cluster.binlog_index WHERE epoch > AND epoch <= ; +mysql.binlog_index WHERE epoch > AND epoch <= ; inserts updates deletes schemaops 2 0 0 0 drop table t1; @@ -69,12 +69,12 @@ Log_name Pos Event_type Server_id End_log_pos Info master-bin1.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE = NDB master-bin1.000001 # Query # # BEGIN master-bin1.000001 # Table_map # # table_id: # (test.t1) -master-bin1.000001 # Table_map # # table_id: # (cluster.apply_status) +master-bin1.000001 # Table_map # # table_id: # (mysql.apply_status) master-bin1.000001 # Write_rows # # table_id: # master-bin1.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin1.000001 # Query # # COMMIT master-bin1.000001 # Query # # use `test`; drop table t1 SELECT inserts,updates,deletes,schemaops FROM -cluster.binlog_index WHERE epoch > AND epoch <= ; +mysql.binlog_index WHERE epoch > AND epoch <= ; inserts updates deletes schemaops 2 0 0 0 diff --git a/mysql-test/r/ndb_restore_compat.result b/mysql-test/r/ndb_restore_compat.result index 358ca36b2df..595c582e3b7 100644 --- a/mysql-test/r/ndb_restore_compat.result +++ b/mysql-test/r/ndb_restore_compat.result @@ -44,7 +44,7 @@ SELECT * FROM SYSTEM_VALUES ORDER BY SYSTEM_VALUES_ID; SYSTEM_VALUES_ID VALUE 0 2039 1 3 -SELECT * FROM cluster.apply_status WHERE server_id=0; +SELECT * FROM mysql.apply_status WHERE server_id=0; server_id epoch 0 151 TRUNCATE GL; @@ -98,7 +98,7 @@ SELECT * FROM SYSTEM_VALUES ORDER BY SYSTEM_VALUES_ID; SYSTEM_VALUES_ID VALUE 0 2297 1 5 -SELECT * FROM cluster.apply_status WHERE server_id=0; +SELECT * FROM mysql.apply_status WHERE server_id=0; server_id epoch 0 331 DROP DATABASE BANK; diff --git a/mysql-test/r/ps_1general.result b/mysql-test/r/ps_1general.result index 67959920248..762ceeaa03b 100644 --- a/mysql-test/r/ps_1general.result +++ b/mysql-test/r/ps_1general.result @@ -259,7 +259,6 @@ prepare stmt4 from ' show databases '; execute stmt4; Database information_schema -cluster mysql test prepare stmt4 from ' show tables from test like ''t2%'' '; diff --git a/mysql-test/r/rpl_create_database.result 
b/mysql-test/r/rpl_create_database.result index 0593501f623..0cfd44bc58c 100644 --- a/mysql-test/r/rpl_create_database.result +++ b/mysql-test/r/rpl_create_database.result @@ -23,7 +23,6 @@ ALTER DATABASE mysqltest_bob CHARACTER SET latin1; SHOW DATABASES; Database information_schema -cluster mysql mysqltest_bob mysqltest_prometheus @@ -32,7 +31,6 @@ test SHOW DATABASES; Database information_schema -cluster mysql mysqltest_prometheus mysqltest_sisyfos @@ -47,7 +45,6 @@ CREATE TABLE t2 (a INT); SHOW DATABASES; Database information_schema -cluster mysql mysqltest_bob mysqltest_prometheus @@ -56,7 +53,6 @@ test SHOW DATABASES; Database information_schema -cluster mysql mysqltest_prometheus mysqltest_sisyfos diff --git a/mysql-test/r/rpl_load_from_master.result b/mysql-test/r/rpl_load_from_master.result index c279ee6e0aa..08b45ec1db0 100644 --- a/mysql-test/r/rpl_load_from_master.result +++ b/mysql-test/r/rpl_load_from_master.result @@ -33,7 +33,6 @@ create database mysqltest; show databases; Database information_schema -cluster mysql mysqltest mysqltest2 @@ -51,7 +50,6 @@ set sql_log_bin = 1; show databases; Database information_schema -cluster mysql test create database mysqltest2; @@ -71,7 +69,6 @@ load data from master; show databases; Database information_schema -cluster mysql mysqltest mysqltest2 diff --git a/mysql-test/r/rpl_loaddata_m.result b/mysql-test/r/rpl_loaddata_m.result index ec2f788a5e1..9dbae6d38c4 100644 --- a/mysql-test/r/rpl_loaddata_m.result +++ b/mysql-test/r/rpl_loaddata_m.result @@ -21,7 +21,6 @@ COUNT(*) SHOW DATABASES; Database information_schema -cluster mysql mysqltest test diff --git a/mysql-test/r/rpl_ndb_bank.result b/mysql-test/r/rpl_ndb_bank.result index 62ab3f18d37..06c005427d1 100644 --- a/mysql-test/r/rpl_ndb_bank.result +++ b/mysql-test/r/rpl_ndb_bank.result @@ -47,17 +47,17 @@ CREATE DATABASE IF NOT EXISTS BANK; DROP DATABASE BANK; CREATE DATABASE BANK; RESET MASTER; -CREATE TABLE IF NOT EXISTS cluster.backup_info (id INT, backup_id INT) ENGINE = HEAP; -DELETE FROM cluster.backup_info; -LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster.backup_info FIELDS TERMINATED BY ','; -SELECT @the_backup_id:=backup_id FROM cluster.backup_info; +CREATE TABLE IF NOT EXISTS mysql.backup_info (id INT, backup_id INT) ENGINE = HEAP; +DELETE FROM mysql.backup_info; +LOAD DATA INFILE '../tmp.dat' INTO TABLE mysql.backup_info FIELDS TERMINATED BY ','; +SELECT @the_backup_id:=backup_id FROM mysql.backup_info; @the_backup_id:=backup_id -SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status; +SELECT @the_epoch:=MAX(epoch) FROM mysql.apply_status; @the_epoch:=MAX(epoch) SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) -FROM cluster.binlog_index WHERE epoch > ORDER BY epoch ASC LIMIT 1; +FROM mysql.binlog_index WHERE epoch > ORDER BY epoch ASC LIMIT 1; @the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1) master-bin.000001 CHANGE MASTER TO diff --git a/mysql-test/r/rpl_ndb_dd_advance.result b/mysql-test/r/rpl_ndb_dd_advance.result index bbc67a04027..2a87670837b 100644 --- a/mysql-test/r/rpl_ndb_dd_advance.result +++ b/mysql-test/r/rpl_ndb_dd_advance.result @@ -355,13 +355,13 @@ COUNT(*) SELECT COUNT(*) FROM history; COUNT(*) 2000 -CREATE TEMPORARY TABLE IF NOT EXISTS cluster.backup_info (id INT, backup_id INT) ENGINE = HEAP; -DELETE FROM cluster.backup_info; -LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster.backup_info FIELDS TERMINATED BY ','; -SELECT @the_backup_id:=backup_id FROM cluster.backup_info; +CREATE TEMPORARY TABLE IF NOT EXISTS 
mysql.backup_info (id INT, backup_id INT) ENGINE = HEAP; +DELETE FROM mysql.backup_info; +LOAD DATA INFILE '../tmp.dat' INTO TABLE mysql.backup_info FIELDS TERMINATED BY ','; +SELECT @the_backup_id:=backup_id FROM mysql.backup_info; @the_backup_id:=backup_id -DROP TABLE IF EXISTS cluster.backup_info; +DROP TABLE IF EXISTS mysql.backup_info; ************ Restore the slave ************************ CREATE DATABASE tpcb; ***** Check a few slave restore values *************** diff --git a/mysql-test/r/rpl_ndb_dd_basic.result b/mysql-test/r/rpl_ndb_dd_basic.result index bb5919193eb..75323767427 100644 --- a/mysql-test/r/rpl_ndb_dd_basic.result +++ b/mysql-test/r/rpl_ndb_dd_basic.result @@ -57,7 +57,7 @@ tablespace ts1 storage disk engine ndb master-bin.000001 # Query # # BEGIN master-bin.000001 # Table_map # # table_id: # (test.t1) -master-bin.000001 # Table_map # # table_id: # (cluster.apply_status) +master-bin.000001 # Table_map # # table_id: # (mysql.apply_status) master-bin.000001 # Write_rows # # table_id: # master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F master-bin.000001 # Query # # COMMIT diff --git a/mysql-test/r/rpl_ndb_idempotent.result b/mysql-test/r/rpl_ndb_idempotent.result index 1ba23e703c2..e8a96aec137 100644 --- a/mysql-test/r/rpl_ndb_idempotent.result +++ b/mysql-test/r/rpl_ndb_idempotent.result @@ -9,14 +9,14 @@ INSERT INTO t1 VALUES ("row1","will go away",1); SELECT * FROM t1 ORDER BY c3; c1 c2 c3 row1 will go away 1 -SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status; +SELECT @the_epoch:=MAX(epoch) FROM mysql.apply_status; @the_epoch:=MAX(epoch) SELECT * FROM t1 ORDER BY c3; c1 c2 c3 row1 will go away 1 SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) -FROM cluster.binlog_index WHERE epoch = ; +FROM mysql.binlog_index WHERE epoch = ; @the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1) master-bin.000001 INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4); diff --git a/mysql-test/r/rpl_ndb_log.result b/mysql-test/r/rpl_ndb_log.result index e0135a94c63..a594fa6c1dc 100644 --- a/mysql-test/r/rpl_ndb_log.result +++ b/mysql-test/r/rpl_ndb_log.result @@ -22,7 +22,7 @@ master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=NDB master-bin.000001 # Query 1 # BEGIN master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Table_map 1 # table_id: # (cluster.apply_status) +master-bin.000001 # Table_map 1 # table_id: # (mysql.apply_status) master-bin.000001 # Write_rows 1 # table_id: # master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Query 1 # COMMIT @@ -30,7 +30,7 @@ master-bin.000001 # Query 1 # use `test`; drop table t1 master-bin.000001 # Query 1 # use `test`; create table t1 (word char(20) not null)ENGINE=NDB master-bin.000001 # Query 1 # BEGIN master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Table_map 1 # table_id: # (cluster.apply_status) +master-bin.000001 # Table_map 1 # table_id: # (mysql.apply_status) master-bin.000001 # Write_rows 1 # table_id: # master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Query 1 # COMMIT @@ -61,7 +61,7 @@ master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 master-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=NDB master-bin.000001 # Query 1 # BEGIN 
master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Table_map 1 # table_id: # (cluster.apply_status) +master-bin.000001 # Table_map 1 # table_id: # (mysql.apply_status) master-bin.000001 # Write_rows 1 # table_id: # master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Query 1 # COMMIT @@ -69,7 +69,7 @@ master-bin.000001 # Query 1 # use `test`; drop table t1 master-bin.000001 # Query 1 # use `test`; create table t1 (word char(20) not null)ENGINE=NDB master-bin.000001 # Query 1 # BEGIN master-bin.000001 # Table_map 1 # table_id: # (test.t1) -master-bin.000001 # Table_map 1 # table_id: # (cluster.apply_status) +master-bin.000001 # Table_map 1 # table_id: # (mysql.apply_status) master-bin.000001 # Write_rows 1 # table_id: # master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Query 1 # COMMIT @@ -81,18 +81,18 @@ master-bin.000002 # Query 1 # use `test`; create table t3 (a int)ENGINE=NDB master-bin.000002 # Query 1 # use `test`; create table t2 (n int)ENGINE=NDB master-bin.000002 # Query 1 # BEGIN master-bin.000002 # Table_map 1 # table_id: # (test.t2) -master-bin.000002 # Table_map 1 # table_id: # (cluster.apply_status) +master-bin.000002 # Table_map 1 # table_id: # (mysql.apply_status) master-bin.000002 # Write_rows 1 # table_id: # master-bin.000002 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000002 # Query 1 # COMMIT show binary logs; Log_name File_size -master-bin.000001 1698 -master-bin.000002 591 +master-bin.000001 1694 +master-bin.000002 589 start slave; show binary logs; Log_name File_size -slave-bin.000001 1793 +slave-bin.000001 1789 slave-bin.000002 198 show binlog events in 'slave-bin.000001' from 4; Log_name Pos Event_type Server_id End_log_pos Info @@ -100,7 +100,7 @@ slave-bin.000001 # Format_desc 2 # Server ver: VERSION, Binlog ver: 4 slave-bin.000001 # Query 1 # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=NDB slave-bin.000001 # Query 2 # BEGIN slave-bin.000001 # Table_map 2 # table_id: # (test.t1) -slave-bin.000001 # Table_map 2 # table_id: # (cluster.apply_status) +slave-bin.000001 # Table_map 2 # table_id: # (mysql.apply_status) slave-bin.000001 # Write_rows 2 # table_id: # slave-bin.000001 # Write_rows 2 # table_id: # flags: STMT_END_F slave-bin.000001 # Query 2 # COMMIT @@ -108,7 +108,7 @@ slave-bin.000001 # Query 1 # use `test`; drop table t1 slave-bin.000001 # Query 1 # use `test`; create table t1 (word char(20) not null)ENGINE=NDB slave-bin.000001 # Query 2 # BEGIN slave-bin.000001 # Table_map 2 # table_id: # (test.t1) -slave-bin.000001 # Table_map 2 # table_id: # (cluster.apply_status) +slave-bin.000001 # Table_map 2 # table_id: # (mysql.apply_status) slave-bin.000001 # Write_rows 2 # table_id: # slave-bin.000001 # Write_rows 2 # table_id: # flags: STMT_END_F slave-bin.000001 # Query 2 # COMMIT @@ -120,13 +120,13 @@ slave-bin.000002 # Format_desc 2 # Server ver: VERSION, Binlog ver: 4 slave-bin.000002 # Query 1 # use `test`; create table t2 (n int)ENGINE=NDB slave-bin.000002 # Query 2 # BEGIN slave-bin.000002 # Table_map 2 # table_id: # (test.t2) -slave-bin.000002 # Table_map 2 # table_id: # (cluster.apply_status) +slave-bin.000002 # Table_map 2 # table_id: # (mysql.apply_status) slave-bin.000002 # Write_rows 2 # table_id: # slave-bin.000002 # Write_rows 2 # table_id: # flags: STMT_END_F slave-bin.000002 # Query 2 # COMMIT show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File 
Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000002 591 # # master-bin.000002 Yes Yes # 0 0 591 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000002 589 # # master-bin.000002 Yes Yes # 0 0 589 # None 0 No # show binlog events in 'slave-bin.000005' from 4; ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Could not find target log DROP TABLE t1; diff --git a/mysql-test/r/rpl_ndb_multi.result b/mysql-test/r/rpl_ndb_multi.result index 13751060ed3..74e06b5ff38 100644 --- a/mysql-test/r/rpl_ndb_multi.result +++ b/mysql-test/r/rpl_ndb_multi.result @@ -16,7 +16,7 @@ row1 will go away 1 SELECT * FROM t1 ORDER BY c3; c1 c2 c3 row1 will go away 1 -SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status; +SELECT @the_epoch:=MAX(epoch) FROM mysql.apply_status; @the_epoch:=MAX(epoch) SELECT * FROM t1 ORDER BY c3; @@ -24,7 +24,7 @@ c1 c2 c3 row1 will go away 1 stop slave; SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) -FROM cluster.binlog_index WHERE epoch = ; +FROM mysql.binlog_index WHERE epoch = ; @the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1) 102 master-bin1.000001 CHANGE MASTER TO diff --git a/mysql-test/r/rpl_ndb_sync.result b/mysql-test/r/rpl_ndb_sync.result index 4ca73167603..2b9ca24fca0 100644 --- a/mysql-test/r/rpl_ndb_sync.result +++ b/mysql-test/r/rpl_ndb_sync.result @@ -60,11 +60,11 @@ hex(c2) hex(c3) c1 0 1 BCDEF 1 0 CD 0 0 DEFGHIJKL -SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status; +SELECT @the_epoch:=MAX(epoch) FROM mysql.apply_status; @the_epoch:=MAX(epoch) SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) -FROM cluster.binlog_index WHERE epoch > ORDER BY epoch ASC LIMIT 1; +FROM mysql.binlog_index WHERE epoch > ORDER BY epoch ASC LIMIT 1; @the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1) master-bin.000001 CHANGE MASTER TO @@ -89,8 +89,8 @@ hex(c2) hex(c3) c1 DROP DATABASE ndbsynctest; STOP SLAVE; reset master; -select * from cluster.binlog_index; +select * from mysql.binlog_index; Position File epoch inserts updates deletes schemaops reset slave; -select * from cluster.apply_status; +select * from mysql.apply_status; server_id epoch diff --git a/mysql-test/r/rpl_row_basic_11bugs.result b/mysql-test/r/rpl_row_basic_11bugs.result index e49facd2d70..8af2e8639aa 100644 --- a/mysql-test/r/rpl_row_basic_11bugs.result +++ b/mysql-test/r/rpl_row_basic_11bugs.result @@ -9,7 +9,6 @@ CREATE DATABASE test_ignore; SHOW DATABASES; Database information_schema -cluster mysql test test_ignore @@ -34,7 +33,6 @@ master-bin.000001 235 Write_rows 1 282 table_id: # flags: STMT_END_F SHOW DATABASES; Database information_schema -cluster mysql test USE test; diff --git a/mysql-test/r/rpl_truncate_7ndb.result b/mysql-test/r/rpl_truncate_7ndb.result index 0e1b21d31aa..c57eb2e1dae 100644 --- a/mysql-test/r/rpl_truncate_7ndb.result +++ b/mysql-test/r/rpl_truncate_7ndb.result @@ -33,12 +33,12 @@ master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 master-bin.000001 102 Query 1 219 use `test`; CREATE TABLE t1 (a INT PRIMARY 
KEY, b LONG) ENGINE=NDB master-bin.000001 219 Query 1 283 BEGIN master-bin.000001 283 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 323 Table_map 1 93 table_id: # (cluster.apply_status) -master-bin.000001 376 Write_rows 1 135 table_id: # -master-bin.000001 418 Write_rows 1 182 table_id: # flags: STMT_END_F -master-bin.000001 465 Query 1 530 COMMIT -master-bin.000001 530 Query 1 610 use `test`; TRUNCATE TABLE t1 -master-bin.000001 610 Query 1 686 use `test`; DROP TABLE t1 +master-bin.000001 323 Table_map 1 91 table_id: # (mysql.apply_status) +master-bin.000001 374 Write_rows 1 133 table_id: # +master-bin.000001 416 Write_rows 1 180 table_id: # flags: STMT_END_F +master-bin.000001 463 Query 1 528 COMMIT +master-bin.000001 528 Query 1 608 use `test`; TRUNCATE TABLE t1 +master-bin.000001 608 Query 1 684 use `test`; DROP TABLE t1 **** On Master **** CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB; INSERT INTO t1 VALUES (1,1), (2,2); @@ -69,23 +69,23 @@ master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 master-bin.000001 102 Query 1 219 use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB master-bin.000001 219 Query 1 283 BEGIN master-bin.000001 283 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 323 Table_map 1 93 table_id: # (cluster.apply_status) -master-bin.000001 376 Write_rows 1 135 table_id: # -master-bin.000001 418 Write_rows 1 182 table_id: # flags: STMT_END_F -master-bin.000001 465 Query 1 530 COMMIT -master-bin.000001 530 Query 1 610 use `test`; TRUNCATE TABLE t1 -master-bin.000001 610 Query 1 686 use `test`; DROP TABLE t1 -master-bin.000001 686 Query 1 803 use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB -master-bin.000001 803 Query 1 867 BEGIN -master-bin.000001 867 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 907 Table_map 1 93 table_id: # (cluster.apply_status) -master-bin.000001 960 Write_rows 1 135 table_id: # -master-bin.000001 1002 Write_rows 1 182 table_id: # flags: STMT_END_F -master-bin.000001 1049 Query 1 1114 COMMIT -master-bin.000001 1114 Query 1 1178 BEGIN -master-bin.000001 1178 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 1218 Table_map 1 93 table_id: # (cluster.apply_status) -master-bin.000001 1271 Write_rows 1 135 table_id: # -master-bin.000001 1313 Delete_rows 1 174 table_id: # flags: STMT_END_F -master-bin.000001 1352 Query 1 1417 COMMIT -master-bin.000001 1417 Query 1 1493 use `test`; DROP TABLE t1 +master-bin.000001 323 Table_map 1 91 table_id: # (mysql.apply_status) +master-bin.000001 374 Write_rows 1 133 table_id: # +master-bin.000001 416 Write_rows 1 180 table_id: # flags: STMT_END_F +master-bin.000001 463 Query 1 528 COMMIT +master-bin.000001 528 Query 1 608 use `test`; TRUNCATE TABLE t1 +master-bin.000001 608 Query 1 684 use `test`; DROP TABLE t1 +master-bin.000001 684 Query 1 801 use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB +master-bin.000001 801 Query 1 865 BEGIN +master-bin.000001 865 Table_map 1 40 table_id: # (test.t1) +master-bin.000001 905 Table_map 1 91 table_id: # (mysql.apply_status) +master-bin.000001 956 Write_rows 1 133 table_id: # +master-bin.000001 998 Write_rows 1 180 table_id: # flags: STMT_END_F +master-bin.000001 1045 Query 1 1110 COMMIT +master-bin.000001 1110 Query 1 1174 BEGIN +master-bin.000001 1174 Table_map 1 40 table_id: # (test.t1) +master-bin.000001 1214 Table_map 1 91 table_id: # (mysql.apply_status) +master-bin.000001 1265 Write_rows 1 133 table_id: # +master-bin.000001 1307 Delete_rows 1 172 
table_id: # flags: STMT_END_F +master-bin.000001 1346 Query 1 1411 COMMIT +master-bin.000001 1411 Query 1 1487 use `test`; DROP TABLE t1 diff --git a/mysql-test/r/rpl_truncate_7ndb_2.result b/mysql-test/r/rpl_truncate_7ndb_2.result index 0e1b21d31aa..ca323e193fa 100644 --- a/mysql-test/r/rpl_truncate_7ndb_2.result +++ b/mysql-test/r/rpl_truncate_7ndb_2.result @@ -33,7 +33,7 @@ master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 master-bin.000001 102 Query 1 219 use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB master-bin.000001 219 Query 1 283 BEGIN master-bin.000001 283 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 323 Table_map 1 93 table_id: # (cluster.apply_status) +master-bin.000001 323 Table_map 1 93 table_id: # (mysql.apply_status) master-bin.000001 376 Write_rows 1 135 table_id: # master-bin.000001 418 Write_rows 1 182 table_id: # flags: STMT_END_F master-bin.000001 465 Query 1 530 COMMIT @@ -69,7 +69,7 @@ master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 master-bin.000001 102 Query 1 219 use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB master-bin.000001 219 Query 1 283 BEGIN master-bin.000001 283 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 323 Table_map 1 93 table_id: # (cluster.apply_status) +master-bin.000001 323 Table_map 1 93 table_id: # (mysql.apply_status) master-bin.000001 376 Write_rows 1 135 table_id: # master-bin.000001 418 Write_rows 1 182 table_id: # flags: STMT_END_F master-bin.000001 465 Query 1 530 COMMIT @@ -78,13 +78,13 @@ master-bin.000001 610 Query 1 686 use `test`; DROP TABLE t1 master-bin.000001 686 Query 1 803 use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b LONG) ENGINE=NDB master-bin.000001 803 Query 1 867 BEGIN master-bin.000001 867 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 907 Table_map 1 93 table_id: # (cluster.apply_status) +master-bin.000001 907 Table_map 1 93 table_id: # (mysql.apply_status) master-bin.000001 960 Write_rows 1 135 table_id: # master-bin.000001 1002 Write_rows 1 182 table_id: # flags: STMT_END_F master-bin.000001 1049 Query 1 1114 COMMIT master-bin.000001 1114 Query 1 1178 BEGIN master-bin.000001 1178 Table_map 1 40 table_id: # (test.t1) -master-bin.000001 1218 Table_map 1 93 table_id: # (cluster.apply_status) +master-bin.000001 1218 Table_map 1 93 table_id: # (mysql.apply_status) master-bin.000001 1271 Write_rows 1 135 table_id: # master-bin.000001 1313 Delete_rows 1 174 table_id: # flags: STMT_END_F master-bin.000001 1352 Query 1 1417 COMMIT diff --git a/mysql-test/r/schema.result b/mysql-test/r/schema.result index 8ed1a587588..538abd8d039 100644 --- a/mysql-test/r/schema.result +++ b/mysql-test/r/schema.result @@ -6,7 +6,6 @@ foo CREATE DATABASE `foo` /*!40100 DEFAULT CHARACTER SET latin1 */ show schemas; Database information_schema -cluster foo mysql test diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result index b473bbac923..fa62ff7a1f7 100644 --- a/mysql-test/r/show_check.result +++ b/mysql-test/r/show_check.result @@ -53,7 +53,6 @@ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length I show databases; Database information_schema -cluster mysql test show databases like "test%"; diff --git a/mysql-test/r/system_mysql_db.result b/mysql-test/r/system_mysql_db.result index b9d3504993c..ab140fe2782 100644 --- a/mysql-test/r/system_mysql_db.result +++ b/mysql-test/r/system_mysql_db.result @@ -1,6 +1,7 @@ drop table if exists t1,t1aa,t2aa; show tables; 
Tables_in_db +binlog_index columns_priv db event diff --git a/mysql-test/r/upgrade.result b/mysql-test/r/upgrade.result index 8a2249480e9..76e0359c405 100644 --- a/mysql-test/r/upgrade.result +++ b/mysql-test/r/upgrade.result @@ -57,3 +57,5 @@ s1 1 drop table `txu@0023p@0023p1`; drop table `txu#p#p1`; +truncate t1; +drop table t1; diff --git a/mysql-test/std_data/old_table-323.frm b/mysql-test/std_data/old_table-323.frm new file mode 100644 index 0000000000000000000000000000000000000000..316dfd76050b5b90de8b1339cf7fb31674e5131e GIT binary patch literal 8606 zcmeI$ziz@X5C-t?9CIk0qIN?ayg)jnO4Tu2DzWZ(VQ8WyINPsusN0#tIMtH#YyPuy8(iCuKceqFa3Q&Lo6rcbFC_n)U zP=EpypunvN81vk`@?jxzFt7ti3^4~q3?zA#%r^UNax2B&WgW-1D|f|lmr87k0u-PC z1t>rP3Q&Lo6rcbFE))0_M3-gw-=_ru3Y1L~OFa5k7U-Yi0yZ#DFaCL6u0PAX7|&cW zpH9c6o8&9M=iqv'binlog_index' AND table_name<>'apply_status'; select * from v1; select c,table_name from v1 @@ -528,7 +528,7 @@ flush privileges; # Bug #9404 information_schema: Weird error messages # with SELECT SUM() ... GROUP BY queries # -SELECT table_schema, count(*) FROM information_schema.TABLES where TABLE_SCHEMA!='cluster' GROUP BY TABLE_SCHEMA; +SELECT table_schema, count(*) FROM information_schema.TABLES where table_name<>'binlog_index' AND table_name<>'apply_status' GROUP BY TABLE_SCHEMA; # diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test index 327b071afeb..e8a40e62bc6 100644 --- a/mysql-test/t/mysqldump.test +++ b/mysql-test/t/mysqldump.test @@ -1394,6 +1394,9 @@ revoke all privileges on mysqldump_myDB.* from myDB_User@localhost; drop user myDB_User; drop database mysqldump_myDB; use test; +connection default; +disconnect root; +disconnect user1; --echo # --echo # BUG#13926: --order-by-primary fails if PKEY contains quote character diff --git a/mysql-test/t/ndb_binlog_basic.test b/mysql-test/t/ndb_binlog_basic.test index 3886900037d..c2a36423445 100644 --- a/mysql-test/t/ndb_binlog_basic.test +++ b/mysql-test/t/ndb_binlog_basic.test @@ -19,7 +19,7 @@ create table t1 (a int primary key) engine=ndb; insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); save_master_pos; --replace_column 1 # -select @max_epoch:=max(epoch)-1 from cluster.binlog_index; +select @max_epoch:=max(epoch)-1 from mysql.binlog_index; delete from t1; alter table t1 add (b int); @@ -38,10 +38,10 @@ drop table t2; # (save_master_pos waits for last gcp to complete, ensuring that we have # the expected data in the binlog) save_master_pos; -select inserts from cluster.binlog_index where epoch > @max_epoch and inserts > 5; -select deletes from cluster.binlog_index where epoch > @max_epoch and deletes > 5; +select inserts from mysql.binlog_index where epoch > @max_epoch and inserts > 5; +select deletes from mysql.binlog_index where epoch > @max_epoch and deletes > 5; select inserts,updates,deletes from - cluster.binlog_index where epoch > @max_epoch and updates > 0; + mysql.binlog_index where epoch > @max_epoch and updates > 0; # # check that purge clears the binlog_index @@ -49,7 +49,7 @@ select inserts,updates,deletes from flush logs; --sleep 1 purge master logs before now(); -select count(*) from cluster.binlog_index; +select count(*) from mysql.binlog_index; # # several tables in different databases @@ -64,9 +64,9 @@ use test; insert into mysqltest.t1 values (2,1),(2,2); save_master_pos; --replace_column 1 # -select @max_epoch:=max(epoch)-1 from cluster.binlog_index; +select @max_epoch:=max(epoch)-1 from mysql.binlog_index; drop table t1; drop database mysqltest; select inserts,updates,deletes from - 
cluster.binlog_index where epoch > @max_epoch and inserts > 0; + mysql.binlog_index where epoch > @max_epoch and inserts > 0; diff --git a/mysql-test/t/ndb_binlog_multi.test b/mysql-test/t/ndb_binlog_multi.test index e023a54b61c..1c6a1063fea 100644 --- a/mysql-test/t/ndb_binlog_multi.test +++ b/mysql-test/t/ndb_binlog_multi.test @@ -38,7 +38,7 @@ INSERT INTO t2 VALUES (1,1),(2,2); select * from t2 order by a; --replace_column 1 SELECT @the_epoch:=epoch,inserts,updates,deletes,schemaops FROM - cluster.binlog_index ORDER BY epoch DESC LIMIT 1; + mysql.binlog_index ORDER BY epoch DESC LIMIT 1; let $the_epoch= `SELECT @the_epoch`; # see if we got something on server1 @@ -50,7 +50,7 @@ DROP TABLE t2; --source include/show_binlog_events.inc --replace_result $the_epoch eval SELECT inserts,updates,deletes,schemaops FROM - cluster.binlog_index WHERE epoch=$the_epoch; + mysql.binlog_index WHERE epoch=$the_epoch; # reset for next test connection server1; @@ -65,12 +65,12 @@ INSERT INTO t1 VALUES (1),(2); --source include/show_binlog_events.inc --replace_column 1 SELECT @the_epoch2:=epoch,inserts,updates,deletes,schemaops FROM - cluster.binlog_index ORDER BY epoch DESC LIMIT 1; + mysql.binlog_index ORDER BY epoch DESC LIMIT 1; let $the_epoch2= `SELECT @the_epoch2`; --replace_result $the_epoch $the_epoch2 eval SELECT inserts,updates,deletes,schemaops FROM - cluster.binlog_index WHERE epoch > $the_epoch AND epoch <= $the_epoch2; + mysql.binlog_index WHERE epoch > $the_epoch AND epoch <= $the_epoch2; # now see that we have the events on the other server connection server2; @@ -80,4 +80,4 @@ drop table t1; --source include/show_binlog_events.inc --replace_result $the_epoch $the_epoch2 eval SELECT inserts,updates,deletes,schemaops FROM - cluster.binlog_index WHERE epoch > $the_epoch AND epoch <= $the_epoch2; + mysql.binlog_index WHERE epoch > $the_epoch AND epoch <= $the_epoch2; diff --git a/mysql-test/t/ndb_restore_compat.test b/mysql-test/t/ndb_restore_compat.test index 774011e362d..ee55e827d0e 100644 --- a/mysql-test/t/ndb_restore_compat.test +++ b/mysql-test/t/ndb_restore_compat.test @@ -21,7 +21,7 @@ SELECT * FROM GL ORDER BY TIME,ACCOUNT_TYPE; SELECT * FROM ACCOUNT ORDER BY ACCOUNT_ID; SELECT COUNT(*) FROM TRANSACTION; SELECT * FROM SYSTEM_VALUES ORDER BY SYSTEM_VALUES_ID; -SELECT * FROM cluster.apply_status WHERE server_id=0; +SELECT * FROM mysql.apply_status WHERE server_id=0; # # verify restore of 5.0 backup @@ -39,5 +39,5 @@ SELECT * FROM GL ORDER BY TIME,ACCOUNT_TYPE; SELECT * FROM ACCOUNT ORDER BY ACCOUNT_ID; SELECT COUNT(*) FROM TRANSACTION; SELECT * FROM SYSTEM_VALUES ORDER BY SYSTEM_VALUES_ID; -SELECT * FROM cluster.apply_status WHERE server_id=0; +SELECT * FROM mysql.apply_status WHERE server_id=0; DROP DATABASE BANK; diff --git a/mysql-test/t/rpl_ndb_bank.test b/mysql-test/t/rpl_ndb_bank.test index d6a10e4ccac..9174d09484b 100644 --- a/mysql-test/t/rpl_ndb_bank.test +++ b/mysql-test/t/rpl_ndb_bank.test @@ -118,12 +118,12 @@ RESET MASTER; # there is no neat way to find the backupid, this is a hack to find it... 
--exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > $MYSQLTEST_VARDIR/tmp.dat -CREATE TABLE IF NOT EXISTS cluster.backup_info (id INT, backup_id INT) ENGINE = HEAP; -DELETE FROM cluster.backup_info; -LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster.backup_info FIELDS TERMINATED BY ','; +CREATE TABLE IF NOT EXISTS mysql.backup_info (id INT, backup_id INT) ENGINE = HEAP; +DELETE FROM mysql.backup_info; +LOAD DATA INFILE '../tmp.dat' INTO TABLE mysql.backup_info FIELDS TERMINATED BY ','; --exec rm $MYSQLTEST_VARDIR/tmp.dat || true --replace_column 1 -SELECT @the_backup_id:=backup_id FROM cluster.backup_info; +SELECT @the_backup_id:=backup_id FROM mysql.backup_info; let the_backup_id=`select @the_backup_id`; # restore on slave, first check that nothing is there diff --git a/mysql-test/t/rpl_ndb_dd_advance.test b/mysql-test/t/rpl_ndb_dd_advance.test index 1fe36ecd8a1..1afc61c98bf 100644 --- a/mysql-test/t/rpl_ndb_dd_advance.test +++ b/mysql-test/t/rpl_ndb_dd_advance.test @@ -436,19 +436,19 @@ SELECT COUNT(*) FROM history; --exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > $MYSQLTEST_VARDIR/tmp.dat -CREATE TEMPORARY TABLE IF NOT EXISTS cluster.backup_info (id INT, backup_id INT) ENGINE = HEAP; +CREATE TEMPORARY TABLE IF NOT EXISTS mysql.backup_info (id INT, backup_id INT) ENGINE = HEAP; -DELETE FROM cluster.backup_info; +DELETE FROM mysql.backup_info; -LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster.backup_info FIELDS TERMINATED BY ','; +LOAD DATA INFILE '../tmp.dat' INTO TABLE mysql.backup_info FIELDS TERMINATED BY ','; --exec rm $MYSQLTEST_VARDIR/tmp.dat || true --replace_column 1 -SELECT @the_backup_id:=backup_id FROM cluster.backup_info; +SELECT @the_backup_id:=backup_id FROM mysql.backup_info; let the_backup_id=`select @the_backup_id`; -DROP TABLE IF EXISTS cluster.backup_info; +DROP TABLE IF EXISTS mysql.backup_info; #RESET MASTER; --echo ************ Restore the slave ************************ diff --git a/mysql-test/t/rpl_ndb_idempotent.test b/mysql-test/t/rpl_ndb_idempotent.test index eb47ec08695..477e7ff02e5 100644 --- a/mysql-test/t/rpl_ndb_idempotent.test +++ b/mysql-test/t/rpl_ndb_idempotent.test @@ -4,7 +4,7 @@ # # Currently test only works with ndb since it retrieves "old" -# binlog positions with cluster.binlog_index and apply_status; +# binlog positions with mysql.binlog_index and apply_status; # # create a table with one row @@ -15,7 +15,7 @@ SELECT * FROM t1 ORDER BY c3; # sync slave and retrieve epoch sync_slave_with_master; --replace_column 1 -SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status; +SELECT @the_epoch:=MAX(epoch) FROM mysql.apply_status; let $the_epoch= `select @the_epoch` ; SELECT * FROM t1 ORDER BY c3; @@ -24,7 +24,7 @@ connection master; --replace_result $the_epoch --replace_column 1 eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) - FROM cluster.binlog_index WHERE epoch = $the_epoch ; + FROM mysql.binlog_index WHERE epoch = $the_epoch ; let $the_pos= `SELECT @the_pos` ; let $the_file= `SELECT @the_file` ; diff --git a/mysql-test/t/rpl_ndb_multi.test b/mysql-test/t/rpl_ndb_multi.test index fc7ecab00ac..630668ad369 100644 --- a/mysql-test/t/rpl_ndb_multi.test +++ b/mysql-test/t/rpl_ndb_multi.test @@ -7,7 +7,7 @@ # # Currently test only works with ndb since it retrieves "old" -# binlog positions with cluster.binlog_index and apply_status; +# binlog positions with 
mysql.binlog_index and apply_status; # # create a table with one row, and make sure the other "master" gets it @@ -25,7 +25,7 @@ SELECT * FROM t1 ORDER BY c3; connection master; sync_slave_with_master; --replace_column 1 -SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status; +SELECT @the_epoch:=MAX(epoch) FROM mysql.apply_status; let $the_epoch= `select @the_epoch` ; SELECT * FROM t1 ORDER BY c3; stop slave; @@ -34,7 +34,7 @@ stop slave; connection server2; --replace_result $the_epoch eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) - FROM cluster.binlog_index WHERE epoch = $the_epoch ; + FROM mysql.binlog_index WHERE epoch = $the_epoch ; let $the_pos= `SELECT @the_pos` ; let $the_file= `SELECT @the_file` ; diff --git a/mysql-test/t/rpl_ndb_sync.test b/mysql-test/t/rpl_ndb_sync.test index 20d4f5707f8..10f7dd534a3 100644 --- a/mysql-test/t/rpl_ndb_sync.test +++ b/mysql-test/t/rpl_ndb_sync.test @@ -6,7 +6,7 @@ # # Currently test only works with ndb since it retrieves "old" -# binlog positions with cluster.binlog_index and apply_status; +# binlog positions with mysql.binlog_index and apply_status; # # stop the save @@ -94,11 +94,11 @@ STOP SLAVE; --connection master reset master; # should now contain nothing -select * from cluster.binlog_index; +select * from mysql.binlog_index; --connection slave reset slave; # should now contain nothing -select * from cluster.apply_status; +select * from mysql.apply_status; # End 5.1 Test diff --git a/mysql-test/t/system_mysql_db_fix.test b/mysql-test/t/system_mysql_db_fix.test index daba3b6ff86..cd6de5828b5 100644 --- a/mysql-test/t/system_mysql_db_fix.test +++ b/mysql-test/t/system_mysql_db_fix.test @@ -96,7 +96,7 @@ INSERT INTO user VALUES ('localhost','', '','N','N','N','N','N','N','N','N',' DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, -time_zone_transition_type, general_log, slow_log, event; +time_zone_transition_type, general_log, slow_log, event, binlog_index; -- enable_query_log diff --git a/mysql-test/t/upgrade.test b/mysql-test/t/upgrade.test index 5c5046cf7e9..f517c7787f8 100644 --- a/mysql-test/t/upgrade.test +++ b/mysql-test/t/upgrade.test @@ -47,3 +47,12 @@ select * from `txu@0023p@0023p1`; select * from `txu#p#p1`; drop table `txu@0023p@0023p1`; drop table `txu#p#p1`; + +# +# Check if old tables work +# + +system cp $MYSQL_TEST_DIR/std_data/old_table-323.frm $MYSQLTEST_VARDIR/master-data/test/t1.frm; +truncate t1; +drop table t1; + diff --git a/mysys/my_error.c b/mysys/my_error.c index e60c4eb21d7..cc7c28b6207 100644 --- a/mysys/my_error.c +++ b/mysys/my_error.c @@ -252,3 +252,15 @@ const char **my_error_unregister(int first, int last) return errmsgs; } + + +void my_error_unregister_all(void) +{ + struct my_err_head *list, *next; + for (list= my_errmsgs_globerrs.meh_next; list; list= next) + { + next= list->meh_next; + my_free((gptr) list, MYF(0)); + } + my_errmsgs_list= &my_errmsgs_globerrs; +} diff --git a/mysys/my_init.c b/mysys/my_init.c index dca68637161..149ccac531e 100644 --- a/mysys/my_init.c +++ b/mysys/my_init.c @@ -154,6 +154,7 @@ void my_end(int infoflag) } } free_charsets(); + my_error_unregister_all(); my_once_free(); if ((infoflag & MY_GIVE_INFO) || print_info) @@ -208,7 +209,8 @@ Voluntary context switches %ld, Involuntary context switches %ld\n", Check on destroying of mutexes. 
A few may be left that will get cleaned up by C++ destructors */ - safe_mutex_end(infoflag & MY_GIVE_INFO ? stderr : (FILE *) 0); + safe_mutex_end((infoflag & (MY_GIVE_INFO | MY_CHECK_ERROR)) ? stderr : + (FILE *) 0); #endif /* defined(SAFE_MUTEX) */ #endif /* THREAD */ diff --git a/mysys/mysys_priv.h b/mysys/mysys_priv.h index 89a6d8aa2a7..1d9c2812eb6 100644 --- a/mysys/mysys_priv.h +++ b/mysys/mysys_priv.h @@ -41,3 +41,5 @@ extern pthread_mutex_t THR_LOCK_charset; #ifndef EDQUOT #define EDQUOT (-1) #endif + +void my_error_unregister_all(void); diff --git a/scripts/mysql_create_system_tables.sh b/scripts/mysql_create_system_tables.sh index 69ea8e3d004..a8e4fed5d27 100644 --- a/scripts/mysql_create_system_tables.sh +++ b/scripts/mysql_create_system_tables.sh @@ -874,8 +874,7 @@ $c_pp $c_gl $c_sl $c_ev -CREATE DATABASE IF NOT EXISTS cluster; -CREATE TABLE IF NOT EXISTS cluster.binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM; +CREATE TABLE IF NOT EXISTS mysql.binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM; END_OF_DATA diff --git a/scripts/mysql_fix_privilege_tables.sql b/scripts/mysql_fix_privilege_tables.sql index f3c0c7f13be..15ebef7d9c0 100644 --- a/scripts/mysql_fix_privilege_tables.sql +++ b/scripts/mysql_fix_privilege_tables.sql @@ -1,13 +1,13 @@ --- This script converts any old privilege tables to privilege tables suitable --- for this version of MySQL +# This script converts any old privilege tables to privilege tables suitable +# for this version of MySQL --- You can safely ignore all 'Duplicate column' and 'Unknown column' errors" --- because these just mean that your tables are already up to date. --- This script is safe to run even if your tables are already up to date! +# You can safely ignore all 'Duplicate column' and 'Unknown column' errors" +# because these just mean that your tables are already up to date. +# This script is safe to run even if your tables are already up to date! --- On unix, you should use the mysql_fix_privilege_tables script to execute --- this sql script. --- On windows you should do 'mysql --force mysql < mysql_fix_privilege_tables.sql' +# On unix, you should use the mysql_fix_privilege_tables script to execute +# this sql script. 
+# On windows you should do 'mysql --force mysql < mysql_fix_privilege_tables.sql' set storage_engine=MyISAM; @@ -27,7 +27,7 @@ CREATE TABLE IF NOT EXISTS plugin ( ALTER TABLE user add File_priv enum('N','Y') COLLATE utf8_general_ci NOT NULL; --- Detect whether or not we had the Grant_priv column +# Detect whether or not we had the Grant_priv column SET @hadGrantPriv:=0; SELECT @hadGrantPriv:=1 FROM user WHERE Grant_priv LIKE '%'; @@ -35,14 +35,14 @@ ALTER TABLE user add Grant_priv enum('N','Y') COLLATE utf8_general_ci NOT NULL,a ALTER TABLE host add Grant_priv enum('N','Y') NOT NULL,add References_priv enum('N','Y') COLLATE utf8_general_ci NOT NULL,add Index_priv enum('N','Y') COLLATE utf8_general_ci NOT NULL,add Alter_priv enum('N','Y') COLLATE utf8_general_ci NOT NULL; ALTER TABLE db add Grant_priv enum('N','Y') COLLATE utf8_general_ci NOT NULL,add References_priv enum('N','Y') COLLATE utf8_general_ci NOT NULL,add Index_priv enum('N','Y') COLLATE utf8_general_ci NOT NULL,add Alter_priv enum('N','Y') COLLATE utf8_general_ci NOT NULL; --- Fix privileges for old tables +# Fix privileges for old tables UPDATE user SET Grant_priv=File_priv,References_priv=Create_priv,Index_priv=Create_priv,Alter_priv=Create_priv WHERE @hadGrantPriv = 0; UPDATE db SET References_priv=Create_priv,Index_priv=Create_priv,Alter_priv=Create_priv WHERE @hadGrantPriv = 0; UPDATE host SET References_priv=Create_priv,Index_priv=Create_priv,Alter_priv=Create_priv WHERE @hadGrantPriv = 0; --- --- The second alter changes ssl_type to new 4.0.2 format --- Adding columns needed by GRANT .. REQUIRE (openssl)" +# +# The second alter changes ssl_type to new 4.0.2 format +# Adding columns needed by GRANT .. REQUIRE (openssl)" ALTER TABLE user ADD ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci NOT NULL, @@ -51,9 +51,9 @@ ADD x509_issuer BLOB NOT NULL, ADD x509_subject BLOB NOT NULL; ALTER TABLE user MODIFY ssl_type enum('','ANY','X509', 'SPECIFIED') NOT NULL; --- --- Create tables_priv and columns_priv if they don't exists --- +# +# Create tables_priv and columns_priv if they don't exists +# CREATE TABLE IF NOT EXISTS tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, @@ -66,7 +66,7 @@ CREATE TABLE IF NOT EXISTS tables_priv ( Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name) ) CHARACTER SET utf8 COLLATE utf8_bin; --- Fix collation of set fields +# Fix collation of set fields ALTER TABLE tables_priv modify Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter') COLLATE utf8_general_ci DEFAULT '' NOT NULL, modify Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL; @@ -88,26 +88,26 @@ CREATE TABLE IF NOT EXISTS columns_priv ( Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) CHARACTER SET utf8 COLLATE utf8_bin; --- Fix collation of set fields +# Fix collation of set fields ALTER TABLE columns_priv MODIFY Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL; --- --- Name change of Type -> Column_priv from MySQL 3.22.12 --- +# +# Name change of Type -> Column_priv from MySQL 3.22.12 +# ALTER TABLE columns_priv change Type Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL; --- --- Add the new 'type' column to the func 
table. --- +# +# Add the new 'type' column to the func table. +# ALTER TABLE func add type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL; --- --- Change the user,db and host tables to MySQL 4.0 format --- +# +# Change the user,db and host tables to MySQL 4.0 format +# # Detect whether we had Show_db_priv SET @hadShowDbPriv:=0; @@ -122,22 +122,22 @@ ADD Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTE ADD Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Execute_priv, ADD Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Repl_slave_priv; --- Convert privileges so that users have similar privileges as before +# Convert privileges so that users have similar privileges as before UPDATE user SET Show_db_priv= Select_priv, Super_priv=Process_priv, Execute_priv=Process_priv, Create_tmp_table_priv='Y', Lock_tables_priv='Y', Repl_slave_priv=file_priv, Repl_client_priv=File_priv where user<>"" AND @hadShowDbPriv = 0; --- Add fields that can be used to limit number of questions and connections --- for some users. +# Add fields that can be used to limit number of questions and connections +# for some users. ALTER TABLE user ADD max_questions int(11) NOT NULL DEFAULT 0 AFTER x509_subject, ADD max_updates int(11) unsigned NOT NULL DEFAULT 0 AFTER max_questions, ADD max_connections int(11) unsigned NOT NULL DEFAULT 0 AFTER max_updates; --- --- Add Create_tmp_table_priv and Lock_tables_priv to db and host --- +# +# Add Create_tmp_table_priv and Lock_tables_priv to db and host +# ALTER TABLE db ADD Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, @@ -156,8 +156,8 @@ alter table func comment='User defined functions'; alter table tables_priv comment='Table privileges'; alter table columns_priv comment='Column privileges'; --- Convert all tables to UTF-8 with binary collation --- and reset all char columns to correct width +# Convert all tables to UTF-8 with binary collation +# and reset all char columns to correct width ALTER TABLE user MODIFY Host char(60) NOT NULL default '', MODIFY User char(16) NOT NULL default '', @@ -385,7 +385,7 @@ Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) CHARACTER SET utf8 comment='Time zones'; --- Make enum field case-insensitive +# Make enum field case-insensitive ALTER TABLE time_zone MODIFY Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL; @@ -527,9 +527,9 @@ ALTER TABLE proc MODIFY db MODIFY comment char(64) collate utf8_bin DEFAULT '' NOT NULL; --- --- Create missing log tables (5.1) --- +# +# Create missing log tables (5.1) +# delimiter // CREATE PROCEDURE create_log_tables() @@ -673,9 +673,9 @@ ALTER TABLE event ADD sql_mode UPDATE user SET Event_priv=Super_priv WHERE @hadEventPriv = 0; ALTER TABLE event MODIFY name char(64) CHARACTER SET utf8 NOT NULL default ''; --- --- TRIGGER privilege --- +# +# TRIGGER privilege +# SET @hadTriggerPriv := 0; SELECT @hadTriggerPriv :=1 FROM user WHERE Trigger_priv LIKE '%'; @@ -687,6 +687,8 @@ ALTER TABLE tables_priv MODIFY Table_priv set('Select','Insert','Update','Delete UPDATE user SET Trigger_priv=Super_priv WHERE @hadTriggerPriv = 0; +CREATE TABLE IF NOT EXISTS binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT 
UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM; + # Activate the new, possible modified privilege tables # This should not be needed, but gives us some extra testing that the above # changes was correct diff --git a/sql/field.cc b/sql/field.cc index 09e919d872a..0453a4be6c3 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1237,12 +1237,6 @@ Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg, } -uint Field::offset() -{ - return (uint) (ptr - (char*) table->record[0]); -} - - void Field::hash(ulong *nr, ulong *nr2) { if (is_null()) diff --git a/sql/field.h b/sql/field.h index 9b81931d416..b3541b38973 100644 --- a/sql/field.h +++ b/sql/field.h @@ -342,7 +342,10 @@ public: virtual int pack_cmp(const char *b, uint key_length_arg, my_bool insert_or_update) { return cmp(ptr,b); } - uint offset(); // Should be inline ... + uint offset(byte *record) + { + return (uint) (ptr - (char*) record); + } void copy_from_tmp(int offset); uint fill_cache_field(struct st_cache_field *copy); virtual bool get_date(TIME *ltime,uint fuzzydate); diff --git a/sql/field_conv.cc b/sql/field_conv.cc index 7bc6c432d1c..01b5306f5a4 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -119,12 +119,12 @@ set_field_to_null(Field *field) return 0; } field->reset(); - if (current_thd->count_cuted_fields == CHECK_FIELD_WARN) + if (field->table->in_use->count_cuted_fields == CHECK_FIELD_WARN) { field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); return 0; } - if (!current_thd->no_errors) + if (!field->table->in_use->no_errors) my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name); return -1; } @@ -176,12 +176,12 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions) field->table->auto_increment_field_not_null= FALSE; return 0; // field is set in handler.cc } - if (current_thd->count_cuted_fields == CHECK_FIELD_WARN) + if (field->table->in_use->count_cuted_fields == CHECK_FIELD_WARN) { field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_BAD_NULL_ERROR, 1); return 0; } - if (!current_thd->no_errors) + if (!field->table->in_use->no_errors) my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name); return -1; } @@ -403,7 +403,7 @@ static void do_varstring1(Copy_field *copy) if (length > copy->to_length- 1) { length=copy->to_length - 1; - if (current_thd->count_cuted_fields) + if (copy->from_field->table->in_use->count_cuted_fields) copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } @@ -418,7 +418,7 @@ static void do_varstring2(Copy_field *copy) if (length > copy->to_length- HA_KEY_BLOB_LENGTH) { length=copy->to_length-HA_KEY_BLOB_LENGTH; - if (current_thd->count_cuted_fields) + if (copy->from_field->table->in_use->count_cuted_fields) copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } diff --git a/sql/ha_ndbcluster_tables.h b/sql/ha_ndbcluster_tables.h index 12124cd8820..9d7fda33102 100644 --- a/sql/ha_ndbcluster_tables.h +++ b/sql/ha_ndbcluster_tables.h @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#define NDB_REP_DB "cluster" +#define NDB_REP_DB "mysql" #define NDB_REP_TABLE "binlog_index" #define NDB_APPLY_TABLE "apply_status" #define NDB_SCHEMA_TABLE "schema" diff --git a/sql/item_sum.cc b/sql/item_sum.cc index c656faa7c49..5e0e64116b9 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -2928,13 +2928,14 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1, */ Field *field= 
(*field_item)->get_tmp_table_field(); /* - If field_item is a const item then either get_tp_table_field returns 0 + If field_item is a const item then either get_tmp_table_field returns 0 or it is an item over a const table. */ if (field && !(*field_item)->const_item()) { int res; - uint offset= field->offset() - table->s->null_bytes; + uint offset= (field->offset(field->table->record[0]) - + table->s->null_bytes); if ((res= field->cmp((char *) key1 + offset, (char *) key2 + offset))) return res; } @@ -2972,7 +2973,8 @@ int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2) if (field && !item->const_item()) { int res; - uint offset= field->offset() - table->s->null_bytes; + uint offset= (field->offset(field->table->record[0]) - + table->s->null_bytes); if ((res= field->cmp((char *) key1 + offset, (char *) key2 + offset))) return (*order_item)->asc ? res : -res; } @@ -3039,7 +3041,8 @@ int dump_leaf_key(byte* key, element_count count __attribute__((unused)), because it contains both order and arg list fields. */ Field *field= (*arg)->get_tmp_table_field(); - uint offset= field->offset() - table->s->null_bytes; + uint offset= (field->offset(field->table->record[0]) - + table->s->null_bytes); DBUG_ASSERT(offset < table->s->reclength); res= field->val_str(&tmp, (char *) key + offset); } diff --git a/sql/key.cc b/sql/key.cc index be21bf11c3c..dceeab1c011 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -19,37 +19,54 @@ #include "mysql_priv.h" - /* - ** Search after with key field is. If no key starts with field test - ** if field is part of some key. - ** - ** returns number of key. keylength is set to length of key before - ** (not including) field - ** Used when calculating key for NEXT_NUMBER - */ +/* + Search after a key that starts with 'field' -int find_ref_key(KEY *key, uint key_count, Field *field, uint *key_length) + SYNOPSIS + find_ref_key() + key First key to check + key_count How many keys to check + record Start of record + field Field to search after + key_length On partial match, contains length of fields before + field + + NOTES + Used when calculating key for NEXT_NUMBER + + IMPLEMENTATION + If no key starts with field test if field is part of some key. If we find + one, then return first key and set key_length to the number of bytes + preceding 'field'. + + RETURN + -1 field is not part of the key + # Key part for key matching key. + key_length is set to length of key before (not including) field +*/ + +int find_ref_key(KEY *key, uint key_count, byte *record, Field *field, + uint *key_length) { reg2 int i; reg3 KEY *key_info; uint fieldpos; - fieldpos= field->offset(); - - /* Test if some key starts as fieldpos */ + fieldpos= field->offset(record); + /* Test if some key starts as fieldpos */ for (i= 0, key_info= key ; i < (int) key_count ; i++, key_info++) { if (key_info->key_part[0].offset == fieldpos) - { /* Found key. Calc keylength */ + { /* Found key. 
Calc keylength */ *key_length=0; - return(i); /* Use this key */ + return(i); /* Use this key */ } } - /* Test if some key contains fieldpos */ + /* Test if some key contains fieldpos */ for (i= 0, key_info= key; i < (int) key_count ; i++, key_info++) @@ -62,7 +79,7 @@ int find_ref_key(KEY *key, uint key_count, Field *field, uint *key_length) j++, key_part++) { if (key_part->offset == fieldpos) - return(i); /* Use this key */ + return(i); /* Use this key */ *key_length+=key_part->store_length; } } diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 0bf75982626..e68fb4e442e 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1427,7 +1427,8 @@ void print_plan(JOIN* join,uint idx, double record_count, double read_time, #endif void mysql_print_status(); /* key.cc */ -int find_ref_key(KEY *key, uint key_count, Field *field, uint *key_length); +int find_ref_key(KEY *key, uint key_count, byte *record, Field *field, + uint *key_length); void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length); void key_restore(byte *to_record, byte *from_key, KEY *key_info, uint key_length); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 05cc75f1c05..355b3f174dd 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -3187,7 +3187,7 @@ server."); using_update_log=1; } - if (plugin_init(0)) + if (plugin_init(opt_bootstrap)) { sql_print_error("Failed to init plugins."); return 1; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 5da8d27a887..7f31cde7819 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -2520,7 +2520,9 @@ my_size_t THD::max_row_length_blob(TABLE *table, const byte *data) const for (uint *ptr= beg ; ptr != end ; ++ptr) { Field_blob* const blob= (Field_blob*) table->field[*ptr]; - length+= blob->get_length((const char *) (data + blob->offset())) + 2; + length+= blob->get_length((const char*) (data + + blob->offset(table->record[0]))) + + HA_KEY_BLOB_LENGTH; } return length; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index e5399c03e17..9db7d1dd398 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -9488,7 +9488,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, bool maybe_null=(*cur_group->item)->maybe_null; key_part_info->null_bit=0; key_part_info->field= field; - key_part_info->offset= field->offset(); + key_part_info->offset= field->offset(table->record[0]); key_part_info->length= (uint16) field->key_length(); key_part_info->type= (uint8) field->key_type(); key_part_info->key_type = @@ -9585,7 +9585,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, { key_part_info->null_bit=0; key_part_info->field= *reg_field; - key_part_info->offset= (*reg_field)->offset(); + key_part_info->offset= (*reg_field)->offset(table->record[0]); key_part_info->length= (uint16) (*reg_field)->pack_length(); key_part_info->type= (uint8) (*reg_field)->key_type(); key_part_info->key_type = @@ -12557,8 +12557,9 @@ remove_duplicates(JOIN *join, TABLE *entry,List &fields, Item *having) DBUG_RETURN(0); } Field **first_field=entry->field+entry->s->fields - field_count; - offset= field_count ? - entry->field[entry->s->fields - field_count]->offset() : 0; + offset= (field_count ? 
+ entry->field[entry->s->fields - field_count]-> + offset(entry->record[0]) : 0); reclength=entry->s->reclength-offset; free_io_cache(entry); // Safety diff --git a/sql/table.cc b/sql/table.cc index f15555138f8..58649c1d0f7 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -30,7 +30,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, File file); static void fix_type_pointers(const char ***array, TYPELIB *point_to_type, uint types, char **names); -static uint find_field(Field **fields, uint start, uint length); +static uint find_field(Field **fields, byte *record, uint start, uint length); /* Get column name from column hash */ @@ -1070,6 +1070,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, Field *field; if (new_field_pack_flag <= 1) key_part->fieldnr= (uint16) find_field(share->field, + share->default_values, (uint) key_part->offset, (uint) key_part->length); if (!key_part->fieldnr) @@ -1233,24 +1234,19 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, if (share->found_next_number_field) { - /* - We must have a table object for find_ref_key to calculate field offset - */ - TABLE tmp_table; - tmp_table.record[0]= share->default_values; - reg_field= *share->found_next_number_field; - reg_field->table= &tmp_table; if ((int) (share->next_number_index= (uint) - find_ref_key(share->key_info, share->keys, reg_field, + find_ref_key(share->key_info, share->keys, + share->default_values, reg_field, &share->next_number_key_offset)) < 0) { + /* Wrong field definition */ + DBUG_ASSERT(0); reg_field->unireg_check= Field::NONE; /* purecov: inspected */ share->found_next_number_field= 0; } else reg_field->flags |= AUTO_INCREMENT_FLAG; - reg_field->table= 0; } if (share->blob_fields) @@ -1970,7 +1966,7 @@ TYPELIB *typelib(MEM_ROOT *mem_root, List &strings) # field number +1 */ -static uint find_field(Field **fields, uint start, uint length) +static uint find_field(Field **fields, byte *record, uint start, uint length) { Field **field; uint i, pos; @@ -1978,7 +1974,7 @@ static uint find_field(Field **fields, uint start, uint length) pos= 0; for (field= fields, i=1 ; *field ; i++,field++) { - if ((*field)->offset() == start) + if ((*field)->offset(record) == start) { if ((*field)->key_length() == length) return (i); diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index b2b07981bdb..83fafe27d21 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -817,10 +817,11 @@ int ha_archive::write_row(byte *buf) int rc; byte *read_buf= NULL; ulonglong temp_auto; + byte *record= table->record[0]; DBUG_ENTER("ha_archive::write_row"); if (share->crashed) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); ha_statistic_increment(&SSV::ha_write_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) @@ -883,7 +884,8 @@ int ha_archive::write_row(byte *buf) while (!(get_row(&archive, read_buf))) { - if (!memcmp(read_buf + mfield->offset(), table->next_number_field->ptr, + if (!memcmp(read_buf + mfield->offset(record), + table->next_number_field->ptr, mfield->max_length())) { rc= HA_ERR_FOUND_DUPP_KEY; @@ -914,15 +916,16 @@ int ha_archive::write_row(byte *buf) for (Field **field=table->field ; *field ; field++) { DBUG_PRINT("archive",("Pack is %d\n", (*field)->pack_length())); - DBUG_PRINT("archive",("MyPack is %d\n", (*field)->data_length((char*) buf + (*field)->offset()))); + DBUG_PRINT("archive",("MyPack is %d\n", (*field)->data_length((char*) buf + 
(*field)->offset(record)))); if ((*field)->real_type() == MYSQL_TYPE_VARCHAR) { - uint actual_length= (*field)->data_length((char*) buf + (*field)->offset()); - uint offset= (*field)->offset() + actual_length + + uint actual_length= (*field)->data_length((char*) buf + + (*field)->offset(record)); + uint offset= (*field)->offset(record) + actual_length + (actual_length > 255 ? 2 : 1); DBUG_PRINT("archive",("Offset is %d -> %d\n", actual_length, offset)); /* - if ((*field)->pack_length() + (*field)->offset() != offset) + if ((*field)->pack_length() + (*field)->offset(record) != offset) bzero(buf + offset, (size_t)((*field)->pack_length() + (actual_length > 255 ? 2 : 1) - (*field)->data_length)); */ } @@ -1306,7 +1309,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) { Field *field= table->found_next_number_field; ulonglong auto_value= - (ulonglong) field->val_int((char*)(buf + field->offset())); + (ulonglong) field->val_int((char*)(buf + + field->offset(table->record[0]))); if (share->auto_increment_value < auto_value) stats.auto_increment_value= share->auto_increment_value= auto_value; diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index 51c9f4c192e..669a57d324a 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -1856,6 +1856,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) String where_string(where_buffer, sizeof(where_buffer), &my_charset_bin); + byte *record= table->record[0]; DBUG_ENTER("ha_federated::update_row"); /* set string lengths to 0 to avoid misc chars in string @@ -1914,7 +1915,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) bool needs_quote= (*field)->str_needs_quotes(); where_string.append(STRING_WITH_LEN(" = ")); (*field)->val_str(&field_value, - (char*) (old_data + (*field)->offset())); + (char*) (old_data + (*field)->offset(record))); if (needs_quote) where_string.append('\''); field_value.print(&where_string); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 3434cf6b8ba..de1a5f53fc8 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -6630,7 +6630,7 @@ innodb_mutex_show_status( mutex->count_spin_rounds, mutex->count_os_wait, mutex->count_os_yield, - mutex->lspent_time/1000); + (ulong) (mutex->lspent_time/1000)); if (stat_print(thd, innobase_hton_name, hton_name_len, buf1, buf1len, @@ -6660,7 +6660,7 @@ innodb_mutex_show_status( rw_lock_count, rw_lock_count_spin_loop, rw_lock_count_spin_rounds, rw_lock_count_os_wait, rw_lock_count_os_yield, - rw_lock_wait_time/1000); + (ulong) (rw_lock_wait_time/1000)); if (stat_print(thd, innobase_hton_name, hton_name_len, STRING_WITH_LEN("rw_lock_mutexes"), buf2, buf2len)) { diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index e4da7652860..4047569ee63 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -609,7 +609,7 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) { param.testflag&= ~T_RETRY_WITHOUT_QUICK; sql_print_information("Retrying repair of: '%s' without quick", - table->s->path); + table->s->path.str); continue; } param.testflag&= ~T_QUICK; @@ -617,7 +617,7 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) { param.testflag= (param.testflag & ~T_REP_BY_SORT) | T_REP; sql_print_information("Retrying repair of: '%s' with keycache", - table->s->path); + table->s->path.str); continue; } break; @@ -629,7 +629,7 @@ int 
ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) sql_print_information("Found %s of %s rows when repairing '%s'", llstr(file->state->records, llbuff), llstr(start_records, llbuff2), - table->s->path); + table->s->path.str); } return error; } @@ -1157,7 +1157,7 @@ bool ha_myisam::check_and_repair(THD *thd) // Don't use quick if deleted rows if (!file->state->del && (myisam_recover_options & HA_RECOVER_QUICK)) check_opt.flags|=T_QUICK; - sql_print_warning("Checking table: '%s'",table->s->path); + sql_print_warning("Checking table: '%s'",table->s->path.str); old_query= thd->query; old_query_length= thd->query_length; @@ -1168,7 +1168,7 @@ bool ha_myisam::check_and_repair(THD *thd) if ((marked_crashed= mi_is_crashed(file)) || check(thd, &check_opt)) { - sql_print_warning("Recovering table: '%s'",table->s->path); + sql_print_warning("Recovering table: '%s'",table->s->path.str); check_opt.flags= ((myisam_recover_options & HA_RECOVER_BACKUP ? T_BACKUP_DATA : 0) | (marked_crashed ? 0 : T_QUICK) | @@ -1460,6 +1460,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, bool found_real_auto_increment=0; enum ha_base_keytype type; char buff[FN_REFLEN]; + byte *record; KEY *pos; MI_KEYDEF *keydef; MI_COLUMNDEF *recinfo,*recinfo_pos; @@ -1564,6 +1565,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, found_real_auto_increment= share->next_number_key_offset == 0; } + record= table_arg->record[0]; recpos=0; recinfo_pos=recinfo; while (recpos < (uint) share->reclength) { @@ -1573,7 +1575,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, for (field=table_arg->field ; *field ; field++) { - if ((fieldpos=(*field)->offset()) >= recpos && + if ((fieldpos=(*field)->offset(record)) >= recpos && fieldpos <= minpos) { /* skip null fields */ diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h index 7ad938c06a7..6e9108e8731 100644 --- a/storage/myisam/ha_myisam.h +++ b/storage/myisam/ha_myisam.h @@ -37,7 +37,7 @@ extern ulong myisam_recover_options; class ha_myisam: public handler { MI_INFO *file; - ulong int_table_flags; + ulonglong int_table_flags; char *data_file_name, *index_file_name; bool can_enable_indexes; int repair(THD *thd, MI_CHECK &param, bool optimize); diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp index ef535cf9e26..4e3d299239b 100644 --- a/storage/ndb/tools/restore/Restore.cpp +++ b/storage/ndb/tools/restore/Restore.cpp @@ -317,7 +317,7 @@ RestoreMetaData::markSysTables() Uint32 j; for (j = 0; j < getNoOfTables(); j++) { TableS* table = allTables[j]; - if (table->getTableId() == id1) { + if (table->getTableId() == (Uint32) id1) { if (table->isSysTable) blobTable->isSysTable = true; break; From 42f6fd5f547324c3e714c76fbe5ec92cfad4043e Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 23 Nov 2006 16:41:21 +0200 Subject: [PATCH 50/57] Added some missing DBUG_RETURN Fixed that --valgrind works again with mysql-test-run.sh Extended error messages when losing connection during mysql_real_connect() client/mysqldump.c: Added some missing DBUG_RETURN include/errmsg.h: Extended error messages when losing connection during mysql_real_connect() libmysql/errmsg.c: Extended error messages when losing connection during mysql_real_connect() mysql-test/mysql-test-run.pl: Don't remove .reject files at startup mysql-test/mysql-test-run.sh: Fixed that --valgrind works again Don't give warnings for directories in var/tmp sql-common/client.c: Extended error messages when losing connection during 
mysql_real_connect() --- client/mysqldump.c | 6 +-- include/errmsg.h | 3 +- libmysql/errmsg.c | 3 ++ mysql-test/mysql-test-run.pl | 6 +-- mysql-test/mysql-test-run.sh | 4 +- sql-common/client.c | 76 +++++++++++++++++++++++++++++++++--- 6 files changed, 83 insertions(+), 15 deletions(-) diff --git a/client/mysqldump.c b/client/mysqldump.c index b44c3944f41..2be60b2df9a 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -2203,7 +2203,7 @@ static void dump_table(char *table, char *db) The "table" could be a view. If so, we don't do anything here. */ if (strcmp (table_type, "VIEW") == 0) - return; + DBUG_VOID_RETURN; /* Check --no-data flag */ if (opt_no_data) @@ -2869,7 +2869,7 @@ static int dump_all_tables_in_db(char *database) *afterdot++= '.'; if (init_dumping(database, init_dumping_tables)) - return 1; + DBUG_RETURN(1); if (opt_xml) print_xml_tag(md_result_file, "", "\n", "database", "name=", database, NullS); if (lock_tables) @@ -2923,7 +2923,7 @@ static int dump_all_tables_in_db(char *database) fprintf(md_result_file,"\n--\n-- Flush Grant Tables \n--\n"); fprintf(md_result_file,"\n/*! FLUSH PRIVILEGES */;\n"); } - return 0; + DBUG_RETURN(0); } /* dump_all_tables_in_db */ diff --git a/include/errmsg.h b/include/errmsg.h index 1dd5759c104..aca7c4b6a1f 100644 --- a/include/errmsg.h +++ b/include/errmsg.h @@ -97,6 +97,7 @@ extern const char *client_errors[]; /* Error messages */ #define CR_NO_STMT_METADATA 2052 #define CR_NO_RESULT_SET 2053 #define CR_NOT_IMPLEMENTED 2054 -#define CR_ERROR_LAST /*Copy last error nr:*/ 2054 +#define CR_SERVER_LOST_EXTENDED 2055 +#define CR_ERROR_LAST /*Copy last error nr:*/ 2055 /* Add error numbers before CR_ERROR_LAST and change it accordingly. */ diff --git a/libmysql/errmsg.c b/libmysql/errmsg.c index 9e1d70a47df..59089d5ec18 100644 --- a/libmysql/errmsg.c +++ b/libmysql/errmsg.c @@ -82,6 +82,7 @@ const char *client_errors[]= "Prepared statement contains no metadata", "Attempt to read a row while there is no result set associated with the statement", "This feature is not implemented yet", + "Lost connection to MySQL server at '%s', system error: %d", "" }; @@ -145,6 +146,7 @@ const char *client_errors[]= "Prepared statement contains no metadata", "Attempt to read a row while there is no result set associated with the statement", "This feature is not implemented yet", + "Lost connection to MySQL server at '%s', system error: %d", "" }; @@ -206,6 +208,7 @@ const char *client_errors[]= "Prepared statement contains no metadata", "Attempt to read a row while there is no result set associated with the statement", "This feature is not implemented yet", + "Lost connection to MySQL server at '%s', system error: %d", "" }; #endif diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index c06a5a6524a..de252bff215 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -2047,7 +2047,7 @@ sub cleanup_stale_files () { } # Remove old log files - foreach my $name (glob("r/*.reject r/*.progress r/*.log r/*.warnings")) + foreach my $name (glob("r/*.progress r/*.log r/*.warnings")) { unlink($name); } @@ -2995,14 +2995,14 @@ sub find_testcase_skipped_reason($) { my ($tinfo)= @_; - # Open mysqltest.log + # Open mysqltest-time my $F= IO::File->new($path_timefile) or mtr_error("can't open file \"$path_timefile\": $!"); my $reason; while ( my $line= <$F> ) { - # Look for "reason: " + # Look for "reason: " if ( $line =~ /reason: (.*)/ ) { $reason= $1; diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh 
index 9f53d818d2b..a81a3b8b607 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -123,7 +123,7 @@ find_valgrind() fi # >=2.1.2 requires the --tool option, some versions write to stdout, some to stderr valgrind --help 2>&1 | grep "\-\-tool" > /dev/null && FIND_VALGRIND="$FIND_VALGRIND --tool=memcheck" - FIND_VALGRIND="$FIND_VALGRIND --alignment=8 --leak-check=yes --num-callers=16 --suppressions=$CWD/valgrind.supp" + FIND_VALGRIND="$FIND_VALGRIND --alignment=8 --leak-check=yes --num-callers=16 --suppressions=$MYSQL_TEST_DIR/valgrind.supp" } # No paths below as we can't be sure where the program is! @@ -2087,7 +2087,7 @@ then # Remove files that can cause problems $RM -rf $MYSQL_TEST_DIR/var/ndbcluster - $RM -f $MYSQL_TEST_DIR/var/run/* $MYSQL_TEST_DIR/var/tmp/* + $RM -rf $MYSQL_TEST_DIR/var/run/* $MYSQL_TEST_DIR/var/tmp/* # Remove old berkeley db log files that can confuse the server $RM -f $MASTER_MYDDIR/log.* diff --git a/sql-common/client.c b/sql-common/client.c index 0354cffec32..ea158c2a3c9 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -753,6 +753,29 @@ void set_mysql_error(MYSQL *mysql, int errcode, const char *sqlstate) DBUG_VOID_RETURN; } + +static void set_mysql_extended_error(MYSQL *mysql, int errcode, + const char *sqlstate, + const char *format, ...) +{ + NET *net; + va_list args; + DBUG_ENTER("set_mysql_extended_error"); + DBUG_PRINT("enter", ("error :%d '%s'", errcode, format)); + DBUG_ASSERT(mysql != 0); + + net= &mysql->net; + net->last_errno= errcode; + va_start(args, format); + my_vsnprintf(net->last_error, sizeof(net->last_error)-1, + format, args); + va_end(args); + strmov(net->sqlstate, sqlstate); + + DBUG_VOID_RETURN; +} + + /* Flush result set sent from server */ @@ -850,6 +873,7 @@ static int check_license(MYSQL *mysql) void end_server(MYSQL *mysql) { + int save_errno= errno; DBUG_ENTER("end_server"); if (mysql->net.vio != 0) { @@ -862,6 +886,7 @@ void end_server(MYSQL *mysql) } net_end(&mysql->net); free_old_query(mysql); + errno= save_errno; DBUG_VOID_RETURN; } @@ -2031,7 +2056,10 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, if (mysql->options.connect_timeout && vio_poll_read(net->vio, mysql->options.connect_timeout)) { - set_mysql_error(mysql, CR_SERVER_LOST, unknown_sqlstate); + set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, + ER(CR_SERVER_LOST_EXTENDED), + "waiting for initial communication packet", + errno); goto error; } @@ -2040,8 +2068,14 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, */ if ((pkt_length=cli_safe_read(mysql)) == packet_error) + { + if (mysql->net.last_errno == CR_SERVER_LOST) + set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, + ER(CR_SERVER_LOST_EXTENDED), + "reading initial communication packet", + errno); goto error; - + } /* Check if version of protocol matches current one */ mysql->protocol_version= net->read_pos[0]; @@ -2175,7 +2209,10 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, */ if (my_net_write(net,buff,(uint) (end-buff)) || net_flush(net)) { - set_mysql_error(mysql, CR_SERVER_LOST, unknown_sqlstate); + set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, + ER(CR_SERVER_LOST_EXTENDED), + "sending connection information to server", + errno); goto error; } @@ -2254,7 +2291,10 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, /* Write authentication package */ if (my_net_write(net,buff,(ulong) (end-buff)) || net_flush(net)) { - 
set_mysql_error(mysql, CR_SERVER_LOST, unknown_sqlstate); + set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, + ER(CR_SERVER_LOST_EXTENDED), + "sending authentication information", + errno); goto error; } @@ -2264,7 +2304,14 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, */ if ((pkt_length=cli_safe_read(mysql)) == packet_error) + { + if (mysql->net.last_errno == CR_SERVER_LOST) + set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, + ER(CR_SERVER_LOST_EXTENDED), + "reading authorization packet", + errno); goto error; + } if (pkt_length == 1 && net->read_pos[0] == 254 && mysql->server_capabilities & CLIENT_SECURE_CONNECTION) @@ -2276,12 +2323,22 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, scramble_323(buff, mysql->scramble, passwd); if (my_net_write(net, buff, SCRAMBLE_LENGTH_323 + 1) || net_flush(net)) { - set_mysql_error(mysql, CR_SERVER_LOST, unknown_sqlstate); + set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, + ER(CR_SERVER_LOST_EXTENDED), + "sending password information", + errno); goto error; } /* Read what server thinks about out new auth message report */ if (cli_safe_read(mysql) == packet_error) + { + if (mysql->net.last_errno == CR_SERVER_LOST) + set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, + ER(CR_SERVER_LOST_EXTENDED), + "reading final connect information", + errno); goto error; + } } if (client_flag & CLIENT_COMPRESS) /* We will use compression */ @@ -2292,8 +2349,15 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, goto error; #endif - if (db && mysql_select_db(mysql,db)) + if (db && mysql_select_db(mysql, db)) + { + if (mysql->net.last_errno == CR_SERVER_LOST) + set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, + ER(CR_SERVER_LOST_EXTENDED), + "Setting intital database", + errno); goto error; + } if (mysql->options.init_commands) { From 52fc261bcab0becc9d23f8d1e511760413a6d85d Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 23 Nov 2006 18:39:10 +0200 Subject: [PATCH 51/57] Added option --mtr-build-thread to mysql-test-run.pl Load shared libraries from zlib (fixed that mysql-test-run.pl didn't work on some Solaris boxes) Added connect timeout to test to make im_daemon_life_cycle more predictable mysql-test/mysql-test-run.pl: Added option --mtr-build-thread Load shared libraries from zlib (fixed that mysql-test-run.pl didn't work on some Solaris boxes) mysql-test/t/wait_for_socket.sh: Added connect timeout (to make test predictable) --- mysql-test/mysql-test-run.pl | 103 ++++++++++++++++++++------------ mysql-test/t/wait_for_socket.sh | 2 +- 2 files changed, 67 insertions(+), 38 deletions(-) diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index de252bff215..353674632a0 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -199,6 +199,7 @@ our $opt_client_ddd; our $opt_manual_gdb; our $opt_manual_ddd; our $opt_manual_debug; +our $opt_mtr_build_thread=0; our $opt_debugger; our $opt_client_debugger; @@ -213,6 +214,11 @@ our $clusters; our $instance_manager; +our $opt_master_myport; +our $opt_slave_myport; +our $im_port; +our $im_mysqld1_port; +our $im_mysqld2_port; our $opt_ndbcluster_port; our $opt_ndbconnectstring; our $opt_ndbcluster_port_slave; @@ -313,6 +319,7 @@ our %mysqld_variables; sub main (); sub initial_setup (); sub command_line_setup (); +sub set_mtr_build_thread_ports($); sub datadir_setup (); sub executable_setup (); sub environment_setup (); @@ 
-455,45 +462,17 @@ sub command_line_setup () { $opt_suite= "main"; # Special default suite my $opt_comment; - my $opt_master_myport= 9306; - my $opt_slave_myport= 9308; + $opt_master_myport= 9306; + $opt_slave_myport= 9308; $opt_ndbcluster_port= 9310; $opt_ndbcluster_port_slave= 9311; - my $im_port= 9312; - my $im_mysqld1_port= 9313; - my $im_mysqld2_port= 9314; + $im_port= 9312; + $im_mysqld1_port= 9313; + $im_mysqld2_port= 9314; - # - # To make it easier for different devs to work on the same host, - # an environment variable can be used to control all ports. A small - # number is to be used, 0 - 16 or similar. - # - # Note the MASTER_MYPORT has to be set the same in all 4.x and 5.x - # versions of this script, else a 4.0 test run might conflict with a - # 5.1 test run, even if different MTR_BUILD_THREAD is used. This means - # all port numbers might not be used in this version of the script. - # - # Also note the limiteation of ports we are allowed to hand out. This - # differs between operating systems and configuration, see - # http://www.ncftp.com/ncftpd/doc/misc/ephemeral_ports.html - # But a fairly safe range seems to be 5001 - 32767 if ( $ENV{'MTR_BUILD_THREAD'} ) { - # Up to two masters, up to three slaves - $opt_master_myport= $ENV{'MTR_BUILD_THREAD'} * 10 + 10000; # and 1 - $opt_slave_myport= $opt_master_myport + 2; # and 3 4 - $opt_ndbcluster_port= $opt_master_myport + 5; - $opt_ndbcluster_port_slave= $opt_master_myport + 6; - $im_port= $opt_master_myport + 7; - $im_mysqld1_port= $opt_master_myport + 8; - $im_mysqld2_port= $opt_master_myport + 9; - } - - if ( $opt_master_myport < 5001 or $opt_master_myport + 10 >= 32767 ) - { - mtr_error("MTR_BUILD_THREAD number results in a port", - "outside 5001 - 32767", - "($opt_master_myport - $opt_master_myport + 10)"); + set_mtr_build_thread_ports($ENV{'MTR_BUILD_THREAD'}); } # This is needed for test log evaluation in "gen-build-status-page" @@ -545,6 +524,7 @@ sub command_line_setup () { 'im-port=i' => \$im_port, # Instance Manager port. 'im-mysqld1-port=i' => \$im_mysqld1_port, # Port of mysqld, controlled by IM 'im-mysqld2-port=i' => \$im_mysqld2_port, # Port of mysqld, controlled by IM + 'mtr-build-thread=i' => \$opt_mtr_build_thread, # Test case authoring 'record' => \$opt_record, @@ -627,6 +607,15 @@ sub command_line_setup () { $glob_scriptname= basename($0); + if ($opt_mtr_build_thread != 0) + { + set_mtr_build_thread_ports($opt_mtr_build_thread) + } + elsif ($ENV{'MTR_BUILD_THREAD'}) + { + $opt_mtr_build_thread= $ENV{'MTR_BUILD_THREAD'}; + } + # We require that we are in the "mysql-test" directory # to run mysql-test-run if (! -f $glob_scriptname) @@ -775,7 +764,7 @@ sub command_line_setup () { { mtr_report("Using tmpfs in $fs"); $opt_mem= "$fs/var"; - $opt_mem .= $ENV{'MTR_BUILD_THREAD'} if $ENV{'MTR_BUILD_THREAD'}; + $opt_mem .= $opt_mtr_build_thread if $opt_mtr_build_thread; last; } } @@ -1230,6 +1219,43 @@ sub command_line_setup () { $path_snapshot= "$opt_tmpdir/snapshot_$opt_master_myport/"; } +# +# To make it easier for different devs to work on the same host, +# an environment variable can be used to control all ports. A small +# number is to be used, 0 - 16 or similar. +# +# Note the MASTER_MYPORT has to be set the same in all 4.x and 5.x +# versions of this script, else a 4.0 test run might conflict with a +# 5.1 test run, even if different MTR_BUILD_THREAD is used. This means +# all port numbers might not be used in this version of the script. +# +# Also note the limitation of ports we are allowed to hand out. 
This +# differs between operating systems and configuration, see +# http://www.ncftp.com/ncftpd/doc/misc/ephemeral_ports.html +# But a fairly safe range seems to be 5001 - 32767 +# + +sub set_mtr_build_thread_ports() { + my $mtr_build_thread= shift; + + # Up to two masters, up to three slaves + $opt_master_myport= $mtr_build_thread * 10 + 10000; # and 1 + $opt_slave_myport= $opt_master_myport + 2; # and 3 4 + $opt_ndbcluster_port= $opt_master_myport + 5; + $opt_ndbcluster_port_slave= $opt_master_myport + 6; + $im_port= $opt_master_myport + 7; + $im_mysqld1_port= $opt_master_myport + 8; + $im_mysqld2_port= $opt_master_myport + 9; + + if ( $opt_master_myport < 5001 or $opt_master_myport + 10 >= 32767 ) + { + mtr_error("MTR_BUILD_THREAD number results in a port", + "outside 5001 - 32767", + "($opt_master_myport - $opt_master_myport + 10)"); + } +} + + sub datadir_setup () { # Make a list of all data_dirs @@ -1576,7 +1602,8 @@ sub environment_setup () { if ( $opt_source_dist ) { push(@ld_library_paths, "$glob_basedir/libmysql/.libs/", - "$glob_basedir/libmysql_r/.libs/"); + "$glob_basedir/libmysql_r/.libs/", + "$glob_basedir/zlib.libs/"); } else { @@ -1647,7 +1674,7 @@ sub environment_setup () { $ENV{'IM_PATH_SOCK'}= $instance_manager->{path_sock}; $ENV{'IM_USERNAME'}= $instance_manager->{admin_login}; $ENV{'IM_PASSWORD'}= $instance_manager->{admin_password}; - $ENV{MTR_BUILD_THREAD}= 0 unless $ENV{MTR_BUILD_THREAD}; # Set if not set + $ENV{MTR_BUILD_THREAD}= $opt_mtr_build_thread; $ENV{'EXE_MYSQL'}= $exe_mysql; @@ -4754,6 +4781,8 @@ Options that specify ports slave_port=PORT Specify the port number used by the first slave ndbcluster-port=PORT Specify the port number used by cluster ndbcluster-port-slave=PORT Specify the port number used by slave cluster + mtr-build-thread=# Specify unique collection of ports. Can also be set by + setting the environment variable MTR_BUILD_THREAD. Options for test case authoring diff --git a/mysql-test/t/wait_for_socket.sh b/mysql-test/t/wait_for_socket.sh index 3b900fa2208..b6526b7d19c 100755 --- a/mysql-test/t/wait_for_socket.sh +++ b/mysql-test/t/wait_for_socket.sh @@ -33,7 +33,7 @@ fi ########################################################################### -client_args="--silent --socket=$socket_path " +client_args="--silent --socket=$socket_path --connect_timeout=1 " [ -n "$username" ] && client_args="$client_args --user=$username " [ -n "$password" ] && client_args="$client_args --password=$password " From 788ad30f081bc55ff97ceed78ec7ff545e25ed99 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 27 Nov 2006 01:47:38 +0200 Subject: [PATCH 52/57] Fixed a LOT of compiler warnings Added missing DBUG_RETURN statements (in mysqldump.c) Added missing enums Fixed a lot of wrong DBUG_PRINT() statements, some of which could cause crashes Removed usage of %lld and %p in printf strings as these are not portable or produces different results on different systems. 
client/mysqldump.c: Fixed some compiler warnings Added some missing DBUG_RETURN Remove copying of 'cluster' database client/mysqlslap.c: Fixed compiler warnings client/mysqltest.c: After merge fix extra/yassl/taocrypt/include/algebra.hpp: Removed compiler warning mysql-test/include/im_check_env.inc: Fixed race condition (mysqld1 could report 'starting' or 'online') mysql-test/mysql-test-run.pl: After merge fixes Added missing directory to LD_LIBRARY_PATH mysql-test/r/ctype_cp1250_ch.result: After merge fix mysql-test/r/im_cmd_line.result: Fixed race condition mysql-test/r/im_daemon_life_cycle.result: Fixed race condition mysql-test/r/im_instance_conf.result: Fixed race condition mysql-test/r/im_life_cycle.result: Fixed race condition mysql-test/r/im_utils.result: Fixed race condition mysql-test/r/log_tables.result: Fixed wrong result mysql-test/t/disabled.def: Disabled ndb_restore_partition, as ndb_restore_compate caused it to fail, because of table 'cluster/def/schema' which is stored in ndb_backup50 mysys/my_compress.c: Removed compiler warnings mysys/my_getopt.c: Ensure we always have at least one space between option name and value plugin/fulltext/plugin_example.c: Removed compiler warnings server-tools/instance-manager/mysql_connection.cc: After merge fix sql/event_data_objects.cc: Fixed compiler warnings Fixed platform compatibility issues (%lld is not portable) sql/event_data_objects.h: Fixed compiler warnings sql/event_db_repository.cc: Fixed compiler warnings sql/event_queue.cc: Fixed compiler warnings sql/event_scheduler.cc: Fixed compiler warnings sql/events.cc: Fixed compiler warnings sql/field.cc: Fixed compiler warnings sql/ha_ndbcluster.cc: Fixed compiler warnings sql/ha_ndbcluster_binlog.cc: Fixed compiler warnings sql/ha_partition.cc: Fixed compiler warnings sql/handler.cc: Fixed compiler warnings sql/item_cmpfunc.cc: Fixed DBUG_PRINT style sql/item_func.cc: Fixed compiler warnings sql/log.cc: Fixed compiler warnings sql/log_event.cc: Fixed compiler warnings sql/mysqld.cc: Fixed compiler warnings sql/opt_range.cc: Fixed compiler warnings sql/repl_failsafe.cc: Indentation fixes sql/rpl_rli.cc: Fixed compiler warnings sql/rpl_tblmap.cc: Fixed compiler warnings sql/set_var.cc: Fixed compiler warnings sql/slave.cc: Fixed compiler warnings sql/sp_head.cc: Fixed compiler warnings sql/sql_base.cc: Fixed compiler warnings Fixed indentation sql/sql_binlog.cc: Fixed compiler warnings sql/sql_cache.cc: Fixed compiler warnings sql/sql_class.cc: Fixed compiler warnings sql/sql_handler.cc: Fixed compiler warnings sql/sql_lex.cc: Fixed compiler warnings sql/sql_parse.cc: Fixed compiler warnings sql/sql_partition.cc: Fixed compiler warnings sql/sql_prepare.cc: Fixed compiler warnings sql/sql_table.cc: Fixed compiler warnings sql/sql_test.cc: Fixed DBUG_PRINT style sql/sql_trigger.cc: Fixed DBUG_PRINT style sql/table.cc: Fixed compiler warnings storage/federated/ha_federated.cc: Fixed compiler warnings storage/myisam/mi_rsamepos.c: Fixed compiler warnings storage/ndb/include/ndb_global.h.in: After merge fix storage/ndb/include/util/NdbOut.hpp: Inform gcc that ndbout_c takes a printf() string as argument storage/ndb/include/util/SimpleProperties.hpp: After merge fixes storage/ndb/src/kernel/blocks/backup/Backup.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp: Fixed compiler warnings 
storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: Fixed compiler warnings Fixed usage of uninitialized value (Got help from Jonas with patch) storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/lgman.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/pgman.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/restore.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/suma/Suma.cpp: Fixed compiler warnings Added missing enum's to switch storage/ndb/src/kernel/vm/Configuration.cpp: Fixed compiler warnings storage/ndb/src/kernel/vm/DLHashTable.hpp: Fixed compiler warnings storage/ndb/src/kernel/vm/RWPool.hpp: Fixed compiler warnings storage/ndb/src/kernel/vm/SimulatedBlock.cpp: Fixed compiler warnings storage/ndb/src/kernel/vm/WOPool.hpp: Fixed compiler warnings storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp: Fixed compiler warnings storage/ndb/src/mgmclient/CommandInterpreter.cpp: Fixed compiler warnings storage/ndb/src/mgmsrv/MgmtSrvr.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/DictCache.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/NdbIndexOperation.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/NdbIndexStat.cpp: Initialize possible uninitialized variable storage/ndb/src/ndbapi/NdbOperationInt.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/NdbRecAttr.cpp: Added missing enum's (To avoid compiler warnings) storage/ndb/src/ndbapi/NdbScanOperation.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/ObjectMap.hpp: Fixed compiler warnings storage/ndb/tools/desc.cpp: Fixed compiler warnings storage/ndb/tools/restore/Restore.cpp: Fixed compiler warnings storage/ndb/tools/restore/consumer_restore.cpp: Fixed compiler warnings unittest/mytap/t/basic-t.c: Fixed compiler warnings unittest/mytap/tap.c: Fixed compiler warnings --- client/mysqldump.c | 32 +++--- client/mysqlslap.c | 2 +- client/mysqltest.c | 2 +- extra/yassl/taocrypt/include/algebra.hpp | 2 +- mysql-test/include/im_check_env.inc | 1 + mysql-test/mysql-test-run.pl | 12 +- mysql-test/r/ctype_cp1250_ch.result | 1 + mysql-test/r/im_cmd_line.result | 2 +- mysql-test/r/im_daemon_life_cycle.result | 2 +- mysql-test/r/im_instance_conf.result | 2 +- mysql-test/r/im_life_cycle.result | 2 +- mysql-test/r/im_utils.result | 2 +- mysql-test/r/log_tables.result | 1 + mysql-test/t/disabled.def | 3 + mysys/my_compress.c | 13 ++- mysys/my_getopt.c | 4 +- plugin/fulltext/plugin_example.c | 4 +- .../instance-manager/mysql_connection.cc | 1 - sql/event_data_objects.cc | 67 +++++------ sql/event_data_objects.h | 4 +- sql/event_db_repository.cc | 2 +- sql/event_queue.cc | 46 ++++---- sql/event_scheduler.cc | 21 ++-- sql/events.cc | 2 +- sql/field.cc | 4 +- sql/ha_ndbcluster.cc | 108 +++++++++--------- sql/ha_ndbcluster_binlog.cc | 98 ++++++++-------- sql/ha_partition.cc | 6 +- sql/handler.cc | 14 ++- sql/item_cmpfunc.cc | 2 +- sql/item_func.cc | 2 +- sql/log.cc | 41 
++++--- sql/log_event.cc | 32 +++--- sql/mysqld.cc | 2 +- sql/opt_range.cc | 2 +- sql/repl_failsafe.cc | 4 +- sql/rpl_rli.cc | 4 +- sql/rpl_tblmap.cc | 16 +-- sql/set_var.cc | 2 +- sql/slave.cc | 14 +-- sql/sp_head.cc | 2 +- sql/sql_base.cc | 25 ++-- sql/sql_binlog.cc | 16 +-- sql/sql_cache.cc | 6 +- sql/sql_class.cc | 6 +- sql/sql_handler.cc | 7 +- sql/sql_lex.cc | 2 +- sql/sql_parse.cc | 4 +- sql/sql_partition.cc | 2 +- sql/sql_prepare.cc | 15 +-- sql/sql_table.cc | 2 +- sql/sql_test.cc | 9 +- sql/sql_trigger.cc | 4 +- sql/table.cc | 6 +- storage/federated/ha_federated.cc | 2 +- storage/myisam/mi_rsamepos.c | 3 +- storage/ndb/include/ndb_global.h.in | 1 + storage/ndb/include/util/NdbOut.hpp | 2 +- storage/ndb/include/util/SimpleProperties.hpp | 5 - .../ndb/src/kernel/blocks/backup/Backup.cpp | 2 +- storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 8 +- .../ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 18 +-- .../ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 2 +- .../ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 14 ++- .../ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 6 +- .../ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 2 +- .../src/kernel/blocks/dbtup/DbtupCommit.cpp | 2 +- .../kernel/blocks/dbtup/DbtupDiskAlloc.cpp | 6 +- .../kernel/blocks/dbtup/DbtupExecQuery.cpp | 2 +- .../src/kernel/blocks/dbtup/DbtupFixAlloc.cpp | 1 + storage/ndb/src/kernel/blocks/lgman.cpp | 4 +- storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp | 4 +- storage/ndb/src/kernel/blocks/pgman.cpp | 4 +- storage/ndb/src/kernel/blocks/restore.cpp | 2 +- storage/ndb/src/kernel/blocks/suma/Suma.cpp | 5 +- storage/ndb/src/kernel/vm/Configuration.cpp | 2 +- storage/ndb/src/kernel/vm/DLHashTable.hpp | 1 + storage/ndb/src/kernel/vm/RWPool.hpp | 1 + storage/ndb/src/kernel/vm/SimulatedBlock.cpp | 1 + storage/ndb/src/kernel/vm/WOPool.hpp | 1 + .../ndb/src/kernel/vm/ndbd_malloc_impl.cpp | 4 + .../ndb/src/mgmclient/CommandInterpreter.cpp | 2 + storage/ndb/src/mgmsrv/MgmtSrvr.cpp | 4 +- storage/ndb/src/ndbapi/DictCache.cpp | 2 +- storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp | 10 +- .../ndb/src/ndbapi/NdbEventOperationImpl.cpp | 33 +++--- storage/ndb/src/ndbapi/NdbIndexOperation.cpp | 3 + storage/ndb/src/ndbapi/NdbIndexStat.cpp | 2 +- storage/ndb/src/ndbapi/NdbOperationInt.cpp | 24 ++-- storage/ndb/src/ndbapi/NdbRecAttr.cpp | 7 +- storage/ndb/src/ndbapi/NdbScanOperation.cpp | 5 +- storage/ndb/src/ndbapi/ObjectMap.hpp | 10 +- storage/ndb/tools/desc.cpp | 12 +- storage/ndb/tools/restore/Restore.cpp | 6 + .../ndb/tools/restore/consumer_restore.cpp | 6 +- unittest/mytap/t/basic-t.c | 2 +- unittest/mytap/tap.c | 1 + 97 files changed, 504 insertions(+), 417 deletions(-) diff --git a/client/mysqldump.c b/client/mysqldump.c index 3e17e09b5f3..f812764b97c 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -30,14 +30,14 @@ ** master/autocommit code by Brian Aker ** SSL by ** Andrei Errapart -** Tõnu Samuel +** Tõnu Samuel ** XML by Gary Huntress 10/10/01, cleaned up ** and adapted to mysqldump 05/11/01 by Jani Tolonen ** Added --single-transaction option 06/06/2002 by Peter Zaitsev ** 10 Jun 2003: SET NAMES and --no-set-names by Alexander Barkov */ -#define DUMP_VERSION "10.11" +#define DUMP_VERSION "10.12" #include #include @@ -540,8 +540,10 @@ static void write_header(FILE *sql_file, char *db_name) if (opt_xml) { fputs("\n", sql_file); - /* Schema reference. Allows use of xsi:nil for NULL values and - xsi:type to define an element's data type. */ + /* + Schema reference. 
Allows use of xsi:nil for NULL values and + xsi:type to define an element's data type. + */ fputs("\n", md_result_file); } @@ -3155,10 +3157,8 @@ static int dump_all_tables_in_db(char *database) afterdot= strmov(hash_key, database); *afterdot++= '.'; - if (!strcmp(database, NDB_REP_DB)) /* Skip cluster internal database */ - return 0; if (init_dumping(database, init_dumping_tables)) - return 1; + DBUG_RETURN(1); if (opt_xml) print_xml_tag(md_result_file, "", "\n", "database", "name=", database, NullS); if (lock_tables) @@ -3218,7 +3218,7 @@ static int dump_all_tables_in_db(char *database) fprintf(md_result_file,"\n--\n-- Flush Grant Tables \n--\n"); fprintf(md_result_file,"\n/*! FLUSH PRIVILEGES */;\n"); } - return 0; + DBUG_RETURN(0); } /* dump_all_tables_in_db */ diff --git a/client/mysqlslap.c b/client/mysqlslap.c index 9c8585915a9..c2d3a5866e0 100644 --- a/client/mysqlslap.c +++ b/client/mysqlslap.c @@ -1031,7 +1031,7 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit) for (x= 0; x < concur; x++) { int pid; - DBUG_PRINT("info", ("x %d concurrency %d", x, concurrency)); + DBUG_PRINT("info", ("x: %d concurrency: %u", x, *concurrency)); pid= fork(); switch(pid) { diff --git a/client/mysqltest.c b/client/mysqltest.c index 8c985326e82..b46a337959a 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -80,7 +80,7 @@ enum { OPT_SSL_CA, OPT_SSL_CAPATH, OPT_SSL_CIPHER, OPT_PS_PROTOCOL, OPT_SP_PROTOCOL, OPT_CURSOR_PROTOCOL, OPT_VIEW_PROTOCOL, OPT_SSL_VERIFY_SERVER_CERT, OPT_MAX_CONNECT_RETRIES, - OPT_MARK_PROGRESS, OPT_CHARSETS_DIR, OPT_LOG_DIR, OPT_DEBUG_INFO}; + OPT_MARK_PROGRESS, OPT_CHARSETS_DIR, OPT_LOG_DIR, OPT_DEBUG_INFO }; static int record= 0, opt_sleep= -1; diff --git a/extra/yassl/taocrypt/include/algebra.hpp b/extra/yassl/taocrypt/include/algebra.hpp index 07fc405f093..535ce2599c4 100644 --- a/extra/yassl/taocrypt/include/algebra.hpp +++ b/extra/yassl/taocrypt/include/algebra.hpp @@ -75,7 +75,7 @@ public: typedef Integer Element; AbstractRing() : AbstractGroup() {m_mg.m_pRing = this;} - AbstractRing(const AbstractRing &source) {m_mg.m_pRing = this;} + AbstractRing(const AbstractRing &source) :AbstractGroup() {m_mg.m_pRing = this;} AbstractRing& operator=(const AbstractRing &source) {return *this;} virtual bool IsUnit(const Element &a) const =0; diff --git a/mysql-test/include/im_check_env.inc b/mysql-test/include/im_check_env.inc index 019e0984614..883e5d00fe4 100644 --- a/mysql-test/include/im_check_env.inc +++ b/mysql-test/include/im_check_env.inc @@ -22,4 +22,5 @@ SHOW VARIABLES LIKE 'server_id'; # Check that IM understands that mysqld1 is online, while mysqld2 is # offline. 
+--replace_result starting XXXXX online XXXXX SHOW INSTANCES; diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index b7c2d247ca9..809d9d9d168 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -1576,7 +1576,8 @@ sub environment_setup () { if ( $opt_source_dist ) { push(@ld_library_paths, "$glob_basedir/libmysql/.libs/", - "$glob_basedir/libmysql_r/.libs/"); + "$glob_basedir/libmysql_r/.libs/", + "$glob_basedir/zlib.libs/"); } else { @@ -2992,10 +2993,6 @@ sub do_after_run_mysqltest($) # Save info from this testcase run to mysqltest.log mtr_appendfile_to_file($path_timefile, $path_mysqltest_log) if -f $path_timefile; - - # Remove the file that mysqltest writes info to - unlink($path_timefile); - } @@ -3183,6 +3180,9 @@ sub run_testcase ($) { } } + # Remove the file that mysqltest writes info to + unlink($path_timefile); + # ---------------------------------------------------------------------- # Stop Instance Manager if we are processing an IM-test case. # ---------------------------------------------------------------------- @@ -4094,7 +4094,6 @@ sub run_testcase_start_servers($) { } if ( $clusters->[0]->{'pid'} and ! $master->[1]->{'pid'} ) - { { # Test needs cluster, start an extra mysqld connected to cluster @@ -4848,4 +4847,3 @@ HERE mtr_exit(1); } - diff --git a/mysql-test/r/ctype_cp1250_ch.result b/mysql-test/r/ctype_cp1250_ch.result index 73f415732cd..4b02fa2182a 100644 --- a/mysql-test/r/ctype_cp1250_ch.result +++ b/mysql-test/r/ctype_cp1250_ch.result @@ -1,3 +1,4 @@ +drop table if exists t1; DROP TABLE IF EXISTS t1; SHOW COLLATION LIKE 'cp1250_czech_cs'; Collation Charset Id Default Compiled Sortlen diff --git a/mysql-test/r/im_cmd_line.result b/mysql-test/r/im_cmd_line.result index a4c21c36415..a862d465904 100644 --- a/mysql-test/r/im_cmd_line.result +++ b/mysql-test/r/im_cmd_line.result @@ -3,7 +3,7 @@ Variable_name Value server_id 1 SHOW INSTANCES; instance_name state -mysqld1 starting +mysqld1 XXXXX mysqld2 offline --> Listing users... im_admin diff --git a/mysql-test/r/im_daemon_life_cycle.result b/mysql-test/r/im_daemon_life_cycle.result index 397f4d5d503..4fe15460ae5 100644 --- a/mysql-test/r/im_daemon_life_cycle.result +++ b/mysql-test/r/im_daemon_life_cycle.result @@ -3,7 +3,7 @@ Variable_name Value server_id 1 SHOW INSTANCES; instance_name state -mysqld1 online +mysqld1 XXXXX mysqld2 offline Killing the process... Sleeping... 
diff --git a/mysql-test/r/im_instance_conf.result b/mysql-test/r/im_instance_conf.result index 597a1be428e..d04ae0270ab 100644 --- a/mysql-test/r/im_instance_conf.result +++ b/mysql-test/r/im_instance_conf.result @@ -3,7 +3,7 @@ Variable_name Value server_id 1 SHOW INSTANCES; instance_name state -mysqld1 online +mysqld1 XXXXX mysqld2 offline -------------------------------------------------------------------- server_id = 1 diff --git a/mysql-test/r/im_life_cycle.result b/mysql-test/r/im_life_cycle.result index c403411c399..fb25f4a0768 100644 --- a/mysql-test/r/im_life_cycle.result +++ b/mysql-test/r/im_life_cycle.result @@ -3,7 +3,7 @@ Variable_name Value server_id 1 SHOW INSTANCES; instance_name state -mysqld1 online +mysqld1 XXXXX mysqld2 offline -------------------------------------------------------------------- diff --git a/mysql-test/r/im_utils.result b/mysql-test/r/im_utils.result index 6e40c9bb1c0..b7c68965ada 100644 --- a/mysql-test/r/im_utils.result +++ b/mysql-test/r/im_utils.result @@ -3,7 +3,7 @@ Variable_name Value server_id 1 SHOW INSTANCES; instance_name state -mysqld1 online +mysqld1 XXXXX mysqld2 offline SHOW INSTANCE OPTIONS mysqld1; option_name value diff --git a/mysql-test/r/log_tables.result b/mysql-test/r/log_tables.result index 2836b96d7b5..d9d754c91e6 100644 --- a/mysql-test/r/log_tables.result +++ b/mysql-test/r/log_tables.result @@ -280,6 +280,7 @@ create table general_log_new like general_log; create table slow_log_new like slow_log; show tables like "%log%"; Tables_in_mysql (%log%) +binlog_index general_log general_log_new slow_log diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 0d3b7cdfdeb..c21f3661667 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -16,6 +16,9 @@ concurrent_innodb : BUG#21579 2006-08-11 mleich innodb_concurrent random ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed +ndb_restore_partition : Problem with cluster/def/schema table that is in std_data/ndb_backup51; Pekka will schdule this to someone +rpl_ndb_sync : Problem with cluster/def/schema table that is in std_data/ndb_backup51; Pekka will schdule this to someone + partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table ps_7ndb : BUG#18950 2006-02-16 jmiller create table like does not obtain LOCK_open rpl_ndb_2innodb : BUG#19227 2006-04-20 pekka pk delete apparently not replicated diff --git a/mysys/my_compress.c b/mysys/my_compress.c index 2643d4d16ac..713489e7c2f 100644 --- a/mysys/my_compress.c +++ b/mysys/my_compress.c @@ -138,14 +138,14 @@ int packfrm(const void *data, uint len, uint blob_len; struct frm_blob_struct *blob; DBUG_ENTER("packfrm"); - DBUG_PRINT("enter", ("data: %x, len: %d", data, len)); + DBUG_PRINT("enter", ("data: 0x%lx, len: %d", (long) data, len)); error= 1; org_len= len; if (my_compress((byte*)data, &org_len, &comp_len)) goto err; - DBUG_PRINT("info", ("org_len: %d, comp_len: %d", org_len, comp_len)); + DBUG_PRINT("info", ("org_len: %lu comp_len: %lu", org_len, comp_len)); DBUG_DUMP("compressed", (char*)data, org_len); error= 2; @@ -165,7 +165,8 @@ int packfrm(const void *data, uint len, *pack_len= blob_len; error= 0; - DBUG_PRINT("exit", ("pack_data: %x, pack_len: %d", *pack_data, *pack_len)); + DBUG_PRINT("exit", ("pack_data: 0x%lx pack_len: 
%d", + (long) *pack_data, *pack_len)); err: DBUG_RETURN(error); @@ -194,13 +195,13 @@ int unpackfrm(const void **unpack_data, uint *unpack_len, byte *data; ulong complen, orglen, ver; DBUG_ENTER("unpackfrm"); - DBUG_PRINT("enter", ("pack_data: %x", pack_data)); + DBUG_PRINT("enter", ("pack_data: 0x%lx", (long) pack_data)); complen= uint4korr((char*)&blob->head.complen); orglen= uint4korr((char*)&blob->head.orglen); ver= uint4korr((char*)&blob->head.ver); - DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d", + DBUG_PRINT("blob",("ver: %lu complen: %lu orglen: %lu", ver,complen,orglen)); DBUG_DUMP("blob->data", (char*) blob->data, complen); @@ -220,7 +221,7 @@ int unpackfrm(const void **unpack_data, uint *unpack_len, *unpack_data= data; *unpack_len= complen; - DBUG_PRINT("exit", ("frmdata: %x, len: %d", *unpack_data, *unpack_len)); + DBUG_PRINT("exit", ("frmdata: 0x%lx len: %d", (long) *unpack_data, *unpack_len)); DBUG_RETURN(0); } #endif /* HAVE_COMPRESS */ diff --git a/mysys/my_getopt.c b/mysys/my_getopt.c index 4de2984d9b9..13524ce4e13 100644 --- a/mysys/my_getopt.c +++ b/mysys/my_getopt.c @@ -936,8 +936,8 @@ void my_print_variables(const struct my_option *options) (*getopt_get_addr)("", 0, optp) : optp->value); if (value) { - printf("%s", optp->name); - length= (uint) strlen(optp->name); + printf("%s ", optp->name); + length= (uint) strlen(optp->name)+1; for (; length < name_space; length++) putchar(' '); switch ((optp->var_type & GET_TYPE_MASK)) { diff --git a/plugin/fulltext/plugin_example.c b/plugin/fulltext/plugin_example.c index f09462f2d1a..4194f1df689 100644 --- a/plugin/fulltext/plugin_example.c +++ b/plugin/fulltext/plugin_example.c @@ -62,7 +62,7 @@ static long number_of_calls= 0; /* for SHOW STATUS, see below */ 1 failure (cannot happen) */ -static int simple_parser_plugin_init(void) +static int simple_parser_plugin_init(void *arg __attribute__((unused))) { return(0); } @@ -81,7 +81,7 @@ static int simple_parser_plugin_init(void) */ -static int simple_parser_plugin_deinit(void) +static int simple_parser_plugin_deinit(void *arg __attribute__((unused))) { return(0); } diff --git a/server-tools/instance-manager/mysql_connection.cc b/server-tools/instance-manager/mysql_connection.cc index 87c62fc4371..4ffee83ddc3 100644 --- a/server-tools/instance-manager/mysql_connection.cc +++ b/server-tools/instance-manager/mysql_connection.cc @@ -334,7 +334,6 @@ int Mysql_connection_thread::dispatch_command(enum enum_server_command command, case COM_QUERY: { log_info("query for connection %lu : ----\n%s\n-------------------------", - log_info("query for connection %d : ----\n%s\n-------------------------", connection_id,packet); if (Command *command= parse_command(&instance_map, packet)) { diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index afd10350bb5..397688d3bff 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -124,8 +124,8 @@ void Event_parse_data::init_body(THD *thd) { DBUG_ENTER("Event_parse_data::init_body"); - DBUG_PRINT("info", ("body=[%s] body_begin=0x%lx end=0x%lx", body_begin, - body_begin, thd->lex->ptr)); + DBUG_PRINT("info", ("body: '%s' body_begin: 0x%lx end: 0x%lx", body_begin, + (long) body_begin, (long) thd->lex->ptr)); body.length= thd->lex->ptr - body_begin; const uchar *body_end= body_begin + body.length - 1; @@ -399,8 +399,9 @@ Event_parse_data::init_starts(THD *thd) thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp, (my_time_t) thd->query_start()); - DBUG_PRINT("info",("now =%lld", TIME_to_ulonglong_datetime(&time_tmp))); 
- DBUG_PRINT("info",("starts=%lld", TIME_to_ulonglong_datetime(<ime))); + DBUG_PRINT("info",("now: %ld starts: %ld", + (long) TIME_to_ulonglong_datetime(&time_tmp), + (long) TIME_to_ulonglong_datetime(<ime))); if (TIME_to_ulonglong_datetime(<ime) < TIME_to_ulonglong_datetime(&time_tmp)) goto wrong_value; @@ -536,8 +537,9 @@ Event_parse_data::check_parse_data(THD *thd) { bool ret; DBUG_ENTER("Event_parse_data::check_parse_data"); - DBUG_PRINT("info", ("execute_at=0x%lx expr=0x%lx starts=0x%lx ends=0x%lx", - item_execute_at, item_expression, item_starts, item_ends)); + DBUG_PRINT("info", ("execute_at: 0x%lx expr=0x%lx starts=0x%lx ends=0x%lx", + (long) item_execute_at, (long) item_expression, + (long) item_starts, (long) item_ends)); init_name(thd, identifier); @@ -564,9 +566,9 @@ Event_parse_data::init_definer(THD *thd) int definer_host_len; DBUG_ENTER("Event_parse_data::init_definer"); - DBUG_PRINT("info",("init definer_user thd->mem_root=0x%lx " - "thd->sec_ctx->priv_user=0x%lx", thd->mem_root, - thd->security_ctx->priv_user)); + DBUG_PRINT("info",("init definer_user thd->mem_root: 0x%lx " + "thd->sec_ctx->priv_user: 0x%lx", (long) thd->mem_root, + (long) thd->security_ctx->priv_user)); definer_user_len= strlen(thd->security_ctx->priv_user); definer_host_len= strlen(thd->security_ctx->priv_host); @@ -1032,8 +1034,9 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec, TIME tmp; longlong months=0, seconds=0; DBUG_ENTER("get_next_time"); - DBUG_PRINT("enter", ("start=%llu now=%llu", TIME_to_ulonglong_datetime(start), - TIME_to_ulonglong_datetime(time_now))); + DBUG_PRINT("enter", ("start: %lu now: %lu", + (long) TIME_to_ulonglong_datetime(start), + (long) TIME_to_ulonglong_datetime(time_now))); bzero(&interval, sizeof(interval)); @@ -1081,7 +1084,7 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec, case INTERVAL_LAST: DBUG_ASSERT(0); } - DBUG_PRINT("info", ("seconds=%ld months=%ld", seconds, months)); + DBUG_PRINT("info", ("seconds: %ld months: %ld", (long) seconds, (long) months)); if (seconds) { longlong seconds_diff; @@ -1099,14 +1102,14 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec, event two times for the same time get the next exec if the modulus is not */ - DBUG_PRINT("info", ("multiplier=%d", multiplier)); + DBUG_PRINT("info", ("multiplier: %d", multiplier)); if (seconds_diff % seconds || (!seconds_diff && last_exec->year) || TIME_to_ulonglong_datetime(time_now) == TIME_to_ulonglong_datetime(last_exec)) ++multiplier; interval.second= seconds * multiplier; - DBUG_PRINT("info", ("multiplier=%u interval.second=%u", multiplier, - interval.second)); + DBUG_PRINT("info", ("multiplier: %lu interval.second: %lu", (ulong) multiplier, + (ulong) interval.second)); tmp= *start; if (!(ret= date_add_interval(&tmp, INTERVAL_SECOND, interval))) *next= tmp; @@ -1158,7 +1161,7 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec, } done: - DBUG_PRINT("info", ("next=%llu", TIME_to_ulonglong_datetime(next))); + DBUG_PRINT("info", ("next: %lu", (long) TIME_to_ulonglong_datetime(next))); DBUG_RETURN(ret); } @@ -1183,17 +1186,17 @@ Event_queue_element::compute_next_execution_time() { TIME time_now; int tmp; - DBUG_ENTER("Event_queue_element::compute_next_execution_time"); - DBUG_PRINT("enter", ("starts=%llu ends=%llu last_executed=%llu this=0x%lx", - TIME_to_ulonglong_datetime(&starts), - TIME_to_ulonglong_datetime(&ends), - TIME_to_ulonglong_datetime(&last_executed), this)); + 
DBUG_PRINT("enter", ("starts: %lu ends: %lu last_executed: %lu this: 0x%lx", + (long) TIME_to_ulonglong_datetime(&starts), + (long) TIME_to_ulonglong_datetime(&ends), + (long) TIME_to_ulonglong_datetime(&last_executed), + (long) this)); if (status == Event_queue_element::DISABLED) { DBUG_PRINT("compute_next_execution_time", - ("Event %s is DISABLED", name.str)); + ("Event %s is DISABLED", name.str)); goto ret; } /* If one-time, no need to do computation */ @@ -1203,9 +1206,9 @@ Event_queue_element::compute_next_execution_time() if (last_executed.year) { DBUG_PRINT("info",("One-time event %s.%s of was already executed", - dbname.str, name.str, definer.str)); + dbname.str, name.str)); dropped= (on_completion == Event_queue_element::ON_COMPLETION_DROP); - DBUG_PRINT("info",("One-time event will be dropped=%d.", dropped)); + DBUG_PRINT("info",("One-time event will be dropped: %d.", dropped)); status= Event_queue_element::DISABLED; status_changed= TRUE; @@ -1226,7 +1229,7 @@ Event_queue_element::compute_next_execution_time() execute_at_null= TRUE; if (on_completion == Event_queue_element::ON_COMPLETION_DROP) dropped= TRUE; - DBUG_PRINT("info", ("Dropped=%d", dropped)); + DBUG_PRINT("info", ("Dropped: %d", dropped)); status= Event_queue_element::DISABLED; status_changed= TRUE; @@ -1400,8 +1403,8 @@ Event_queue_element::compute_next_execution_time() goto ret; } ret: - DBUG_PRINT("info", ("ret=0 execute_at=%llu", - TIME_to_ulonglong_datetime(&execute_at))); + DBUG_PRINT("info", ("ret: 0 execute_at: %lu", + (long) TIME_to_ulonglong_datetime(&execute_at))); DBUG_RETURN(FALSE); err: DBUG_PRINT("info", ("ret=1")); @@ -1688,7 +1691,7 @@ done: thd->end_statement(); thd->cleanup_after_query(); - DBUG_PRINT("info", ("EXECUTED %s.%s ret=%d", dbname.str, name.str, ret)); + DBUG_PRINT("info", ("EXECUTED %s.%s ret: %d", dbname.str, name.str, ret)); DBUG_RETURN(ret); } @@ -1752,7 +1755,7 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root) thd->update_charset(); - DBUG_PRINT("info",("old_sql_mode=%d new_sql_mode=%d",old_sql_mode, sql_mode)); + DBUG_PRINT("info",("old_sql_mode: %lu new_sql_mode: %lu",old_sql_mode, sql_mode)); thd->variables.sql_mode= this->sql_mode; /* Change the memory root for the execution time */ if (mem_root) @@ -1769,7 +1772,7 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root) thd->query= show_create.c_ptr_safe(); thd->query_length= show_create.length(); - DBUG_PRINT("info", ("query:%s",thd->query)); + DBUG_PRINT("info", ("query: %s",thd->query)); event_change_security_context(thd, definer_user, definer_host, dbname, &save_ctx); @@ -1777,14 +1780,14 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root) mysql_init_query(thd, (uchar*) thd->query, thd->query_length); if (MYSQLparse((void *)thd) || thd->is_fatal_error) { - DBUG_PRINT("error", ("error during compile or thd->is_fatal_error=%d", + DBUG_PRINT("error", ("error during compile or thd->is_fatal_error: %d", thd->is_fatal_error)); /* Free lex associated resources QQ: Do we really need all this stuff here? 
*/ sql_print_error("SCHEDULER: Error during compilation of %s.%s or " - "thd->is_fatal_error=%d", + "thd->is_fatal_error: %d", dbname.str, name.str, thd->is_fatal_error); lex.unit.cleanup(); diff --git a/sql/event_data_objects.h b/sql/event_data_objects.h index e7e96d299fb..2da39c2158b 100644 --- a/sql/event_data_objects.h +++ b/sql/event_data_objects.h @@ -111,14 +111,14 @@ public: void *p; DBUG_ENTER("Event_queue_element::new(size)"); p= my_malloc(size, MYF(0)); - DBUG_PRINT("info", ("alloc_ptr=0x%lx", p)); + DBUG_PRINT("info", ("alloc_ptr: 0x%lx", (long) p)); DBUG_RETURN(p); } static void operator delete(void *ptr, size_t size) { DBUG_ENTER("Event_queue_element::delete(ptr,size)"); - DBUG_PRINT("enter", ("free_ptr=0x%lx", ptr)); + DBUG_PRINT("enter", ("free_ptr: 0x%lx", (long) ptr)); TRASH(ptr, size); my_free((gptr) ptr, MYF(0)); DBUG_VOID_RETURN; diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index 3d30aff669b..367c5bae579 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -958,7 +958,7 @@ Event_db_repository::load_named_event(THD *thd, LEX_STRING dbname, Open_tables_state backup; DBUG_ENTER("Event_db_repository::load_named_event"); - DBUG_PRINT("enter",("thd=0x%lx name:%*s",thd, name.length, name.str)); + DBUG_PRINT("enter",("thd: 0x%lx name: %*s", (long) thd, name.length, name.str)); thd->reset_n_backup_open_tables_state(&backup); diff --git a/sql/event_queue.cc b/sql/event_queue.cc index 527a59018a8..7ec665fcd5f 100644 --- a/sql/event_queue.cc +++ b/sql/event_queue.cc @@ -143,7 +143,7 @@ Event_queue::init_queue(THD *thd, Event_db_repository *db_repo) struct event_queue_param *event_queue_param_value= NULL; DBUG_ENTER("Event_queue::init_queue"); - DBUG_PRINT("enter", ("this=0x%lx", this)); + DBUG_PRINT("enter", ("this: 0x%lx", (long) this)); LOCK_QUEUE_DATA(); db_repository= db_repo; @@ -218,7 +218,7 @@ Event_queue::create_event(THD *thd, LEX_STRING dbname, LEX_STRING name) int res; Event_queue_element *new_element; DBUG_ENTER("Event_queue::create_event"); - DBUG_PRINT("enter", ("thd=0x%lx et=%s.%s",thd, dbname.str, name.str)); + DBUG_PRINT("enter", ("thd: 0x%lx et=%s.%s", (long) thd, dbname.str, name.str)); new_element= new Event_queue_element(); res= db_repository->load_named_event(thd, dbname, name, new_element); @@ -229,7 +229,7 @@ Event_queue::create_event(THD *thd, LEX_STRING dbname, LEX_STRING name) new_element->compute_next_execution_time(); LOCK_QUEUE_DATA(); - DBUG_PRINT("info", ("new event in the queue 0x%lx", new_element)); + DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element)); queue_insert_safe(&queue, (byte *) new_element); dbug_dump_queue(thd->query_start()); pthread_cond_broadcast(&COND_queue_state); @@ -264,7 +264,7 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name, Event_queue_element *new_element; DBUG_ENTER("Event_queue::update_event"); - DBUG_PRINT("enter", ("thd=0x%lx et=[%s.%s]", thd, dbname.str, name.str)); + DBUG_PRINT("enter", ("thd: 0x%lx et=[%s.%s]", (long) thd, dbname.str, name.str)); new_element= new Event_queue_element(); @@ -294,7 +294,7 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name, /* If not disabled event */ if (new_element) { - DBUG_PRINT("info", ("new event in the Q 0x%lx", new_element)); + DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element)); queue_insert_safe(&queue, (byte *) new_element); pthread_cond_broadcast(&COND_queue_state); } @@ -322,7 +322,8 @@ void Event_queue::drop_event(THD *thd, LEX_STRING 
dbname, LEX_STRING name) { DBUG_ENTER("Event_queue::drop_event"); - DBUG_PRINT("enter", ("thd=0x%lx db=%s name=%s", thd, dbname.str, name.str)); + DBUG_PRINT("enter", ("thd: 0x%lx db :%s name: %s", (long) thd, + dbname.str, name.str)); LOCK_QUEUE_DATA(); find_n_remove_event(dbname, name); @@ -484,7 +485,7 @@ Event_queue::load_events_from_db(THD *thd) bool clean_the_queue= TRUE; DBUG_ENTER("Event_queue::load_events_from_db"); - DBUG_PRINT("enter", ("thd=0x%lx", thd)); + DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); if ((ret= db_repository->open_event_table(thd, TL_READ, &table))) { @@ -555,7 +556,6 @@ Event_queue::load_events_from_db(THD *thd) goto end; } - DBUG_PRINT("load_events_from_db", ("Adding 0x%lx to the exec list.")); queue_insert_safe(&queue, (byte *) et); count++; } @@ -663,16 +663,20 @@ Event_queue::dbug_dump_queue(time_t now) for (i = 0; i < queue.elements; i++) { et= ((Event_queue_element*)queue_element(&queue, i)); - DBUG_PRINT("info",("et=0x%lx db=%s name=%s",et, et->dbname.str, et->name.str)); - DBUG_PRINT("info", ("exec_at=%llu starts=%llu ends=%llu execs_so_far=%u" - " expr=%lld et.exec_at=%d now=%d (et.exec_at - now)=%d if=%d", - TIME_to_ulonglong_datetime(&et->execute_at), - TIME_to_ulonglong_datetime(&et->starts), - TIME_to_ulonglong_datetime(&et->ends), - et->execution_count, - et->expression, sec_since_epoch_TIME(&et->execute_at), now, - (int)(sec_since_epoch_TIME(&et->execute_at) - now), - sec_since_epoch_TIME(&et->execute_at) <= now)); + DBUG_PRINT("info", ("et: 0x%lx name: %s.%s", (long) et, + et->dbname.str, et->name.str)); + DBUG_PRINT("info", ("exec_at: %lu starts: %lu ends: %lu execs_so_far: %u " + "expr: %ld et.exec_at: %ld now: %ld " + "(et.exec_at - now): %d if: %d", + (long) TIME_to_ulonglong_datetime(&et->execute_at), + (long) TIME_to_ulonglong_datetime(&et->starts), + (long) TIME_to_ulonglong_datetime(&et->ends), + et->execution_count, + (long) et->expression, + (long) (sec_since_epoch_TIME(&et->execute_at)), + (long) now, + (int) (sec_since_epoch_TIME(&et->execute_at) - now), + sec_since_epoch_TIME(&et->execute_at) <= now)); } DBUG_VOID_RETURN; #endif @@ -812,11 +816,11 @@ end: if (to_free) delete top; - DBUG_PRINT("info", ("returning %d. et_new=0x%lx abstime.tv_sec=%d ", - ret, *job_data, abstime? abstime->tv_sec:0)); + DBUG_PRINT("info", ("returning %d et_new: 0x%lx abstime.tv_sec: %ld ", + ret, (long) *job_data, abstime ? abstime->tv_sec : 0)); if (*job_data) - DBUG_PRINT("info", ("db=%s name=%s definer=%s", (*job_data)->dbname.str, + DBUG_PRINT("info", ("db: %s name: %s definer=%s", (*job_data)->dbname.str, (*job_data)->name.str, (*job_data)->definer.str)); DBUG_RETURN(ret); diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc index 6f9f6887c12..b1a82477c3c 100644 --- a/sql/event_scheduler.cc +++ b/sql/event_scheduler.cc @@ -264,8 +264,9 @@ event_worker_thread(void *arg) if (!post_init_event_thread(thd)) { - DBUG_PRINT("info", ("Baikonur, time is %d, BURAN reporting and operational." - "THD=0x%lx", time(NULL), thd)); + DBUG_PRINT("info", ("Baikonur, time is %ld, BURAN reporting and operational." + "THD: 0x%lx", + (long) time(NULL), (long) thd)); sql_print_information("SCHEDULER: [%s.%s of %s] executing in thread %lu. 
" "Execution %u", @@ -378,7 +379,7 @@ Event_scheduler::start() DBUG_ENTER("Event_scheduler::start"); LOCK_DATA(); - DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state])); + DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str)); if (state > INITIALIZED) goto end; @@ -400,7 +401,7 @@ Event_scheduler::start() scheduler_thd= new_thd; DBUG_PRINT("info", ("Setting state go RUNNING")); state= RUNNING; - DBUG_PRINT("info", ("Forking new thread for scheduduler. THD=0x%lx", new_thd)); + DBUG_PRINT("info", ("Forking new thread for scheduduler. THD: 0x%lx", (long) new_thd)); if (pthread_create(&th, &connection_attrib, event_scheduler_thread, (void*)scheduler_param_value)) { @@ -463,7 +464,7 @@ Event_scheduler::run(THD *thd) break; } - DBUG_PRINT("info", ("get_top returned job_data=0x%lx", job_data)); + DBUG_PRINT("info", ("get_top returned job_data: 0x%lx", (long) job_data)); if (job_data) { if ((res= execute_top(thd, job_data))) @@ -522,11 +523,11 @@ Event_scheduler::execute_top(THD *thd, Event_job_data *job_data) ++started_events; - DBUG_PRINT("info", ("Launch succeeded. BURAN is in THD=0x%lx", new_thd)); + DBUG_PRINT("info", ("Launch succeeded. BURAN is in THD: 0x%lx", (long) new_thd)); DBUG_RETURN(FALSE); error: - DBUG_PRINT("error", ("Baikonur, we have a problem! res=%d", res)); + DBUG_PRINT("error", ("Baikonur, we have a problem! res: %d", res)); if (new_thd) { new_thd->proc_info= "Clearing"; @@ -581,10 +582,10 @@ Event_scheduler::stop() { THD *thd= current_thd; DBUG_ENTER("Event_scheduler::stop"); - DBUG_PRINT("enter", ("thd=0x%lx", current_thd)); + DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); LOCK_DATA(); - DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state])); + DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str)); if (state != RUNNING) goto end; @@ -605,7 +606,7 @@ Event_scheduler::stop() */ state= STOPPING; - DBUG_PRINT("info", ("Manager thread has id %d", scheduler_thd->thread_id)); + DBUG_PRINT("info", ("Manager thread has id %lu", scheduler_thd->thread_id)); /* Lock from delete */ pthread_mutex_lock(&scheduler_thd->LOCK_delete); /* This will wake up the thread if it waits on Queue's conditional */ diff --git a/sql/events.cc b/sql/events.cc index 10a8be948ef..3dbc6fd27e1 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -858,7 +858,7 @@ Events::check_system_tables(THD *thd) bool ret= FALSE; DBUG_ENTER("Events::check_system_tables"); - DBUG_PRINT("enter", ("thd=0x%lx", thd)); + DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); thd->reset_n_backup_open_tables_state(&backup); diff --git a/sql/field.cc b/sql/field.cc index d01cc00c711..1551b78bd72 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -8180,8 +8180,8 @@ Field_bit::do_last_null_byte() const bits. On systems with CHAR_BIT > 8 (not very common), the storage will lose the extra bits. 
*/ - DBUG_PRINT("debug", ("bit_ofs=%d, bit_len=%d, bit_ptr=%p", - bit_ofs, bit_len, bit_ptr)); + DBUG_PRINT("test", ("bit_ofs: %d, bit_len: %d bit_ptr: 0x%lx", + bit_ofs, bit_len, (long) bit_ptr)); uchar *result; if (bit_len == 0) result= null_ptr; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 7700aebe367..0703e18b5f7 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -413,7 +413,8 @@ Thd_ndb::get_open_table(THD *thd, const void *key) thd_ndb_share->stat.no_uncommitted_rows_count= 0; thd_ndb_share->stat.records= ~(ha_rows)0; } - DBUG_PRINT("exit", ("thd_ndb_share: 0x%x key: 0x%x", thd_ndb_share, key)); + DBUG_PRINT("exit", ("thd_ndb_share: 0x%lx key: 0x%lx", + (long) thd_ndb_share, (long) key)); DBUG_RETURN(thd_ndb_share); } @@ -761,8 +762,8 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, blob_ptr= (char*)""; } - DBUG_PRINT("value", ("set blob ptr=%p len=%u", - blob_ptr, blob_len)); + DBUG_PRINT("value", ("set blob ptr: 0x%lx len: %u", + (long) blob_ptr, blob_len)); DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26)); if (set_blob_value) @@ -847,8 +848,8 @@ int get_ndb_blobs_value(TABLE* table, NdbValue* value_array, uint32 len= 0xffffffff; // Max uint32 if (ndb_blob->readData(buf, len) != 0) ERR_RETURN(ndb_blob->getNdbError()); - DBUG_PRINT("info", ("[%u] offset=%u buf=%p len=%u [ptrdiff=%d]", - i, offset, buf, len, (int)ptrdiff)); + DBUG_PRINT("info", ("[%u] offset: %u buf: 0x%lx len=%u [ptrdiff=%d]", + i, offset, (long) buf, len, (int)ptrdiff)); DBUG_ASSERT(len == len64); // Ugly hack assumes only ptr needs to be changed field_blob->ptr+= ptrdiff; @@ -1171,8 +1172,8 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info, index= dict->getIndexGlobal(index_name, *m_table); if (!index) ERR_RETURN(dict->getNdbError()); - DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d", - index, + DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d", + (long) index, index->getObjectId(), index->getObjectVersion() & 0xFFFFFF, index->getObjectVersion() >> 24, @@ -1215,8 +1216,8 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info, index= dict->getIndexGlobal(unique_index_name, *m_table); if (!index) ERR_RETURN(dict->getNdbError()); - DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d", - index, + DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d", + (long) index, index->getObjectId(), index->getObjectVersion() & 0xFFFFFF, index->getObjectVersion() >> 24, @@ -2305,7 +2306,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, // Set bound if not done with this key if (p.key != NULL) { - DBUG_PRINT("info", ("key %d:%d offset=%d length=%d last=%d bound=%d", + DBUG_PRINT("info", ("key %d:%d offset: %d length: %d last: %d bound: %d", j, i, tot_len, part_len, p.part_last, p.bound_type)); DBUG_DUMP("info", (const char*)p.part_ptr, part_store_len); @@ -2462,7 +2463,7 @@ int ha_ndbcluster::full_table_scan(byte *buf) part_spec.start_part= 0; part_spec.end_part= m_part_info->get_tot_partitions() - 1; prune_partition_set(table, &part_spec); - DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u", + DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u", part_spec.start_part, part_spec.end_part)); /* If partition pruning has found no partition in set @@ -2658,7 +2659,7 @@ int ha_ndbcluster::write_row(byte *record) { // Send rows to NDB DBUG_PRINT("info", ("Sending inserts to NDB, "\ - "rows_inserted:%d, 
bulk_insert_rows: %d", + "rows_inserted: %d bulk_insert_rows: %d", (int)m_rows_inserted, (int)m_bulk_insert_rows)); m_bulk_insert_not_flushed= FALSE; @@ -3108,7 +3109,8 @@ void ndb_unpack_record(TABLE *table, NdbValue *value, char* ptr; field_blob->get_ptr(&ptr, row_offset); uint32 len= field_blob->get_length(row_offset); - DBUG_PRINT("info",("[%u] SET ptr=%p len=%u", col_no, ptr, len)); + DBUG_PRINT("info",("[%u] SET ptr: 0x%lx len: %u", + col_no, (long) ptr, len)); #endif } } @@ -3350,7 +3352,7 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, if (m_use_partition_function) { get_partition_set(table, buf, active_index, start_key, &part_spec); - DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u", + DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u", part_spec.start_part, part_spec.end_part)); /* If partition pruning has found no partition in set @@ -3876,7 +3878,7 @@ int ha_ndbcluster::end_bulk_insert() NdbTransaction *trans= m_active_trans; // Send rows to NDB DBUG_PRINT("info", ("Sending inserts to NDB, "\ - "rows_inserted:%d, bulk_insert_rows: %d", + "rows_inserted: %d bulk_insert_rows: %d", (int) m_rows_inserted, (int) m_bulk_insert_rows)); m_bulk_insert_not_flushed= FALSE; if (m_transaction_on) @@ -5101,13 +5103,12 @@ void ha_ndbcluster::prepare_for_alter() int ha_ndbcluster::add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys) { - DBUG_ENTER("ha_ndbcluster::add_index"); - DBUG_PRINT("info", ("ha_ndbcluster::add_index to table %s", - table_arg->s->table_name)); int error= 0; uint idx; - + DBUG_ENTER("ha_ndbcluster::add_index"); + DBUG_PRINT("enter", ("table %s", table_arg->s->table_name.str)); DBUG_ASSERT(m_share->state == NSS_ALTERED); + for (idx= 0; idx < num_of_keys; idx++) { KEY *key= key_info + idx; @@ -6662,7 +6663,7 @@ static int ndbcluster_end(handlerton *hton, ha_panic_function type) void ha_ndbcluster::print_error(int error, myf errflag) { DBUG_ENTER("ha_ndbcluster::print_error"); - DBUG_PRINT("enter", ("error = %d", error)); + DBUG_PRINT("enter", ("error: %d", error)); if (error == HA_ERR_NO_PARTITION_FOUND) m_part_info->print_no_partition_found(table); @@ -7168,16 +7169,16 @@ static void dbug_print_open_tables() for (uint i= 0; i < ndbcluster_open_tables.records; i++) { NDB_SHARE *share= (NDB_SHARE*) hash_element(&ndbcluster_open_tables, i); - DBUG_PRINT("share", - ("[%d] 0x%lx key: %s key_length: %d", - i, share, share->key, share->key_length)); - DBUG_PRINT("share", - ("db.tablename: %s.%s use_count: %d commit_count: %d", + DBUG_PRINT("loop", + ("[%d] 0x%lx key: %s key_length: %d", + i, (long) share, share->key, share->key_length)); + DBUG_PRINT("loop", + ("db.tablename: %s.%s use_count: %d commit_count: %lu", share->db, share->table_name, - share->use_count, share->commit_count)); + share->use_count, (ulong) share->commit_count)); #ifdef HAVE_NDB_BINLOG if (share->table) - DBUG_PRINT("share", + DBUG_PRINT("loop", ("table->s->db.table_name: %s.%s", share->table->s->db.str, share->table->s->table_name.str)); #endif @@ -7330,13 +7331,13 @@ static int rename_share(NDB_SHARE *share, const char *new_key) share->table_name= share->db + strlen(share->db) + 1; ha_ndbcluster::set_tabname(new_key, share->table_name); - DBUG_PRINT("rename_share", - ("0x%lx key: %s key_length: %d", - share, share->key, share->key_length)); - DBUG_PRINT("rename_share", - ("db.tablename: %s.%s use_count: %d commit_count: %d", + DBUG_PRINT("info", + ("share: 0x%lx key: %s key_length: %d", + (long) share, share->key, 
share->key_length)); + DBUG_PRINT("info", + ("db.tablename: %s.%s use_count: %d commit_count: %lu", share->db, share->table_name, - share->use_count, share->commit_count)); + share->use_count, (ulong) share->commit_count)); if (share->table) { DBUG_PRINT("rename_share", @@ -7371,13 +7372,13 @@ NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share) dbug_print_open_tables(); - DBUG_PRINT("get_share", - ("0x%lx key: %s key_length: %d", - share, share->key, share->key_length)); - DBUG_PRINT("get_share", - ("db.tablename: %s.%s use_count: %d commit_count: %d", + DBUG_PRINT("info", + ("share: 0x%lx key: %s key_length: %d", + (long) share, share->key, share->key_length)); + DBUG_PRINT("info", + ("db.tablename: %s.%s use_count: %d commit_count: %lu", share->db, share->table_name, - share->use_count, share->commit_count)); + share->use_count, (ulong) share->commit_count)); pthread_mutex_unlock(&ndbcluster_mutex); return share; } @@ -7485,13 +7486,12 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table, void ndbcluster_real_free_share(NDB_SHARE **share) { DBUG_ENTER("ndbcluster_real_free_share"); - DBUG_PRINT("real_free_share", - ("0x%lx key: %s key_length: %d", - (*share), (*share)->key, (*share)->key_length)); - DBUG_PRINT("real_free_share", - ("db.tablename: %s.%s use_count: %d commit_count: %d", + DBUG_PRINT("enter", + ("share: 0x%lx key: %s key_length: %d " + "db.tablename: %s.%s use_count: %d commit_count: %lu", + (long) (*share), (*share)->key, (*share)->key_length, (*share)->db, (*share)->table_name, - (*share)->use_count, (*share)->commit_count)); + (*share)->use_count, (ulong) (*share)->commit_count)); hash_delete(&ndbcluster_open_tables, (byte*) *share); thr_lock_delete(&(*share)->lock); @@ -7539,13 +7539,13 @@ void ndbcluster_free_share(NDB_SHARE **share, bool have_lock) else { dbug_print_open_tables(); - DBUG_PRINT("free_share", - ("0x%lx key: %s key_length: %d", - *share, (*share)->key, (*share)->key_length)); - DBUG_PRINT("free_share", - ("db.tablename: %s.%s use_count: %d commit_count: %d", + DBUG_PRINT("info", + ("share: 0x%lx key: %s key_length: %d", + (long) *share, (*share)->key, (*share)->key_length)); + DBUG_PRINT("info", + ("db.tablename: %s.%s use_count: %d commit_count: %lu", (*share)->db, (*share)->table_name, - (*share)->use_count, (*share)->commit_count)); + (*share)->use_count, (ulong) (*share)->commit_count)); } if (!have_lock) pthread_mutex_unlock(&ndbcluster_mutex); @@ -7815,7 +7815,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, get_partition_set(table, curr, active_index, &multi_range_curr->start_key, &part_spec); - DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u", + DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u", part_spec.start_part, part_spec.end_part)); /* If partition pruning has found no partition in set @@ -8347,8 +8347,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) ndb_get_table_statistics(NULL, false, ndb, ndbtab_g.get_table(), &stat) == 0) { char buff[22], buff2[22]; - DBUG_PRINT("ndb_util_thread", - ("Table: %s, commit_count: %llu, rows: %llu", + DBUG_PRINT("info", + ("Table: %s commit_count: %s rows: %s", share->key, llstr(stat.commit_count, buff), llstr(stat.row_count, buff2))); diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index e0b7502a40a..cb2ac56e828 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -161,16 +161,16 @@ static void dbug_print_table(const char *info, TABLE *table) } 
DBUG_PRINT("info", ("%s: %s.%s s->fields: %d " - "reclength: %d rec_buff_length: %d record[0]: %lx " - "record[1]: %lx", + "reclength: %lu rec_buff_length: %u record[0]: 0x%lx " + "record[1]: 0x%lx", info, table->s->db.str, table->s->table_name.str, table->s->fields, table->s->reclength, table->s->rec_buff_length, - table->record[0], - table->record[1])); + (long) table->record[0], + (long) table->record[1])); for (unsigned int i= 0; i < table->s->fields; i++) { @@ -180,7 +180,7 @@ static void dbug_print_table(const char *info, TABLE *table) "ptr: 0x%lx[+%d] null_bit: %u null_ptr: 0x%lx[+%d]", i, f->field_name, - f->flags, + (long) f->flags, (f->flags & PRI_KEY_FLAG) ? "pri" : "attr", (f->flags & NOT_NULL_FLAG) ? "" : ",nullable", (f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed", @@ -189,16 +189,18 @@ static void dbug_print_table(const char *info, TABLE *table) (f->flags & BINARY_FLAG) ? ",binary" : "", f->real_type(), f->pack_length(), - f->ptr, f->ptr - table->record[0], + (long) f->ptr, (int) (f->ptr - table->record[0]), f->null_bit, - f->null_ptr, (byte*) f->null_ptr - table->record[0])); + (long) f->null_ptr, + (int) ((byte*) f->null_ptr - table->record[0]))); if (f->type() == MYSQL_TYPE_BIT) { Field_bit *g= (Field_bit*) f; DBUG_PRINT("MYSQL_TYPE_BIT",("field_length: %d bit_ptr: 0x%lx[+%d] " - "bit_ofs: %u bit_len: %u", - g->field_length, g->bit_ptr, - (byte*) g->bit_ptr-table->record[0], + "bit_ofs: %d bit_len: %u", + g->field_length, (long) g->bit_ptr, + (int) ((byte*) g->bit_ptr - + table->record[0]), g->bit_ofs, g->bit_len)); } } @@ -605,11 +607,11 @@ static int ndbcluster_binlog_end(THD *thd) { DBUG_PRINT("share", ("[%d] 0x%lx key: %s key_length: %d", - i, share, share->key, share->key_length)); + i, (long) share, share->key, share->key_length)); DBUG_PRINT("share", - ("db.tablename: %s.%s use_count: %d commit_count: %d", + ("db.tablename: %s.%s use_count: %d commit_count: %lu", share->db, share->table_name, - share->use_count, share->commit_count)); + share->use_count, (long) share->commit_count)); } } pthread_mutex_unlock(&ndbcluster_mutex); @@ -685,8 +687,8 @@ static NDB_SHARE *ndbcluster_check_apply_status_share() void *share= hash_search(&ndbcluster_open_tables, NDB_APPLY_TABLE_FILE, sizeof(NDB_APPLY_TABLE_FILE) - 1); - DBUG_PRINT("info",("ndbcluster_check_apply_status_share %s %p", - NDB_APPLY_TABLE_FILE, share)); + DBUG_PRINT("info",("ndbcluster_check_apply_status_share %s 0x%lx", + NDB_APPLY_TABLE_FILE, (long) share)); pthread_mutex_unlock(&ndbcluster_mutex); return (NDB_SHARE*) share; } @@ -703,8 +705,8 @@ static NDB_SHARE *ndbcluster_check_schema_share() void *share= hash_search(&ndbcluster_open_tables, NDB_SCHEMA_TABLE_FILE, sizeof(NDB_SCHEMA_TABLE_FILE) - 1); - DBUG_PRINT("info",("ndbcluster_check_schema_share %s %p", - NDB_SCHEMA_TABLE_FILE, share)); + DBUG_PRINT("info",("ndbcluster_check_schema_share %s 0x%lx", + NDB_SCHEMA_TABLE_FILE, (long) share)); pthread_mutex_unlock(&ndbcluster_mutex); return (NDB_SHARE*) share; } @@ -2721,10 +2723,9 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab, if (share->flags & NSF_BLOB_FLAG) op->mergeEvents(TRUE); // currently not inherited from event - DBUG_PRINT("info", ("share->ndb_value[0]: 0x%x", - share->ndb_value[0])); - DBUG_PRINT("info", ("share->ndb_value[1]: 0x%x", - share->ndb_value[1])); + DBUG_PRINT("info", ("share->ndb_value[0]: 0x%lx share->ndb_value[1]: 0x%lx", + (long) share->ndb_value[0], + (long) share->ndb_value[1])); int n_columns= ndbtab->getNoOfColumns(); int n_fields= table ? 
table->s->fields : 0; // XXX ??? for (int j= 0; j < n_columns; j++) @@ -2778,12 +2779,14 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab, } share->ndb_value[0][j].ptr= attr0.ptr; share->ndb_value[1][j].ptr= attr1.ptr; - DBUG_PRINT("info", ("&share->ndb_value[0][%d]: 0x%x " - "share->ndb_value[0][%d]: 0x%x", - j, &share->ndb_value[0][j], j, attr0.ptr)); - DBUG_PRINT("info", ("&share->ndb_value[1][%d]: 0x%x " - "share->ndb_value[1][%d]: 0x%x", - j, &share->ndb_value[0][j], j, attr1.ptr)); + DBUG_PRINT("info", ("&share->ndb_value[0][%d]: 0x%lx " + "share->ndb_value[0][%d]: 0x%lx", + j, (long) &share->ndb_value[0][j], + j, (long) attr0.ptr)); + DBUG_PRINT("info", ("&share->ndb_value[1][%d]: 0x%lx " + "share->ndb_value[1][%d]: 0x%lx", + j, (long) &share->ndb_value[0][j], + j, (long) attr1.ptr)); } op->setCustomData((void *) share); // set before execute share->op= op; // assign op in NDB_SHARE @@ -2826,8 +2829,8 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab, (void) pthread_cond_signal(&injector_cond); } - DBUG_PRINT("info",("%s share->op: 0x%lx, share->use_count: %u", - share->key, share->op, share->use_count)); + DBUG_PRINT("info",("%s share->op: 0x%lx share->use_count: %u", + share->key, (long) share->op, share->use_count)); if (ndb_extra_logging) sql_print_information("NDB Binlog: logging %s", share->key); @@ -3012,10 +3015,11 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb, free_share(&apply_status_share); apply_status_share= 0; } - DBUG_PRINT("info", ("CLUSTER FAILURE EVENT: " - "%s received share: 0x%lx op: %lx share op: %lx " - "op_old: %lx", - share->key, share, pOp, share->op, share->op_old)); + DBUG_PRINT("error", ("CLUSTER FAILURE EVENT: " + "%s received share: 0x%lx op: 0x%lx share op: 0x%lx " + "op_old: 0x%lx", + share->key, (long) share, (long) pOp, + (long) share->op, (long) share->op_old)); break; case NDBEVENT::TE_DROP: if (apply_status_share == share) @@ -3033,10 +3037,11 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb, // fall through case NDBEVENT::TE_ALTER: row.n_schemaops++; - DBUG_PRINT("info", ("TABLE %s EVENT: %s received share: 0x%lx op: %lx " - "share op: %lx op_old: %lx", - type == NDBEVENT::TE_DROP ? "DROP" : "ALTER", - share->key, share, pOp, share->op, share->op_old)); + DBUG_PRINT("info", ("TABLE %s EVENT: %s received share: 0x%lx op: 0x%lx " + "share op: 0x%lx op_old: 0x%lx", + type == NDBEVENT::TE_DROP ? 
"DROP" : "ALTER", + share->key, (long) share, (long) pOp, + (long) share->op, (long) share->op_old)); break; case NDBEVENT::TE_NODE_FAILURE: /* fall through */ @@ -3513,7 +3518,8 @@ restart: } } // now check that we have epochs consistant with what we had before the restart - DBUG_PRINT("info", ("schema_res: %d schema_gci: %d", schema_res, schema_gci)); + DBUG_PRINT("info", ("schema_res: %d schema_gci: %lu", schema_res, + (long) schema_gci)); { i_ndb->flushIncompleteEvents(schema_gci); s_ndb->flushIncompleteEvents(schema_gci); @@ -3697,8 +3703,8 @@ restart: != NULL) { NDB_SHARE *share= (NDB_SHARE*)gci_op->getCustomData(); - DBUG_PRINT("info", ("per gci_op: %p share: %p event_types: 0x%x", - gci_op, share, event_types)); + DBUG_PRINT("info", ("per gci_op: 0x%lx share: 0x%lx event_types: 0x%x", + (long) gci_op, (long) share, event_types)); // workaround for interface returning TE_STOP events // which are normally filtered out below in the nextEvent loop if ((event_types & ~NdbDictionary::Event::TE_STOP) == 0) @@ -3784,11 +3790,13 @@ restart: { NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData(); DBUG_PRINT("info", - ("EVENT TYPE: %d GCI: %lld last applied: %lld " - "share: 0x%lx (%s.%s)", pOp->getEventType(), gci, - ndb_latest_applied_binlog_epoch, share, - share ? share->db : "share == NULL", - share ? share->table_name : "")); + ("EVENT TYPE: %d GCI: %ld last applied: %ld " + "share: 0x%lx (%s.%s)", pOp->getEventType(), + (long) gci, + (long) ndb_latest_applied_binlog_epoch, + (long) share, + share ? share->db : "'NULL'", + share ? share->table_name : "'NULL'")); DBUG_ASSERT(share != 0); } // assert that there is consistancy between gci op list diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 3edd3923779..82b43ce578f 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -2027,7 +2027,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root) if (!(m_file[i]= get_new_handler(table_share, mem_root, m_engine_array[i]))) DBUG_RETURN(TRUE); - DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i])); + DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i]->db_type)); } /* For the moment we only support partition over the same table engine */ if (m_engine_array[0] == myisam_hton) @@ -2939,8 +2939,8 @@ int ha_partition::rnd_init(bool scan) include_partition_fields_in_used_fields(); /* Now we see what the index of our first important partition is */ - DBUG_PRINT("info", ("m_part_info->used_partitions 0x%x", - m_part_info->used_partitions.bitmap)); + DBUG_PRINT("info", ("m_part_info->used_partitions: 0x%lx", + (long) m_part_info->used_partitions.bitmap)); part_id= bitmap_get_first_set(&(m_part_info->used_partitions)); DBUG_PRINT("info", ("m_part_spec.start_part %d", part_id)); diff --git a/sql/handler.cc b/sql/handler.cc index ae679826dbf..f874100e634 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -1513,7 +1513,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode, DBUG_ENTER("handler::ha_open"); DBUG_PRINT("enter", ("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d", - name, table_share->db_type, table_arg->db_stat, mode, + name, ht->db_type, table_arg->db_stat, mode, test_if_locked)); table= table_arg; @@ -1927,8 +1927,8 @@ int handler::update_auto_increment() void handler::column_bitmaps_signal() { DBUG_ENTER("column_bitmaps_signal"); - DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx", table->read_set, - table->write_set)); + DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx", (long) table->read_set, + (long) table->write_set)); 
DBUG_VOID_RETURN; } @@ -3507,8 +3507,10 @@ namespace int write_locked_table_maps(THD *thd) { DBUG_ENTER("write_locked_table_maps"); - DBUG_PRINT("enter", ("thd=%p, thd->lock=%p, thd->locked_tables=%p, thd->extra_lock", - thd, thd->lock, thd->locked_tables, thd->extra_lock)); + DBUG_PRINT("enter", ("thd: 0x%lx thd->lock: 0x%lx thd->locked_tables: 0x%lx " + "thd->extra_lock: 0x%lx", + (long) thd, (long) thd->lock, + (long) thd->locked_tables, (long) thd->extra_lock)); if (thd->get_binlog_table_maps() == 0) { @@ -3528,7 +3530,7 @@ namespace ++table_ptr) { TABLE *const table= *table_ptr; - DBUG_PRINT("info", ("Checking table %s", table->s->table_name)); + DBUG_PRINT("info", ("Checking table %s", table->s->table_name.str)); if (table->current_lock == F_WRLCK && check_table_binlog_row_based(thd, table)) { diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 1d3048af54c..b252144ae2e 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -3061,7 +3061,7 @@ longlong Item_is_not_null_test::val_int() if (!used_tables_cache) { owner->was_null|= (!cached_value); - DBUG_PRINT("info", ("cached :%ld", (long) cached_value)); + DBUG_PRINT("info", ("cached: %ld", (long) cached_value)); DBUG_RETURN(cached_value); } if (args[0]->is_null()) diff --git a/sql/item_func.cc b/sql/item_func.cc index 407ab4a66f7..574a8055ac3 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -5044,7 +5044,7 @@ Item_func_sp::result_type() const { Field *field; DBUG_ENTER("Item_func_sp::result_type"); - DBUG_PRINT("info", ("m_sp = %p", m_sp)); + DBUG_PRINT("info", ("m_sp: 0x%lx", (long) m_sp)); if (result_field) DBUG_RETURN(result_field->result_type()); diff --git a/sql/log.cc b/sql/log.cc index 620445aecfa..b12eca9bb07 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -1344,7 +1344,7 @@ binlog_trans_log_savepos(THD *thd, my_off_t *pos) (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; DBUG_ASSERT(mysql_bin_log.is_open()); *pos= trx_data->position(); - DBUG_PRINT("return", ("*pos=%u", *pos)); + DBUG_PRINT("return", ("*pos: %lu", (ulong) *pos)); DBUG_VOID_RETURN; } @@ -1368,7 +1368,7 @@ static void binlog_trans_log_truncate(THD *thd, my_off_t pos) { DBUG_ENTER("binlog_trans_log_truncate"); - DBUG_PRINT("enter", ("pos=%u", pos)); + DBUG_PRINT("enter", ("pos: %lu", (ulong) pos)); DBUG_ASSERT(thd->ha_data[binlog_hton->slot] != NULL); /* Only true if binlog_trans_log_savepos() wasn't called before */ @@ -1444,8 +1444,8 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data, DBUG_ENTER("binlog_end_trans"); int error=0; IO_CACHE *trans_log= &trx_data->trans_log; - DBUG_PRINT("enter", ("transaction: %s, end_ev=%p", - all ? "all" : "stmt", end_ev)); + DBUG_PRINT("enter", ("transaction: %s end_ev: 0x%lx", + all ? "all" : "stmt", (long) end_ev)); DBUG_PRINT("info", ("thd->options={ %s%s}", FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT), FLAGSTR(thd->options, OPTION_BEGIN))); @@ -3417,12 +3417,13 @@ int THD::binlog_setup_trx_data() void THD::binlog_start_trans_and_stmt() { - DBUG_ENTER("binlog_start_trans_and_stmt"); binlog_trx_data *trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot]; - DBUG_PRINT("enter", ("trx_data=0x%lu", trx_data)); - if (trx_data) - DBUG_PRINT("enter", ("trx_data->before_stmt_pos=%u", - trx_data->before_stmt_pos)); + DBUG_ENTER("binlog_start_trans_and_stmt"); + DBUG_PRINT("enter", ("trx_data: 0x%lx trx_data->before_stmt_pos: %lu", + (long) trx_data, + (trx_data ? 
(ulong) trx_data->before_stmt_pos : + (ulong) 0))); + if (trx_data == NULL || trx_data->before_stmt_pos == MY_OFF_T_UNDEF) { @@ -3453,8 +3454,8 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans) { int error; DBUG_ENTER("THD::binlog_write_table_map"); - DBUG_PRINT("enter", ("table: %0xlx (%s: #%u)", - (long) table, table->s->table_name, + DBUG_PRINT("enter", ("table: 0x%lx (%s: #%lu)", + (long) table, table->s->table_name.str, table->s->table_map_id)); /* Pre-conditions */ @@ -3517,7 +3518,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd, { DBUG_ENTER("MYSQL_BIN_LOG::flush_and_set_pending_rows_event(event)"); DBUG_ASSERT(mysql_bin_log.is_open()); - DBUG_PRINT("enter", ("event=%p", event)); + DBUG_PRINT("enter", ("event: 0x%lx", (long) event)); int error= 0; @@ -3526,7 +3527,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd, DBUG_ASSERT(trx_data); - DBUG_PRINT("info", ("trx_data->pending()=%p", trx_data->pending())); + DBUG_PRINT("info", ("trx_data->pending(): 0x%lx", (long) trx_data->pending())); if (Rows_log_event* pending= trx_data->pending()) { @@ -3681,9 +3682,9 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info) my_off_t trans_log_pos= my_b_tell(trans_log); if (event_info->get_cache_stmt() || trans_log_pos != 0) { - DBUG_PRINT("info", ("Using trans_log: cache=%d, trans_log_pos=%u", + DBUG_PRINT("info", ("Using trans_log: cache: %d, trans_log_pos: %lu", event_info->get_cache_stmt(), - trans_log_pos)); + (ulong) trans_log_pos)); if (trans_log_pos == 0) thd->binlog_start_trans_and_stmt(); file= trans_log; @@ -3725,15 +3726,17 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info) } if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0) { - DBUG_PRINT("info",("number of auto_inc intervals: %lu", - thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements())); + DBUG_PRINT("info",("number of auto_inc intervals: %u", + thd->auto_inc_intervals_in_cur_stmt_for_binlog. + nb_elements())); /* If the auto_increment was second in a table's index (possible with MyISAM or BDB) (table->next_number_key_offset != 0), such event is in fact not necessary. We could avoid logging it. */ - Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT, - thd->auto_inc_intervals_in_cur_stmt_for_binlog.minimum()); + Intvar_log_event e(thd, (uchar) INSERT_ID_EVENT, + thd->auto_inc_intervals_in_cur_stmt_for_binlog. 
+ minimum()); if (e.write(file)) goto err; } diff --git a/sql/log_event.cc b/sql/log_event.cc index e170194bc37..79e3a35cbe8 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -5345,8 +5345,8 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, uint8 const common_header_len= description_event->common_header_len; uint8 const post_header_len= description_event->post_header_len[event_type-1]; - DBUG_PRINT("enter",("event_len=%ld, common_header_len=%d, " - "post_header_len=%d", + DBUG_PRINT("enter",("event_len: %u common_header_len: %d " + "post_header_len: %d", event_len, common_header_len, post_header_len)); @@ -5376,7 +5376,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, const byte* const ptr_rows_data= var_start + byte_count + 1; my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf); - DBUG_PRINT("info",("m_table_id=%lu, m_flags=%d, m_width=%u, data_size=%lu", + DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %u", m_table_id, m_flags, m_width, data_size)); m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME)); @@ -5416,7 +5416,7 @@ int Rows_log_event::do_add_row_data(byte *const row_data, would save binlog space. TODO */ DBUG_ENTER("Rows_log_event::do_add_row_data"); - DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data, + DBUG_PRINT("enter", ("row_data: 0x%lx length: %u", (ulong) row_data, length)); /* Don't print debug messages when running valgrind since they can @@ -5513,7 +5513,7 @@ unpack_row(RELAY_LOG_INFO *rli, { DBUG_ENTER("unpack_row"); DBUG_ASSERT(record && row); - DBUG_PRINT("enter", ("row=0x%lx; record=0x%lx", row, record)); + DBUG_PRINT("enter", ("row: 0x%lx record: 0x%lx", (long) row, (long) record)); my_ptrdiff_t const offset= record - (byte*) table->record[0]; my_size_t master_null_bytes= table->s->null_bytes; @@ -5555,10 +5555,12 @@ unpack_row(RELAY_LOG_INFO *rli, if (bitmap_is_set(cols, field_ptr - begin_ptr)) { DBUG_ASSERT(table->record[0] <= f->ptr); - DBUG_ASSERT(f->ptr < table->record[0] + table->s->reclength + (f->pack_length_in_rec() == 0)); + DBUG_ASSERT(f->ptr < (table->record[0] + table->s->reclength + + (f->pack_length_in_rec() == 0))); f->move_field_offset(offset); - DBUG_PRINT("info", ("unpacking column '%s' to 0x%lx", f->field_name, f->ptr)); + DBUG_PRINT("info", ("unpacking column '%s' to 0x%lx", f->field_name, + (long) f->ptr)); ptr= f->unpack(f->ptr, ptr); f->move_field_offset(-offset); /* Field...::unpack() cannot return 0 */ @@ -6068,7 +6070,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len, uint8 common_header_len= description_event->common_header_len; uint8 post_header_len= description_event->post_header_len[TABLE_MAP_EVENT-1]; - DBUG_PRINT("info",("event_len=%ld, common_header_len=%d, post_header_len=%d", + DBUG_PRINT("info",("event_len: %u common_header_len: %d post_header_len: %d", event_len, common_header_len, post_header_len)); /* @@ -6116,10 +6118,10 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len, uchar *ptr_after_colcnt= (uchar*) ptr_colcnt; m_colcnt= net_field_length(&ptr_after_colcnt); - DBUG_PRINT("info",("m_dblen=%d off=%d m_tbllen=%d off=%d m_colcnt=%d off=%d", - m_dblen, ptr_dblen-(const byte*)vpart, - m_tbllen, ptr_tbllen-(const byte*)vpart, - m_colcnt, ptr_colcnt-(const byte*)vpart)); + DBUG_PRINT("info",("m_dblen: %d off: %ld m_tbllen: %d off: %ld m_colcnt: %lu off: %ld", + m_dblen, (long) (ptr_dblen-(const byte*)vpart), + m_tbllen, (long) (ptr_tbllen-(const byte*)vpart), + 
m_colcnt, (long) (ptr_colcnt-(const byte*)vpart))); /* Allocate mem for all fields in one go. If fails, catched in is_valid() */ m_memory= my_multi_malloc(MYF(MY_WME), @@ -6523,10 +6525,10 @@ copy_extra_record_fields(TABLE *table, my_size_t master_reclength, my_ptrdiff_t master_fields) { - DBUG_PRINT("info", ("Copying to %p " + DBUG_PRINT("info", ("Copying to 0x%lx " "from field %ld at offset %u " - "to field %d at offset %u", - table->record[0], + "to field %d at offset %lu", + (long) table->record[0], master_fields, master_reclength, table->s->fields, table->s->reclength)); /* diff --git a/sql/mysqld.cc b/sql/mysqld.cc index bf803fad360..423dfc19fdf 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2121,7 +2121,7 @@ the thread stack. Please read http://www.mysql.com/doc/en/Linux.html\n\n", #ifdef HAVE_STACKTRACE if (!(test_flags & TEST_NO_STACKTRACE)) { - fprintf(stderr,"thd=%p\n",thd); + fprintf(stderr,"thd: 0x%lx\n",(long) thd); print_stacktrace(thd ? (gptr) thd->thread_stack : (gptr) 0, thread_stack); } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 64300c16ee7..1d6b384df35 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -10814,7 +10814,7 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map, if (!tmp.length()) tmp.append(STRING_WITH_LEN("(empty)")); - DBUG_PRINT("info", ("SEL_TREE %p (%s) scans:%s", tree, msg, tmp.ptr())); + DBUG_PRINT("info", ("SEL_TREE: 0x%lx (%s) scans: %s", (long) tree, msg, tmp.ptr())); DBUG_VOID_RETURN; } diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 2b034d50d6a..762fcfb7a6a 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -564,8 +564,8 @@ err: mysql_free_result(res); if (error) { - sql_print_error("While trying to obtain the list of slaves from the master \ -'%s:%d', user '%s' got the following error: '%s'", + sql_print_error("While trying to obtain the list of slaves from the master " + "'%s:%d', user '%s' got the following error: '%s'", mi->host, mi->port, mi->user, error); DBUG_RETURN(1); } diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index f01fc5d1c9e..a2edb9dc8a8 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -402,7 +402,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, bool look_for_description_event) { DBUG_ENTER("init_relay_log_pos"); - DBUG_PRINT("info", ("pos=%lu", pos)); + DBUG_PRINT("info", ("pos: %lu", (ulong) pos)); *errmsg=0; pthread_mutex_t *log_lock=rli->relay_log.get_log_lock(); @@ -855,7 +855,7 @@ void st_relay_log_info::close_temporary_tables() Don't ask for disk deletion. For now, anyway they will be deleted when slave restarts, but it is a better intention to not delete them. 
*/ - DBUG_PRINT("info", ("table: %p", table)); + DBUG_PRINT("info", ("table: 0x%lx", (long) table)); close_temporary(table, 1, 0); } save_temporary_tables= 0; diff --git a/sql/rpl_tblmap.cc b/sql/rpl_tblmap.cc index a0272b23ee8..97f0066233c 100644 --- a/sql/rpl_tblmap.cc +++ b/sql/rpl_tblmap.cc @@ -50,17 +50,17 @@ table_mapping::~table_mapping() st_table* table_mapping::get_table(ulong table_id) { DBUG_ENTER("table_mapping::get_table(ulong)"); - DBUG_PRINT("enter", ("table_id=%d", table_id)); + DBUG_PRINT("enter", ("table_id: %lu", table_id)); entry *e= find_entry(table_id); if (e) { - DBUG_PRINT("info", ("tid %d -> table %p (%s)", - table_id, e->table, + DBUG_PRINT("info", ("tid %lu -> table 0x%lx (%s)", + table_id, (long) e->table, MAYBE_TABLE_NAME(e->table))); DBUG_RETURN(e->table); } - DBUG_PRINT("info", ("tid %d is not mapped!", table_id)); + DBUG_PRINT("info", ("tid %lu is not mapped!", table_id)); DBUG_RETURN(NULL); } @@ -93,9 +93,9 @@ int table_mapping::expand() int table_mapping::set_table(ulong table_id, TABLE* table) { DBUG_ENTER("table_mapping::set_table(ulong,TABLE*)"); - DBUG_PRINT("enter", ("table_id=%d, table=%p (%s)", + DBUG_PRINT("enter", ("table_id: %lu table: 0x%lx (%s)", table_id, - table, MAYBE_TABLE_NAME(table))); + (long) table, MAYBE_TABLE_NAME(table))); entry *e= find_entry(table_id); if (e == 0) { @@ -111,8 +111,8 @@ int table_mapping::set_table(ulong table_id, TABLE* table) e->table= table; my_hash_insert(&m_table_ids,(byte *)e); - DBUG_PRINT("info", ("tid %d -> table %p (%s)", - table_id, e->table, + DBUG_PRINT("info", ("tid %lu -> table 0x%lx (%s)", + table_id, (long) e->table, MAYBE_TABLE_NAME(e->table))); DBUG_RETURN(0); // All OK } diff --git a/sql/set_var.cc b/sql/set_var.cc index 5590e71c810..dc78eb2f509 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -3943,7 +3943,7 @@ sys_var_event_scheduler::update(THD *thd, set_var *var) DBUG_RETURN(TRUE); } - DBUG_PRINT("new_value", ("%lu", (bool)var->save_result.ulong_value)); + DBUG_PRINT("info", ("new_value: %d", (int) var->save_result.ulong_value)); Item_result var_type= var->value->result_type(); diff --git a/sql/slave.cc b/sql/slave.cc index d06b405b06b..4c5f0fc4764 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1609,7 +1609,7 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) DBUG_RETURN(packet_error); } - DBUG_PRINT("info",( "len=%u, net->read_pos[4] = %d\n", + DBUG_PRINT("exit", ("len: %lu net->read_pos[4]: %d", len, mysql->net.read_pos[4])); DBUG_RETURN(len - 1); } @@ -1800,7 +1800,7 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) ev->when = time(NULL); ev->thd = thd; // because up to this point, ev->thd == 0 exec_res = ev->exec_event(rli); - DBUG_PRINT("info", ("exec_event result = %d", exec_res)); + DBUG_PRINT("info", ("exec_event result: %d", exec_res)); DBUG_ASSERT(rli->sql_thd==thd); /* Format_description_log_event should not be deleted because it will be @@ -1951,9 +1951,9 @@ pthread_handler_t handle_slave_io(void *arg) // we can get killed during safe_connect if (!safe_connect(thd, mysql, mi)) { - sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',\ - replication started in log '%s' at position %s", mi->user, - mi->host, mi->port, + sql_print_information("Slave I/O thread: connected to master '%s@%s:%d'," + "replication started in log '%s' at position %s", + mi->user, mi->host, mi->port, IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff)); /* @@ -3107,8 +3107,8 @@ static int connect_to_master(THD* thd, MYSQL* mysql, 
MASTER_INFO* mi, { last_errno=mysql_errno(mysql); suppress_warnings= 0; - sql_print_error("Slave I/O thread: error %s to master \ -'%s@%s:%d': \ + sql_print_error("Slave I/O thread: error %s to master " + "'%s@%s:%d': \ Error: '%s' errno: %d retry-time: %d retries: %lu", (reconnect ? "reconnecting" : "connecting"), mi->user, mi->host, mi->port, diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 47a623ec749..622d9efdde0 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -899,7 +899,7 @@ subst_spvars(THD *thd, sp_instr *instr, LEX_STRING *query_str) break; val= (*splocal)->this_item(); - DBUG_PRINT("info", ("print %p", val)); + DBUG_PRINT("info", ("print 0x%lx", (long) val)); str_value= sp_get_item_value(val, &str_value_holder); if (str_value) res|= qbuf.append(*str_value); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 28bc1e9dcbf..f1a685778f9 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1087,7 +1087,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) if (!lock_in_use) VOID(pthread_mutex_lock(&LOCK_open)); - DBUG_PRINT("info", ("thd->open_tables: %p", thd->open_tables)); + DBUG_PRINT("info", ("thd->open_tables: 0x%lx", (long) thd->open_tables)); found_old_table= 0; while (thd->open_tables) @@ -1177,6 +1177,16 @@ static inline uint tmpkeyval(THD *thd, TABLE *table) void close_temporary_tables(THD *thd) { TABLE *table; + TABLE *next; + /* + TODO: 5.1 maintains prev link in temporary_tables + double-linked list so we could fix it. But it is not necessary + at this time when the list is being destroyed + */ + TABLE *prev_table; + /* Assume thd->options has OPTION_QUOTE_SHOW_CREATE */ + bool was_quote_show= TRUE; + if (!thd->temporary_tables) return; @@ -1192,12 +1202,7 @@ void close_temporary_tables(THD *thd) return; } - TABLE *next, - *prev_table /* TODO: 5.1 maintaines prev link in temporary_tables - double-linked list so we could fix it. 
But it is not necessary - at this time when the list is being destroyed */; - bool was_quote_show= true; /* to assume thd->options has OPTION_QUOTE_SHOW_CREATE */ - // Better add "if exists", in case a RESET MASTER has been done + /* Better add "if exists", in case a RESET MASTER has been done */ const char stub[]= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS "; uint stub_len= sizeof(stub) - 1; char buf[256]; @@ -1303,7 +1308,7 @@ void close_temporary_tables(THD *thd) } } if (!was_quote_show) - thd->options &= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */ + thd->options&= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */ thd->temporary_tables=0; } @@ -2069,7 +2074,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, VOID(pthread_mutex_unlock(&LOCK_open)); DBUG_RETURN(0); // VIEW } - DBUG_PRINT("info", ("inserting table %p into the cache", table)); + DBUG_PRINT("info", ("inserting table 0x%lx into the cache", (long) table)); VOID(my_hash_insert(&open_cache,(byte*) table)); } @@ -2399,7 +2404,7 @@ bool table_is_used(TABLE *table, bool wait_for_name_lock) { DBUG_PRINT("info", ("share: 0x%lx locked_by_logger: %d " "locked_by_flush: %d locked_by_name: %d " - "db_stat: %u version: %u", + "db_stat: %u version: %lu", (ulong) search->s, search->locked_by_logger, search->locked_by_flush, search->locked_by_name, search->db_stat, diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc index 23ca5330053..a48c0ac0b31 100644 --- a/sql/sql_binlog.cc +++ b/sql/sql_binlog.cc @@ -80,8 +80,9 @@ void mysql_client_binlog_statement(THD* thd) int bytes_decoded= base64_decode(strptr, coded_len, buf, &endptr); DBUG_PRINT("info", - ("bytes_decoded=%d; strptr=0x%lu; endptr=0x%lu ('%c':%d)", - bytes_decoded, strptr, endptr, *endptr, *endptr)); + ("bytes_decoded: %d strptr: 0x%lx endptr: 0x%lx ('%c':%d)", + bytes_decoded, (long) strptr, (long) endptr, *endptr, + *endptr)); if (bytes_decoded < 0) { @@ -145,14 +146,15 @@ void mysql_client_binlog_statement(THD* thd) bufptr += event_len; DBUG_PRINT("info",("ev->get_type_code()=%d", ev->get_type_code())); - DBUG_PRINT("info",("bufptr+EVENT_TYPE_OFFSET=0x%lx", - bufptr+EVENT_TYPE_OFFSET)); - DBUG_PRINT("info", ("bytes_decoded=%d; bufptr=0x%lx; buf[EVENT_LEN_OFFSET]=%u", - bytes_decoded, bufptr, uint4korr(bufptr+EVENT_LEN_OFFSET))); + DBUG_PRINT("info",("bufptr+EVENT_TYPE_OFFSET: 0x%lx", + (long) (bufptr+EVENT_TYPE_OFFSET))); + DBUG_PRINT("info", ("bytes_decoded: %d bufptr: 0x%lx buf[EVENT_LEN_OFFSET]: %lu", + bytes_decoded, (long) bufptr, + uint4korr(bufptr+EVENT_LEN_OFFSET))); ev->thd= thd; if (int err= ev->exec_event(thd->rli_fake)) { - DBUG_PRINT("info", ("exec_event() - error=%d", error)); + DBUG_PRINT("error", ("exec_event() returned: %d", err)); /* TODO: Maybe a better error message since the BINLOG statement now contains several events. 
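A note on the pattern above: the DBUG_PRINT() hunks in rpl_tblmap.cc, sp_head.cc, sql_base.cc and sql_binlog.cc all converge on one convention -- ulong arguments are printed with %lu, and pointers are cast to (long) and printed as 0x%lx -- so that every conversion character matches the argument actually passed. DBUG_PRINT() takes an ordinary printf-style format string, so the rule is the usual printf one. A minimal stand-alone sketch of the convention, using plain printf and made-up variable names so it builds without the server headers:

#include <stdio.h>

int main(void)
{
  int some_object= 0;            /* stand-in for a TABLE, Item, etc. (illustrative only) */
  void *ptr= &some_object;
  unsigned long table_id= 42;    /* illustrative value only */

  /* Old style seen in the '-' lines: "%d" for an ulong, "%p" for the pointer.   */
  /* New style used by the '+' lines: %lu for the ulong, and an explicit cast    */
  /* plus 0x%lx for the pointer (the (long) cast mirrors the patch; an unsigned  */
  /* long cast would work just as well).                                         */
  printf("tid %lu -> table 0x%lx\n", table_id, (long) ptr);
  return 0;
}

The same convention continues in the sql_handler.cc, sql_lex.cc and sql_prepare.cc hunks below.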
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 1d217cbe54c..9fc39685407 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -2981,7 +2981,7 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used, DBUG_PRINT("qcache", ("table: %s db: %s type: %u", tables_used->table->s->table_name.str, tables_used->table->s->db.str, - tables_used->table->s->db_type)); + tables_used->table->s->db_type->db_type)); if (tables_used->derived) { table_count--; @@ -3037,7 +3037,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex, lex->safe_to_cache_query) { DBUG_PRINT("qcache", ("options: %lx %lx type: %u", - OPTION_TO_QUERY_CACHE, + (long) OPTION_TO_QUERY_CACHE, (long) lex->select_lex.options, (int) thd->variables.query_cache_type)); @@ -3057,7 +3057,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex, DBUG_PRINT("qcache", ("not interesting query: %d or not cacheable, options %lx %lx type: %u", (int) lex->sql_command, - OPTION_TO_QUERY_CACHE, + (long) OPTION_TO_QUERY_CACHE, (long) lex->select_lex.options, (int) thd->variables.query_cache_type)); DBUG_RETURN(0); diff --git a/sql/sql_class.cc b/sql/sql_class.cc index ac93200266d..07510c1fbb0 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -551,7 +551,7 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var, void THD::awake(THD::killed_state state_to_set) { DBUG_ENTER("THD::awake"); - DBUG_PRINT("enter", ("this=0x%lx", this)); + DBUG_PRINT("enter", ("this: 0x%lx", (long) this)); THD_CHECK_SENTRY(this); safe_mutex_assert_owner(&LOCK_delete); @@ -2623,9 +2623,9 @@ namespace { return m_memory != 0; } - byte *slot(int const s) + byte *slot(uint s) { - DBUG_ASSERT(0 <= s && s < sizeof(m_ptr)/sizeof(*m_ptr)); + DBUG_ASSERT(s < sizeof(m_ptr)/sizeof(*m_ptr)); DBUG_ASSERT(m_ptr[s] != 0); DBUG_ASSERT(m_alloc_checked == true); return m_ptr[s]; diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 0d893a6c9be..c448be04ac5 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -367,9 +367,9 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, strlen(tables->alias) + 1))) { table= hash_tables->table; - DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' tab %p", + DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' table: 0x%lx", hash_tables->db, hash_tables->table_name, - hash_tables->alias, table)); + hash_tables->alias, (long) table)); if (!table) { /* @@ -633,7 +633,8 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags, TABLE **table_ptr; bool did_lock= FALSE; DBUG_ENTER("mysql_ha_flush"); - DBUG_PRINT("enter", ("tables: %p mode_flags: 0x%02x", tables, mode_flags)); + DBUG_PRINT("enter", ("tables: 0x%lx mode_flags: 0x%02x", + (long) tables, mode_flags)); if (tables) { diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index c35ef4079d3..ffd32bea42a 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1443,7 +1443,7 @@ bool st_select_lex::add_order_to_list(THD *thd, Item *item, bool asc) bool st_select_lex::add_item_to_list(THD *thd, Item *item) { DBUG_ENTER("st_select_lex::add_item_to_list"); - DBUG_PRINT("info", ("Item: %p", item)); + DBUG_PRINT("info", ("Item: 0x%lx", (long) item)); DBUG_RETURN(item_list.push_back(item)); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 8ccf7116d95..2c130a45f77 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1604,7 +1604,7 @@ bool do_command(THD *thd) command= COM_END; // Wrong command DBUG_PRINT("info",("Command on %s = %d (%s)", vio_description(net->vio), command, - command_name[command])); + 
command_name[command].str)); } net->read_timeout=old_timeout; // restore it /* @@ -1828,7 +1828,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, char *packet_end= thd->query + thd->query_length; /* 'b' stands for 'buffer' parameter', special for 'my_snprintf' */ const char *format= "%.*b"; - general_log.write(thd, command, format, thd->query_length, thd->query); + general_log_print(thd, command, format, thd->query_length, thd->query); DBUG_PRINT("query",("%-.4096s",thd->query)); if (!(specialflag & SPECIAL_NO_PRIOR)) diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 8df527fd25b..266a5bad34d 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -4480,7 +4480,7 @@ that are reorganised. { if (!alt_part_info->use_default_partitions) { - DBUG_PRINT("info", ("part_info= %x", tab_part_info)); + DBUG_PRINT("info", ("part_info: 0x%lx", (long) tab_part_info)); tab_part_info->use_default_partitions= FALSE; } tab_part_info->use_default_no_partitions= FALSE; diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index c505f8c0fbc..0c6a5fe5846 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1918,7 +1918,7 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length) else { const char *format= "[%lu] %.*b"; - general_log.write(thd, COM_STMT_PREPARE, format, stmt->id, + general_log_print(thd, COM_STMT_PREPARE, format, stmt->id, stmt->query_length, stmt->query); } @@ -2265,7 +2265,7 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length) DBUG_VOID_RETURN; DBUG_PRINT("exec_query", ("%s", stmt->query)); - DBUG_PRINT("info",("stmt: %p", stmt)); + DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt)); sp_cache_flush_obsolete(&thd->sp_proc_cache); sp_cache_flush_obsolete(&thd->sp_func_cache); @@ -2305,9 +2305,9 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length) if (error == 0) { const char *format= "[%lu] %.*b"; - general_log.write(thd, COM_STMT_EXECUTE, format, stmt->id, + general_log_print(thd, COM_STMT_EXECUTE, format, stmt->id, thd->query_length, thd->query); - + } DBUG_VOID_RETURN; set_params_data_err: @@ -2360,7 +2360,7 @@ void mysql_sql_stmt_execute(THD *thd) DBUG_VOID_RETURN; } - DBUG_PRINT("info",("stmt: %p", stmt)); + DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt)); /* If the free_list is not empty, we'll wrongly free some externally @@ -2724,7 +2724,8 @@ void Prepared_statement::setup_set_params() Prepared_statement::~Prepared_statement() { DBUG_ENTER("Prepared_statement::~Prepared_statement"); - DBUG_PRINT("enter",("stmt: %p cursor: %p", this, cursor)); + DBUG_PRINT("enter",("stmt: 0x%lx cursor: 0x%lx", + (long) this, (long) cursor)); delete cursor; /* We have to call free on the items even if cleanup is called as some items, @@ -2745,7 +2746,7 @@ Query_arena::Type Prepared_statement::type() const void Prepared_statement::cleanup_stmt() { DBUG_ENTER("Prepared_statement::cleanup_stmt"); - DBUG_PRINT("enter",("stmt: %p", this)); + DBUG_PRINT("enter",("stmt: 0x%lx", (long) this)); /* The order is important */ lex->unit.cleanup(); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 511d9fa6677..a0149b1a34d 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -3743,7 +3743,7 @@ static void wait_while_table_is_used(THD *thd,TABLE *table, enum ha_extra_function function) { DBUG_ENTER("wait_while_table_is_used"); - DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %u", + DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %lu", table->s->table_name.str, (ulong) 
table->s, table->db_stat, table->s->version)); diff --git a/sql/sql_test.cc b/sql/sql_test.cc index c4c40ea63c8..219ca8260ed 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -248,14 +248,15 @@ print_plan(JOIN* join, uint idx, double record_count, double read_time, if (join->best_read == DBL_MAX) { fprintf(DBUG_FILE, - "%s; idx:%u, best: DBL_MAX, atime: %g, itime: %g, count: %g\n", - info, idx, current_read_time, read_time, record_count); + "%s; idx: %u best: DBL_MAX atime: %g itime: %g count: %g\n", + info, idx, current_read_time, read_time, record_count); } else { fprintf(DBUG_FILE, - "%s; idx:%u, best: %g, accumulated: %g, increment: %g, count: %g\n", - info, idx, join->best_read, current_read_time, read_time, record_count); + "%s; idx :%u best: %g accumulated: %g increment: %g count: %g\n", + info, idx, join->best_read, current_read_time, read_time, + record_count); } /* Print the tables in JOIN->positions */ diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index fb56b7ae3b0..8baf84585b2 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -1612,7 +1612,7 @@ Handle_old_incorrect_sql_modes_hook::process_unknown_string(char *&unknown_key, char *end) { DBUG_ENTER("Handle_old_incorrect_sql_modes_hook::process_unknown_string"); - DBUG_PRINT("info", ("unknown key:%60s", unknown_key)); + DBUG_PRINT("info", ("unknown key: %60s", unknown_key)); if (unknown_key + INVALID_SQL_MODES_LENGTH + 1 < end && unknown_key[INVALID_SQL_MODES_LENGTH] == '=' && @@ -1654,7 +1654,7 @@ process_unknown_string(char *&unknown_key, gptr base, MEM_ROOT *mem_root, char *end) { DBUG_ENTER("Handle_old_incorrect_trigger_table_hook::process_unknown_string"); - DBUG_PRINT("info", ("unknown key:%60s", unknown_key)); + DBUG_PRINT("info", ("unknown key: %60s", unknown_key)); if (unknown_key + INVALID_TRIGGER_TABLE_LENGTH + 1 < end && unknown_key[INVALID_TRIGGER_TABLE_LENGTH] == '=' && diff --git a/sql/table.cc b/sql/table.cc index c2b76a21a8e..0ddaf99810d 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1339,7 +1339,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, Field **field_ptr; DBUG_ENTER("open_table_from_share"); DBUG_PRINT("enter",("name: '%s.%s' form: 0x%lx", share->db.str, - share->table_name.str, outparam)); + share->table_name.str, (long) outparam)); error= 1; bzero((char*) outparam, sizeof(*outparam)); @@ -2401,8 +2401,8 @@ table_check_intact(TABLE *table, const uint table_f_count, my_bool error= FALSE; my_bool fields_diff_count; DBUG_ENTER("table_check_intact"); - DBUG_PRINT("info",("table=%s expected_count=%d",table->alias, table_f_count)); - DBUG_PRINT("info",("last_create_time=%d", *last_create_time)); + DBUG_PRINT("info",("table: %s expected_count: %d last_create_time: %ld", + table->alias, table_f_count, *last_create_time)); if ((fields_diff_count= (table->s->fields != table_f_count)) || (*last_create_time != table->file->stats.create_time)) diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index 13ff2f21428..053fb2cde78 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -2157,7 +2157,7 @@ error: int ha_federated::index_init(uint keynr, bool sorted) { DBUG_ENTER("ha_federated::index_init"); - DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name, keynr)); + DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name.str, keynr)); active_index= keynr; DBUG_RETURN(0); } diff --git a/storage/myisam/mi_rsamepos.c b/storage/myisam/mi_rsamepos.c index c4bd5fa16fa..d2dba64b0fd 100644 
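The one-line ha_federated::index_init() change just above is another recurring class of fix in these patches: table->s->table_name is a LEX_STRING (a small struct carrying a str pointer and a length), not a plain char*, so a %s conversion has to be handed its .str member -- the same .str access the sql_cache.cc hunk earlier already uses. A stand-alone sketch of why, with a simplified stand-in for the real struct and illustrative names and values only:

#include <stdio.h>

/* Simplified stand-in for the server's LEX_STRING (char *str; uint length). */
struct lex_string_sketch
{
  const char *str;
  unsigned int length;
};

int main(void)
{
  struct lex_string_sketch table_name= { "t1", 2 };   /* illustrative values */

  /* Wrong: passing the whole struct where %s expects a char* (what the '-' line did). */
  /* printf("table: '%s'\n", table_name); */

  /* Right: pass the pointer member, as the '+' line does. */
  printf("table: '%s'\n", table_name.str);
  return 0;
}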
--- a/storage/myisam/mi_rsamepos.c +++ b/storage/myisam/mi_rsamepos.c @@ -33,7 +33,8 @@ int mi_rsame_with_pos(MI_INFO *info, byte *record, int inx, my_off_t filepos) DBUG_ENTER("mi_rsame_with_pos"); DBUG_PRINT("enter",("index: %d filepos: %ld", inx, (long) filepos)); - if (inx < -1 || inx >= 0 && ! mi_is_key_active(info->s->state.key_map, inx)) + if (inx < -1 || + (inx >= 0 && ! mi_is_key_active(info->s->state.key_map, inx))) { DBUG_RETURN(my_errno=HA_ERR_WRONG_INDEX); } diff --git a/storage/ndb/include/ndb_global.h.in b/storage/ndb/include/ndb_global.h.in index 0befd0ab026..24e75f964a0 100644 --- a/storage/ndb/include/ndb_global.h.in +++ b/storage/ndb/include/ndb_global.h.in @@ -137,6 +137,7 @@ extern "C" { #define LINT_SET_PTR = {0,0} #else #define LINT_SET_PTR +#endif #ifndef MIN #define MIN(x,y) (((x)<(y))?(x):(y)) diff --git a/storage/ndb/include/util/NdbOut.hpp b/storage/ndb/include/util/NdbOut.hpp index d85d5cc6305..911777be07d 100644 --- a/storage/ndb/include/util/NdbOut.hpp +++ b/storage/ndb/include/util/NdbOut.hpp @@ -106,7 +106,7 @@ inline NdbOut& dec(NdbOut& _NdbOut) { return _NdbOut.setHexFormat(0); } extern "C" -void ndbout_c(const char * fmt, ...); +void ndbout_c(const char * fmt, ...) ATTRIBUTE_FORMAT(printf, 1, 2); class FilteredNdbOut : public NdbOut { public: diff --git a/storage/ndb/include/util/SimpleProperties.hpp b/storage/ndb/include/util/SimpleProperties.hpp index 60aeca1ed50..f199790f416 100644 --- a/storage/ndb/include/util/SimpleProperties.hpp +++ b/storage/ndb/include/util/SimpleProperties.hpp @@ -153,7 +153,6 @@ public: ValueType m_type; protected: Reader(); - virtual ~Reader() {} virtual void reset() = 0; virtual bool step(Uint32 len) = 0; @@ -168,7 +167,6 @@ public: class Writer { public: Writer() {} - virtual ~Writer() {} bool first(); bool add(Uint16 key, Uint32 value); @@ -192,7 +190,6 @@ public: SimplePropertiesLinearReader(const Uint32 * src, Uint32 len); virtual ~SimplePropertiesLinearReader() {} - virtual ~SimplePropertiesLinearReader() {} virtual void reset(); virtual bool step(Uint32 len); virtual bool getWord(Uint32 * dst); @@ -230,7 +227,6 @@ public: UtilBufferWriter(class UtilBuffer & buf); virtual ~UtilBufferWriter() {} - virtual ~UtilBufferWriter() {} virtual bool reset(); virtual bool putWord(Uint32 val); virtual bool putWords(const Uint32 * src, Uint32 len); @@ -284,7 +280,6 @@ public: SimplePropertiesSectionWriter(class SectionSegmentPool &); virtual ~SimplePropertiesSectionWriter() {} - virtual ~SimplePropertiesSectionWriter() {} virtual bool reset(); virtual bool putWord(Uint32 val); virtual bool putWords(const Uint32 * src, Uint32 len); diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp index 8081db7e18c..819255a79f5 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp @@ -2118,7 +2118,7 @@ Backup::execDROP_TRIG_REF(Signal* signal) BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); - if(ref->getConf()->getTriggerId() != -1) + if(ref->getConf()->getTriggerId() != ~(Uint32) 0) { ndbout << "ERROR DROPPING TRIGGER: " << ref->getConf()->getTriggerId(); ndbout << " Err: " << (Uint32)ref->getErrorCode() << endl << endl; diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index b36eaab4d46..957248bcf56 100644 --- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -1287,7 +1287,7 @@ 
Cmvmi::execTESTSIG(Signal* signal){ fprintf(stdout, "\n"); for(i = 0; iheader.m_noOfSections; i++){ - SegmentedSectionPtr ptr = {0,0,0}; + SegmentedSectionPtr ptr; ndbout_c("-- Section %d --", i); signal->getSection(ptr, i); ndbrequire(ptr.p != 0); @@ -1345,7 +1345,7 @@ Cmvmi::execTESTSIG(Signal* signal){ LinearSectionPtr ptr[3]; const Uint32 secs = signal->getNoOfSections(); for(i = 0; igetSection(sptr, i); ptr[i].sz = sptr.sz; ptr[i].p = new Uint32[sptr.sz]; @@ -1394,7 +1394,7 @@ Cmvmi::execTESTSIG(Signal* signal){ LinearSectionPtr ptr[3]; const Uint32 secs = signal->getNoOfSections(); for(i = 0; igetSection(sptr, i); ptr[i].sz = sptr.sz; ptr[i].p = new Uint32[sptr.sz]; @@ -1460,7 +1460,7 @@ Cmvmi::execTESTSIG(Signal* signal){ const Uint32 secs = signal->getNoOfSections(); memset(g_test, 0, sizeof(g_test)); for(i = 0; igetSection(sptr, i); g_test[i].sz = sptr.sz; g_test[i].p = new Uint32[sptr.sz]; diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index d5578a5c0c0..8a994db4fbc 100644 --- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -971,10 +971,10 @@ void Dbacc::initOpRec(Signal* signal) Uint32 opbits = 0; opbits |= Treqinfo & 0x7; - opbits |= ((Treqinfo >> 4) & 0x3) ? Operationrec::OP_LOCK_MODE : 0; - opbits |= ((Treqinfo >> 4) & 0x3) ? Operationrec::OP_ACC_LOCK_MODE : 0; - opbits |= (dirtyReadFlag) ? Operationrec::OP_DIRTY_READ : 0; - opbits |= ((Treqinfo >> 31) & 0x1) ? Operationrec::OP_LOCK_REQ : 0; + opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_LOCK_MODE : 0; + opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0; + opbits |= (dirtyReadFlag) ? (Uint32) Operationrec::OP_DIRTY_READ : 0; + opbits |= ((Treqinfo >> 31) & 0x1) ? (Uint32) Operationrec::OP_LOCK_REQ : 0; //operationRecPtr.p->nodeType = (Treqinfo >> 7) & 0x3; operationRecPtr.p->fid = fragrecptr.p->myfid; @@ -6947,10 +6947,10 @@ void Dbacc::initScanOpRec(Signal* signal) Uint32 opbits = 0; opbits |= ZSCAN_OP; - opbits |= scanPtr.p->scanLockMode ? Operationrec::OP_LOCK_MODE : 0; - opbits |= scanPtr.p->scanLockMode ? Operationrec::OP_ACC_LOCK_MODE : 0; - opbits |= scanPtr.p->scanReadCommittedFlag ? - Operationrec::OP_EXECUTED_DIRTY_READ : 0; + opbits |= scanPtr.p->scanLockMode ? (Uint32) Operationrec::OP_LOCK_MODE : 0; + opbits |= scanPtr.p->scanLockMode ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0; + opbits |= (scanPtr.p->scanReadCommittedFlag ? 
+ (Uint32) Operationrec::OP_EXECUTED_DIRTY_READ : 0); opbits |= Operationrec::OP_COMMIT_DELETE_CHECK; operationRecPtr.p->userptr = RNIL; operationRecPtr.p->scanRecPtr = scanPtr.i; @@ -7700,6 +7700,7 @@ void Dbacc::putOverflowRecInFrag(Signal* signal) OverflowRecordPtr tpifPrevOverrecPtr; tpifNextOverrecPtr.i = fragrecptr.p->firstOverflowRec; + LINT_INIT(tpifPrevOverrecPtr.p); tpifPrevOverrecPtr.i = RNIL; while (tpifNextOverrecPtr.i != RNIL) { ptrCheckGuard(tpifNextOverrecPtr, coverflowrecsize, overflowRecord); @@ -7749,6 +7750,7 @@ void Dbacc::putRecInFreeOverdir(Signal* signal) OverflowRecordPtr tpfoPrevOverrecPtr; tpfoNextOverrecPtr.i = fragrecptr.p->firstFreeDirindexRec; + LINT_INIT(tpfoPrevOverrecPtr.p); tpfoPrevOverrecPtr.i = RNIL; while (tpfoNextOverrecPtr.i != RNIL) { ptrCheckGuard(tpfoNextOverrecPtr, coverflowrecsize, overflowRecord); diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index b91131ca32d..1c305d74863 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -189,7 +189,7 @@ struct { &Dbdict::drop_undofile_prepare_start, 0, 0, 0, 0, - 0, 0 + 0, 0, 0 } }; diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 0e9157c38aa..1eee1badce3 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -2909,7 +2909,7 @@ Dbdih::nr_start_fragment(Signal* signal, } } - if (maxLcpIndex == ~0) + if (maxLcpIndex == ~ (Uint32) 0) { ndbout_c("Didnt find any LCP for node: %d tab: %d frag: %d", takeOverPtr.p->toStartingNode, @@ -5968,6 +5968,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){ break; default: ndbrequire(false); + lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning }//switch Uint32 failedNodeId = c_lcpState.m_MASTER_LCPREQ_FailedNodeId; @@ -6892,6 +6893,8 @@ void Dbdih::execDIADDTABREQ(Signal* signal) Uint32 align; }; SegmentedSectionPtr fragDataPtr; + LINT_INIT(fragDataPtr.i); + LINT_INIT(fragDataPtr.sz); signal->getSection(fragDataPtr, DiAddTabReq::FRAGMENTATION); copy((Uint32*)fragments, fragDataPtr); releaseSections(signal); @@ -6981,7 +6984,9 @@ Dbdih::sendAddFragreq(Signal* signal, ConnectRecordPtr connectPtr, TabRecordPtr tabPtr, Uint32 fragId){ jam(); const Uint32 fragCount = tabPtr.p->totalfragments; - ReplicaRecordPtr replicaPtr; replicaPtr.i = RNIL; + ReplicaRecordPtr replicaPtr; + LINT_INIT(replicaPtr.p); + replicaPtr.i = RNIL; FragmentstorePtr fragPtr; for(; fragIdm_connectionData = RNIL; else + { + jam(); + ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord); ref->m_connectionData = connectPtr.p->userpointer; + } ref->m_tableRef = tabPtr.i; ref->m_senderData = senderData; ref->m_error = DihFragCountRef::ErroneousTableState; @@ -11443,6 +11452,7 @@ Dbdih::findBestLogNode(CreateReplicaRecord* createReplica, { ConstPtr fblFoundReplicaPtr; ConstPtr fblReplicaPtr; + LINT_INIT(fblFoundReplicaPtr.p); /* --------------------------------------------------------------------- */ /* WE START WITH ZERO AS FOUND TO ENSURE THAT FIRST HIT WILL BE */ diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 9a7803efbec..53d7d98ae84 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -3417,9 +3417,9 @@ void Dblqh::execLQHKEYREQ(Signal* signal) } else { - regTcPtr->operation = op == ZREAD_EX 
? ZREAD : op; + regTcPtr->operation = (Operation_t) op == ZREAD_EX ? ZREAD : (Operation_t) op; regTcPtr->lockType = - op == ZREAD_EX ? ZUPDATE : op == ZWRITE ? ZINSERT : op; + op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? ZINSERT : (Operation_t) op; } CRASH_INSERTION2(5041, regTcPtr->simpleRead && @@ -18520,7 +18520,7 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal) do { ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - ndbout_c(" file %d(%d) FileChangeState: %d logFileStatus: %d currentMbyte: %d currentFilepage", + ndbout_c(" file %d(%d) FileChangeState: %d logFileStatus: %d currentMbyte: %d currentFilepage %d", logFilePtr.p->fileNo, logFilePtr.i, logFilePtr.p->fileChangeState, diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 3fdd587afa5..af2925fa738 100644 --- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -3194,7 +3194,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal, if (unlikely(version < NDBD_ROWID_VERSION)) { Uint32 op = regTcPtr->operation; - Uint32 lock = op == ZREAD_EX ? ZUPDATE : op == ZWRITE ? ZINSERT : op; + Uint32 lock = (Operation_t) op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? ZINSERT : (Operation_t) op; LqhKeyReq::setLockType(Tdata10, lock); } /* ---------------------------------------------------------------------- */ diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp index fc3419e694a..23edd212991 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp @@ -43,7 +43,7 @@ void Dbtup::execTUP_DEALLOCREQ(Signal* signal) getFragmentrec(regFragPtr, frag_id, regTabPtr.p); ndbassert(regFragPtr.p != NULL); - if (! (((frag_page_id << MAX_TUPLES_BITS) + page_index) == ~0)) + if (! 
(((frag_page_id << MAX_TUPLES_BITS) + page_index) == ~ (Uint32) 0)) { Local_key tmp; tmp.m_page_no= getRealpid(regFragPtr.p, frag_page_id); diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp index a055b18888b..7959606b7f4 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp @@ -82,7 +82,7 @@ Dbtup::dump_disk_alloc(Dbtup::Disk_alloc_info & alloc) { ndbout << ptr << " "; } - ndbout_c(""); + ndbout_c(" "); } ndbout_c("page requests"); for(Uint32 i = 0; im_tail_pos[0].m_ptr_i, ptr.p->m_tail_pos[0].m_idx, ptr.p->m_tail_pos[1].m_ptr_i, ptr.p->m_tail_pos[1].m_idx, ptr.p->m_tail_pos[2].m_ptr_i, ptr.p->m_tail_pos[2].m_idx, - (ptr.p->m_free_file_words / File_formats::UNDO_PAGE_WORDS)); + (long) (ptr.p->m_free_file_words / File_formats::UNDO_PAGE_WORDS)); } m_logfile_group_list.next(ptr); } diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp index 362a462b081..b20f810d029 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp @@ -655,7 +655,7 @@ Ndbfs::createAsyncFile(){ // Print info about all open files for (unsigned i = 0; i < theFiles.size(); i++){ AsyncFile* file = theFiles[i]; - ndbout_c("%2d (0x%x): %s", i, file, file->isOpen()?"OPEN":"CLOSED"); + ndbout_c("%2d (0x%lx): %s", i, (long) file, file->isOpen()?"OPEN":"CLOSED"); } ERROR_SET(fatal, NDBD_EXIT_AFS_MAXOPEN,""," Ndbfs::createAsyncFile"); } @@ -1130,7 +1130,7 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal) ndbout << "All files: " << endl; for (unsigned i = 0; i < theFiles.size(); i++){ AsyncFile* file = theFiles[i]; - ndbout_c("%2d (0x%x): %s", i,file, file->isOpen()?"OPEN":"CLOSED"); + ndbout_c("%2d (0x%lx): %s", i, (long) file, file->isOpen()?"OPEN":"CLOSED"); } } }//Ndbfs::execDUMP_STATE_ORD() diff --git a/storage/ndb/src/kernel/blocks/pgman.cpp b/storage/ndb/src/kernel/blocks/pgman.cpp index 15f056f70a9..88ea0122268 100644 --- a/storage/ndb/src/kernel/blocks/pgman.cpp +++ b/storage/ndb/src/kernel/blocks/pgman.cpp @@ -1188,7 +1188,7 @@ Pgman::process_lcp(Signal* signal) pl_hash.next(m_lcp_curr_bucket, iter); Uint32 loop = 0; while (iter.curr.i != RNIL && - m_lcp_outstanding < max_count && + m_lcp_outstanding < (Uint32) max_count && (loop ++ < 32 || iter.bucket == m_lcp_curr_bucket)) { Ptr& ptr = iter.curr; @@ -2324,7 +2324,7 @@ Pgman::execDUMP_STATE_ORD(Signal* signal) if (signal->theData[0] == 11004) { - ndbout << "Dump LCP bucket m_lcp_outstanding: %d", m_lcp_outstanding; + ndbout << "Dump LCP bucket m_lcp_outstanding: " << m_lcp_outstanding; if (m_lcp_curr_bucket != ~(Uint32)0) { Page_hashlist::Iterator iter; diff --git a/storage/ndb/src/kernel/blocks/restore.cpp b/storage/ndb/src/kernel/blocks/restore.cpp index b80bc88ec5b..0436347eeca 100644 --- a/storage/ndb/src/kernel/blocks/restore.cpp +++ b/storage/ndb/src/kernel/blocks/restore.cpp @@ -1137,7 +1137,7 @@ Restore::reorder_key(const KeyDescriptor* desc, } dst += sz; } - ndbassert((dst - Tmp) == len); + ndbassert((Uint32) (dst - Tmp) == len); memcpy(data, Tmp, 4*len); } diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index 4b38ac0f5ff..92efca36a35 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -1590,6 +1590,9 @@ Suma::execGET_TABINFOREF(Signal* signal){ break; case GetTabInfoRef::TableNameTooLong: 
ndbrequire(false); + break; + case GetTabInfoRef::NoFetchByName: + break; } if (do_resend_request) { @@ -4306,7 +4309,7 @@ Suma::Restart::sendSubStartReq(SubscriptionPtr subPtr, SubscriberPtr subbPtr, // restarting suma will not respond to this until startphase 5 // since it is not until then data copying has been completed - DBUG_PRINT("info",("Restarting subscriber: %u on key: [%u,%u]", + DBUG_PRINT("info",("Restarting subscriber: %u on key: [%u,%u] %u", subbPtr.i, subPtr.p->m_subscriptionId, subPtr.p->m_subscriptionKey, diff --git a/storage/ndb/src/kernel/vm/Configuration.cpp b/storage/ndb/src/kernel/vm/Configuration.cpp index 81b87c818fb..48868309d25 100644 --- a/storage/ndb/src/kernel/vm/Configuration.cpp +++ b/storage/ndb/src/kernel/vm/Configuration.cpp @@ -191,7 +191,7 @@ Configuration::init(int argc, char** argv) } if (! (val > 0 && val < MAX_NDB_NODES)) { - ndbout_c("Invalid nodeid specified in nowait-nodes: %d : %s", + ndbout_c("Invalid nodeid specified in nowait-nodes: %ld : %s", val, _nowait_nodes); exit(-1); } diff --git a/storage/ndb/src/kernel/vm/DLHashTable.hpp b/storage/ndb/src/kernel/vm/DLHashTable.hpp index 4f580f937b7..7469dda7917 100644 --- a/storage/ndb/src/kernel/vm/DLHashTable.hpp +++ b/storage/ndb/src/kernel/vm/DLHashTable.hpp @@ -287,6 +287,7 @@ DLHashTableImpl::remove(Ptr & ptr, const T & key) Uint32 i; T * p; Ptr prev; + LINT_INIT(prev.p); prev.i = RNIL; i = hashValues[hv]; diff --git a/storage/ndb/src/kernel/vm/RWPool.hpp b/storage/ndb/src/kernel/vm/RWPool.hpp index c1f4abeed79..a4ad12b52cf 100644 --- a/storage/ndb/src/kernel/vm/RWPool.hpp +++ b/storage/ndb/src/kernel/vm/RWPool.hpp @@ -70,6 +70,7 @@ RWPool::getPtr(Uint32 i) return record; } handle_invalid_get_ptr(i); + return 0; /* purify: deadcode */ } #endif diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp index 4e01038d343..1d6676287e8 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp @@ -1930,6 +1930,7 @@ SimulatedBlock::xfrm_attr(Uint32 attrDesc, CHARSET_INFO* cs, { jam(); Uint32 len; + LINT_INIT(len); switch(array){ case NDB_ARRAYTYPE_SHORT_VAR: len = 1 + srcPtr[0]; diff --git a/storage/ndb/src/kernel/vm/WOPool.hpp b/storage/ndb/src/kernel/vm/WOPool.hpp index 6b42218368c..ed0d09d2f04 100644 --- a/storage/ndb/src/kernel/vm/WOPool.hpp +++ b/storage/ndb/src/kernel/vm/WOPool.hpp @@ -115,6 +115,7 @@ WOPool::getPtr(Uint32 i) return record; } handle_invalid_get_ptr(i); + return 0; /* purify: deadcode */ } #endif diff --git a/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp b/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp index 7b8795f7ecb..4de8f8ee479 100644 --- a/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp +++ b/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp @@ -223,6 +223,10 @@ Ndbd_mem_manager::init(bool alloc_less_memory) InitChunk chunk; Uint32 remaining = pages - allocated; +#if defined(_lint) || defined(FORCE_INIT_OF_VARS) + memset((char*) &chunk, 0 , sizeof(chunk)); +#endif + if (do_malloc(pages - allocated, &chunk)) { Uint32 i = 0; diff --git a/storage/ndb/src/mgmclient/CommandInterpreter.cpp b/storage/ndb/src/mgmclient/CommandInterpreter.cpp index 999ff6a4cf2..debf5343a90 100644 --- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp @@ -1558,6 +1558,8 @@ CommandInterpreter::executeShow(char* parameters) case NDB_MGM_NODE_TYPE_UNKNOWN: ndbout << "Error: Unknown Node Type" << endl; return -1; + case NDB_MGM_NODE_TYPE_MAX: + break; /* 
purify: deadcode */ } } diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp index 2268414ae21..58369141ba3 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -2495,7 +2495,7 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted) const BackupCompleteRep * const rep = CAST_CONSTPTR(BackupCompleteRep, signal->getDataPtr()); #ifdef VM_TRACE - ndbout_c("Backup(%d) completed %d", rep->backupId); + ndbout_c("Backup(%d) completed", rep->backupId); #endif event.Event = BackupEvent::BackupCompleted; event.Completed.BackupId = rep->backupId; @@ -2751,7 +2751,7 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value, break; case 1: res = i2.set(param, val_64); - ndbout_c("Updating node %d param: %d to %Ld", node, param, val_32); + ndbout_c("Updating node %d param: %d to %u", node, param, val_32); break; case 2: res = i2.set(param, val_char); diff --git a/storage/ndb/src/ndbapi/DictCache.cpp b/storage/ndb/src/ndbapi/DictCache.cpp index bdeea3674cd..aa42c1a1bab 100644 --- a/storage/ndb/src/ndbapi/DictCache.cpp +++ b/storage/ndb/src/ndbapi/DictCache.cpp @@ -417,7 +417,7 @@ GlobalDictCache::alter_table_rep(const char * name, { TableVersion & ver = (* vers)[i]; if(ver.m_version == tableVersion && ver.m_impl && - ver.m_impl->m_id == tableId) + (Uint32) ver.m_impl->m_id == tableId) { ver.m_status = DROPPED; ver.m_impl->m_status = altered ? diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 42ef7bbbaee..dca1432d18a 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -3583,7 +3583,7 @@ NdbDictInterface::createEvent(class Ndb & ndb, evnt.mi_type = evntConf->getEventType(); evnt.setTable(dataPtr); } else { - if (evnt.m_tableImpl->m_id != evntConf->getTableId() || + if ((Uint32) evnt.m_tableImpl->m_id != evntConf->getTableId() || evnt.m_tableImpl->m_version != evntConf->getTableVersion() || //evnt.m_attrListBitmask != evntConf->getAttrListBitmask() || evnt.mi_type != evntConf->getEventType()) { @@ -3701,7 +3701,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab) DBUG_RETURN(NULL); } if ((tab->m_status != NdbDictionary::Object::Retrieved) || - (tab->m_id != ev->m_table_id) || + ((Uint32) tab->m_id != ev->m_table_id) || (table_version_major(tab->m_version) != table_version_major(ev->m_table_version))) { @@ -3731,7 +3731,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab) DBUG_PRINT("info",("Table: id: %d version: %d", table.m_id, table.m_version)); - if (table.m_id != ev->m_table_id || + if ((Uint32) table.m_id != ev->m_table_id || table_version_major(table.m_version) != table_version_major(ev->m_table_version)) { @@ -3747,7 +3747,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab) #endif - if ( attributeList_sz > table.getNoOfColumns() ) + if ( attributeList_sz > (uint) table.getNoOfColumns() ) { m_error.code = 241; DBUG_PRINT("error",("Invalid version, too many columns")); @@ -3757,7 +3757,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab) assert( (int)attributeList_sz <= table.getNoOfColumns() ); for(unsigned id= 0; ev->m_columns.size() < attributeList_sz; id++) { - if ( id >= table.getNoOfColumns()) + if ( id >= (uint) table.getNoOfColumns()) { m_error.code = 241; DBUG_PRINT("error",("Invalid version, column %d out of range", id)); diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp 
b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 08b98cf7b48..0b12b9d2f0f 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -58,7 +58,7 @@ print_std(const SubTableData * sdata, LinearSectionPtr ptr[3]) SubTableData::getOperation(sdata->requestInfo)); for (int i = 0; i <= 2; i++) { printf("sec=%d addr=%p sz=%d\n", i, (void*)ptr[i].p, ptr[i].sz); - for (int j = 0; j < ptr[i].sz; j++) + for (int j = 0; (uint) j < ptr[i].sz; j++) printf("%08x ", ptr[i].p[j]); printf("\n"); } @@ -199,11 +199,11 @@ NdbEventOperationImpl::init(NdbEventImpl& evnt) m_mergeEvents = false; #endif m_ref_count = 0; - DBUG_PRINT("info", ("m_ref_count = 0 for op: %p", this)); + DBUG_PRINT("info", ("m_ref_count = 0 for op: 0x%lx", (long) this)); m_has_error= 0; - DBUG_PRINT("exit",("this: 0x%x oid: %u", this, m_oid)); + DBUG_PRINT("exit",("this: 0x%lx oid: %u", (long) this, m_oid)); DBUG_VOID_RETURN; } @@ -739,8 +739,8 @@ NdbEventOperationImpl::receive_event() NdbTableImpl *tmp_table_impl= m_eventImpl->m_tableImpl; m_eventImpl->m_tableImpl = at; - DBUG_PRINT("info", ("switching table impl 0x%x -> 0x%x", - tmp_table_impl, at)); + DBUG_PRINT("info", ("switching table impl 0x%lx -> 0x%lx", + (long) tmp_table_impl, (long) at)); // change the rec attrs to refer to the new table object int i; @@ -751,9 +751,9 @@ NdbEventOperationImpl::receive_event() { int no = p->getColumn()->getColumnNo(); NdbColumnImpl *tAttrInfo = at->getColumn(no); - DBUG_PRINT("info", ("rec_attr: 0x%x " - "switching column impl 0x%x -> 0x%x", - p, p->m_column, tAttrInfo)); + DBUG_PRINT("info", ("rec_attr: 0x%lx " + "switching column impl 0x%lx -> 0x%lx", + (long) p, (long) p->m_column, (long) tAttrInfo)); p->m_column = tAttrInfo; p = p->next(); } @@ -765,9 +765,9 @@ NdbEventOperationImpl::receive_event() { int no = p->getColumn()->getColumnNo(); NdbColumnImpl *tAttrInfo = at->getColumn(no); - DBUG_PRINT("info", ("rec_attr: 0x%x " - "switching column impl 0x%x -> 0x%x", - p, p->m_column, tAttrInfo)); + DBUG_PRINT("info", ("rec_attr: 0x%lx " + "switching column impl 0x%lx -> 0x%lx", + (long) p, (long) p->m_column, (long) tAttrInfo)); p->m_column = tAttrInfo; p = p->next(); } @@ -1269,8 +1269,9 @@ NdbEventBuffer::getGCIEventOperations(Uint32* iter, Uint32* event_types) EventBufData_list::Gci_op g = gci_ops->m_gci_op_list[(*iter)++]; if (event_types != NULL) *event_types = g.event_types; - DBUG_PRINT("info", ("gci: %d g.op: %x g.event_types: %x", - (unsigned)gci_ops->m_gci, g.op, g.event_types)); + DBUG_PRINT("info", ("gci: %u g.op: 0x%lx g.event_types: 0x%lx", + (unsigned)gci_ops->m_gci, (long) g.op, + (long) g.event_types)); DBUG_RETURN(g.op); } DBUG_RETURN(NULL); @@ -1563,8 +1564,8 @@ NdbEventBuffer::complete_outof_order_gcis() #endif m_complete_data.m_data.append_list(&bucket->m_data, start_gci); #ifdef VM_TRACE - ndbout_c(" moved %lld rows -> %lld", bucket->m_data.m_count, - m_complete_data.m_data.m_count); + ndbout_c(" moved %ld rows -> %ld", (long) bucket->m_data.m_count, + (long) m_complete_data.m_data.m_count); #else ndbout_c(""); #endif @@ -2180,7 +2181,7 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata, Ev_t* tp = 0; int i; - for (i = 0; i < sizeof(ev_t)/sizeof(ev_t[0]); i++) { + for (i = 0; (uint) i < sizeof(ev_t)/sizeof(ev_t[0]); i++) { if (ev_t[i].t1 == t1 && ev_t[i].t2 == t2) { tp = &ev_t[i]; break; diff --git a/storage/ndb/src/ndbapi/NdbIndexOperation.cpp b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp index 39dbab423d3..9faf66a1e98 100644 --- 
a/storage/ndb/src/ndbapi/NdbIndexOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp @@ -64,6 +64,9 @@ NdbIndexOperation::indxInit(const NdbIndexImpl * anIndex, case(NdbDictionary::Index::OrderedIndex): setErrorCodeAbort(4003); return -1; + default: + DBUG_ASSERT(0); + break; } m_theIndex = anIndex; m_accessTable = anIndex->m_table; diff --git a/storage/ndb/src/ndbapi/NdbIndexStat.cpp b/storage/ndb/src/ndbapi/NdbIndexStat.cpp index e490290b6a2..4ae00348606 100644 --- a/storage/ndb/src/ndbapi/NdbIndexStat.cpp +++ b/storage/ndb/src/ndbapi/NdbIndexStat.cpp @@ -236,7 +236,7 @@ NdbIndexStat::stat_search(const Area& a, const Uint32* key, Uint32 keylen, Uint3 int NdbIndexStat::stat_oldest(const Area& a) { - Uint32 i, k, m; + Uint32 i, k= 0, m; bool found = false; m = ~(Uint32)0; // shut up incorrect CC warning for (i = 0; i < a.m_entries; i++) { diff --git a/storage/ndb/src/ndbapi/NdbOperationInt.cpp b/storage/ndb/src/ndbapi/NdbOperationInt.cpp index cd2e3911751..b7fda205450 100644 --- a/storage/ndb/src/ndbapi/NdbOperationInt.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationInt.cpp @@ -1091,53 +1091,61 @@ NdbOperation::branch_col(Uint32 type, int NdbOperation::branch_col_eq(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_eq %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_eq %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::EQ, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_ne(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_ne %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_ne %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::NE, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_lt(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_lt %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_lt %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::LT, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_le(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_le %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_le %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::LE, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_gt(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_gt %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_gt %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::GT, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_ge(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_ge %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_ge %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::GE, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_like(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_like %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_like %u 
%.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::LIKE, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_notlike(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_notlike %u %.*s(%u,%d) -> %u", ColId,len,val,len,nopad,Label)); + INT_DEBUG(("branch_col_notlike %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::NOT_LIKE, ColId, val, len, nopad, Label); } diff --git a/storage/ndb/src/ndbapi/NdbRecAttr.cpp b/storage/ndb/src/ndbapi/NdbRecAttr.cpp index 5931a00fcf7..edd48f50ce3 100644 --- a/storage/ndb/src/ndbapi/NdbRecAttr.cpp +++ b/storage/ndb/src/ndbapi/NdbRecAttr.cpp @@ -372,7 +372,12 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r) j = length; } break; - unknown: + + case NdbDictionary::Column::Undefined: + case NdbDictionary::Column::Mediumint: + case NdbDictionary::Column::Mediumunsigned: + case NdbDictionary::Column::Longvarbinary: + unknown: //default: /* no print functions for the rest, just print type */ out << (int) r.getType(); j = length; diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp index 2d47f79ee09..64dc544b226 100644 --- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp @@ -181,7 +181,8 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm, } bool rangeScan = false; - if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex) + if ( (int) m_accessTable->m_indexType == + (int) NdbDictionary::Index::OrderedIndex) { if (m_currentTable == m_accessTable){ // Old way of scanning indexes, should not be allowed @@ -588,7 +589,7 @@ err4: theNdbCon->theTransactionIsStarted = false; theNdbCon->theReleaseOnClose = true; - if(DEBUG_NEXT_RESULT) ndbout_c("return -1", retVal); + if(DEBUG_NEXT_RESULT) ndbout_c("return %d", retVal); return -1; } diff --git a/storage/ndb/src/ndbapi/ObjectMap.hpp b/storage/ndb/src/ndbapi/ObjectMap.hpp index e3db479f677..b211e2956dd 100644 --- a/storage/ndb/src/ndbapi/ObjectMap.hpp +++ b/storage/ndb/src/ndbapi/ObjectMap.hpp @@ -84,7 +84,7 @@ NdbObjectIdMap::map(void * object){ // unlock(); - DBUG_PRINT("info",("NdbObjectIdMap::map(0x%x) %u", object, ff<<2)); + DBUG_PRINT("info",("NdbObjectIdMap::map(0x%lx) %u", (long) object, ff<<2)); return ff<<2; } @@ -102,14 +102,16 @@ NdbObjectIdMap::unmap(Uint32 id, void *object){ m_map[i].m_next = m_firstFree; m_firstFree = i; } else { - ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%x) obj=0x%x", id, object, obj); - DBUG_PRINT("error",("NdbObjectIdMap::unmap(%u, 0x%x) obj=0x%x", id, object, obj)); + ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%lx) obj=0x%lx", + id, (long) object, (long) obj); + DBUG_PRINT("error",("NdbObjectIdMap::unmap(%u, 0x%lx) obj=0x%lx", + id, (long) object, (long) obj)); return 0; } // unlock(); - DBUG_PRINT("info",("NdbObjectIdMap::unmap(%u) obj=0x%x", id, obj)); + DBUG_PRINT("info",("NdbObjectIdMap::unmap(%u) obj=0x%lx", id, (long) obj)); return obj; } diff --git a/storage/ndb/tools/desc.cpp b/storage/ndb/tools/desc.cpp index c042f745d9d..2a91d3215f5 100644 --- a/storage/ndb/tools/desc.cpp +++ b/storage/ndb/tools/desc.cpp @@ -131,7 +131,7 @@ int desc_logfilegroup(Ndb *myndb, char* name) assert(dict); NdbDictionary::LogfileGroup lfg= dict->getLogfileGroup(name); NdbError err= dict->getNdbError(); - if(err.classification!=ndberror_cl_none) + if( (int) err.classification != (int) ndberror_cl_none) return 0; ndbout 
<< "Type: LogfileGroup" << endl; @@ -153,7 +153,7 @@ int desc_tablespace(Ndb *myndb, char* name) assert(dict); NdbDictionary::Tablespace ts= dict->getTablespace(name); NdbError err= dict->getNdbError(); - if(err.classification!=ndberror_cl_none) + if ((int) err.classification != (int) ndberror_cl_none) return 0; ndbout << "Type: Tablespace" << endl; @@ -175,11 +175,11 @@ int desc_undofile(Ndb_cluster_connection &con, Ndb *myndb, char* name) con.init_get_next_node(iter); - while(id= con.get_next_node(iter)) + while ((id= con.get_next_node(iter))) { NdbDictionary::Undofile uf= dict->getUndofile(0, name); NdbError err= dict->getNdbError(); - if(err.classification!=ndberror_cl_none) + if ((int) err.classification != (int) ndberror_cl_none) return 0; ndbout << "Type: Undofile" << endl; @@ -211,11 +211,11 @@ int desc_datafile(Ndb_cluster_connection &con, Ndb *myndb, char* name) con.init_get_next_node(iter); - while(id= con.get_next_node(iter)) + while ((id= con.get_next_node(iter))) { NdbDictionary::Datafile df= dict->getDatafile(id, name); NdbError err= dict->getNdbError(); - if(err.classification!=ndberror_cl_none) + if ((int) err.classification != (int) ndberror_cl_none) return 0; ndbout << "Type: Datafile" << endl; diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp index 4e3d299239b..b51760266cb 100644 --- a/storage/ndb/tools/restore/Restore.cpp +++ b/storage/ndb/tools/restore/Restore.cpp @@ -300,7 +300,13 @@ RestoreMetaData::markSysTables() strcmp(tableName, "NDB$EVENTS_0") == 0 || strcmp(tableName, "sys/def/SYSTAB_0") == 0 || strcmp(tableName, "sys/def/NDB$EVENTS_0") == 0 || + /* + The following is for old MySQL versions, + before we changed the database name of the tables from + "cluster_replication" -> "cluster" -> "mysql" + */ strcmp(tableName, "cluster_replication/def/" NDB_APPLY_TABLE) == 0 || + strcmp(tableName, "cluster/def/" NDB_APPLY_TABLE) == 0 || strcmp(tableName, NDB_REP_DB "/def/" NDB_APPLY_TABLE) == 0 || strcmp(tableName, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE)== 0 ) table->isSysTable = true; diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp index 507058e2743..7524558a2d6 100644 --- a/storage/ndb/tools/restore/consumer_restore.cpp +++ b/storage/ndb/tools/restore/consumer_restore.cpp @@ -494,7 +494,7 @@ BackupRestore::object(Uint32 type, const void * ptr) NdbDictionary::Tablespace curr = dict->getTablespace(old.getName()); NdbError errobj = dict->getNdbError(); - if(errobj.classification == ndberror_cl_none) + if ((int) errobj.classification == (int) ndberror_cl_none) { NdbDictionary::Tablespace* currptr = new NdbDictionary::Tablespace(curr); NdbDictionary::Tablespace * null = 0; @@ -533,7 +533,7 @@ BackupRestore::object(Uint32 type, const void * ptr) NdbDictionary::LogfileGroup curr = dict->getLogfileGroup(old.getName()); NdbError errobj = dict->getNdbError(); - if(errobj.classification == ndberror_cl_none) + if ((int) errobj.classification == (int) ndberror_cl_none) { NdbDictionary::LogfileGroup* currptr = new NdbDictionary::LogfileGroup(curr); @@ -680,7 +680,7 @@ BackupRestore::table(const TableS & table){ return true; const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable); - if(tmptab.m_indexType != NdbDictionary::Index::Undefined){ + if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined){ m_indexes.push_back(table.m_dictTable); return true; } diff --git a/unittest/mytap/t/basic-t.c b/unittest/mytap/t/basic-t.c index bf4c1a9a664..16928509e8c 
100644 --- a/unittest/mytap/t/basic-t.c +++ b/unittest/mytap/t/basic-t.c @@ -7,7 +7,7 @@ int main() { plan(5); ok(1 == 1, "testing basic functions"); - ok(2 == 2, ""); + ok(2 == 2, " "); ok(3 == 3, NULL); if (1 == 1) skip(2, "Sensa fragoli"); diff --git a/unittest/mytap/tap.c b/unittest/mytap/tap.c index 29dc765950f..e3a967ceb79 100644 --- a/unittest/mytap/tap.c +++ b/unittest/mytap/tap.c @@ -235,6 +235,7 @@ skip(int how_many, char const *const fmt, ...) while (how_many-- > 0) { va_list ap; + memset((char*) &ap, 0, sizeof(ap)); /* Keep compiler happy */ vemit_tap(1, NULL, ap); emit_dir("skip", reason); emit_endl(); From c1477a3f20897905f3835c4377c9e88841fdd4bb Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 27 Nov 2006 18:16:08 +0200 Subject: [PATCH 53/57] Removed compiler warnings Ensure that my_size_t is always unsigned (to get predictiable results from system to system) Removed some %lld, as these are not portable BUILD/FINISH.sh: Remove configure files from storage engines (as some of them may be old versions and may cause conflicts) client/mysqldump.c: Removed compiler warning client/mysqlslap.c: Removed compiler warning client/mysqltest.c: Removed compiler warning cmd-line-utils/readline/bind.c: Removed compiler warning cmd-line-utils/readline/histfile.c: Removed compiler warning include/my_global.h: Ensure that my_size_t is always unsigned (to get predictiable results from system to system) Moved my_offset_t here from parse_file.h sql/event_data_objects.cc: Removed compiler warning sql/event_scheduler.cc: Removed compiler warning sql/field.h: Removed compiler warning sql/ha_ndbcluster_binlog.cc: Removed compiler warning sql/ha_partition.cc: Removed compiler warning sql/item_strfunc.cc: Removed compiler warning sql/log_event.cc: Removed compiler warning sql/mysqld.cc: Removed compiler warning sql/parse_file.h: Moved my_offset_t to my_global.h sql/rpl_utility.cc: Removed compiler warning sql/sql_binlog.cc: Removed compiler warning sql/sql_cache.cc: Removed compiler warning sql/tztime.cc: Removed compiler warning storage/archive/ha_archive.cc: Removed compiler warning Removed %lld as it's not portable storage/heap/hp_write.c: Removed compiler warning storage/innobase/os/os0file.c: Removed compiler warning storage/myisam/myisampack.c: Removed compiler warning storage/myisammrg/myrg_rkey.c: Removed compiler warning storage/ndb/include/kernel/signaldata/DictTabInfo.hpp: Use my_offsetof instead of offsetof to get rid of compiler warnings storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp: Removed compiler warning storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp: Removed compiler warning Note: Someone from NDB team should check this fix! 
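The per-file notes continue below. The two remarks above -- "Ensure that my_size_t is always unsigned" and "Removed some %lld, as these are not portable" -- describe the pattern the hunks of this patch apply: 64-bit and size-typed values used in debug and log output are cast down to (ulong) and printed with %lu instead of %llu/%lld (see for instance the TIME_to_ulonglong_datetime() and binlog-epoch prints later in the patch). A stand-alone sketch of that pattern, with an illustrative value and plain printf so it builds outside the server tree:

#include <stdio.h>

int main(void)
{
  unsigned long long next_exec= 20061127181608ULL;   /* made-up packed datetime value */

  /* Avoided by the patch: %llu/%lld, which the commit message calls non-portable. */
  /* printf("Next[%llu]\n", next_exec); */

  /* Pattern the patch applies instead: cast to unsigned long and print with %lu. */
  /* (On a 32-bit platform the cast truncates; the patch accepts that for debug   */
  /* traces.)                                                                      */
  printf("Next[%lu]\n", (unsigned long) next_exec);
  return 0;
}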
storage/ndb/src/kernel/vm/Rope.cpp: Removed compiler warning storage/ndb/src/mgmapi/mgmapi.cpp: Removed compiler warning storage/ndb/src/ndbapi/Ndb.cpp: Removed compiler warning storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp: Removed compiler warning storage/ndb/src/ndbapi/NdbScanOperation.cpp: Removed compiler warning storage/ndb/src/ndbapi/NdbTransaction.cpp: Removed compiler warning storage/ndb/src/ndbapi/Ndblist.cpp: Removed compiler warning --- BUILD/FINISH.sh | 2 +- client/mysqldump.c | 4 +-- client/mysqlslap.c | 2 +- client/mysqltest.c | 10 +++---- cmd-line-utils/readline/bind.c | 3 +- cmd-line-utils/readline/histfile.c | 6 ++-- include/my_global.h | 21 ++++++++++++-- sql/event_data_objects.cc | 16 ++++++---- sql/event_scheduler.cc | 2 +- sql/field.h | 2 +- sql/ha_ndbcluster_binlog.cc | 24 ++++++++------- sql/ha_partition.cc | 4 +-- sql/item_strfunc.cc | 6 ++-- sql/log_event.cc | 15 +++++----- sql/mysqld.cc | 2 +- sql/parse_file.h | 17 ----------- sql/rpl_utility.cc | 11 +++---- sql/sql_binlog.cc | 6 ++-- sql/sql_cache.cc | 20 ++++++------- sql/tztime.cc | 8 ++--- storage/archive/ha_archive.cc | 26 +++++++++-------- storage/heap/hp_write.c | 2 +- storage/innobase/os/os0file.c | 2 +- storage/myisam/myisampack.c | 16 +++++----- storage/myisammrg/myrg_rkey.c | 4 +-- .../include/kernel/signaldata/DictTabInfo.hpp | 20 ++++++------- .../kernel/blocks/dbdict/printSchemaFile.cpp | 2 +- .../src/kernel/blocks/dbtup/DbtupRoutines.cpp | 2 ++ storage/ndb/src/kernel/vm/Rope.cpp | 14 ++++----- storage/ndb/src/mgmapi/mgmapi.cpp | 6 ++-- storage/ndb/src/ndbapi/Ndb.cpp | 29 ++++++++++--------- .../ndb/src/ndbapi/NdbEventOperationImpl.cpp | 6 ++-- storage/ndb/src/ndbapi/NdbScanOperation.cpp | 6 ++-- storage/ndb/src/ndbapi/NdbTransaction.cpp | 2 +- storage/ndb/src/ndbapi/Ndblist.cpp | 2 +- 35 files changed, 168 insertions(+), 152 deletions(-) diff --git a/BUILD/FINISH.sh b/BUILD/FINISH.sh index 51f6e909172..6f0600c9de3 100644 --- a/BUILD/FINISH.sh +++ b/BUILD/FINISH.sh @@ -5,7 +5,7 @@ configure="./configure $base_configs $extra_configs" commands="\ $make -k distclean || true -/bin/rm -rf */.deps/*.P config.cache storage/innobase/config.cache autom4te.cache innobase/autom4te.cache; +/bin/rm -rf */.deps/*.P configure config.cache storage/*/configure storage/*/config.cache autom4te.cache storage/*/autom4te.cache; path=`dirname $0` . \"$path/autorun.sh\"" diff --git a/client/mysqldump.c b/client/mysqldump.c index f812764b97c..4a32d1617c2 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -1560,8 +1560,8 @@ static uint dump_routines_for_db(char *db) if the user has EXECUTE privilege he see routine names, but NOT the routine body of other routines that are not the creator of! 
*/ - DBUG_PRINT("info",("length of body for %s row[2] '%s' is %d", - routine_name, row[2], strlen(row[2]))); + DBUG_PRINT("info",("length of body for %s row[2] '%s' is %ld", + routine_name, row[2], (long) strlen(row[2]))); if (strlen(row[2])) { char *query_str= NULL; diff --git a/client/mysqlslap.c b/client/mysqlslap.c index c2d3a5866e0..cba6f3009be 100644 --- a/client/mysqlslap.c +++ b/client/mysqlslap.c @@ -592,7 +592,7 @@ get_random_string(char *buf) DBUG_ENTER("get_random_string"); for (x= RAND_STRING_SIZE; x > 0; x--) *buf_ptr++= ALPHANUMERICS[random() % ALPHANUMERICS_SIZE]; - DBUG_PRINT("info", ("random string: '%*s'", buf_ptr - buf, buf)); + DBUG_PRINT("info", ("random string: '%*s'", (int) (buf_ptr - buf), buf)); DBUG_RETURN(buf_ptr - buf); } diff --git a/client/mysqltest.c b/client/mysqltest.c index b46a337959a..286de17a00e 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -893,8 +893,8 @@ int dyn_string_cmp(DYNAMIC_STRING* ds, const char *fname) die(NullS); if (!eval_result && (uint) stat_info.st_size != ds->length) { - DBUG_PRINT("info",("Size differs: result size: %u file size: %llu", - ds->length, stat_info.st_size)); + DBUG_PRINT("info",("Size differs: result size: %u file size: %lu", + ds->length, (ulong) stat_info.st_size)); DBUG_PRINT("info",("result: '%s'", ds->str)); DBUG_RETURN(RESULT_LENGTH_MISMATCH); } @@ -3077,14 +3077,14 @@ void do_connect(struct st_command *command) else if (!strncmp(con_options, "COMPRESS", 8)) con_compress= 1; else - die("Illegal option to connect: %.*s", end - con_options, con_options); + die("Illegal option to connect: %.*s", (int) (end - con_options), con_options); /* Process next option */ con_options= end; } if (next_con == connections_end) - die("Connection limit exhausted, you can have max %d connections", - (sizeof(connections)/sizeof(struct st_connection))); + die("Connection limit exhausted, you can have max %ld connections", + (long) (sizeof(connections)/sizeof(struct st_connection))); if (find_connection_by_name(ds_connection_name.str)) die("Connection %s already exists", ds_connection_name.str); diff --git a/cmd-line-utils/readline/bind.c b/cmd-line-utils/readline/bind.c index ab1136c7da5..3e2a72375d3 100644 --- a/cmd-line-utils/readline/bind.c +++ b/cmd-line-utils/readline/bind.c @@ -735,7 +735,8 @@ _rl_read_file (filename, sizep) file_size = (size_t)finfo.st_size; /* check for overflow on very large files */ - if (file_size != finfo.st_size || file_size + 1 < file_size) + if ((long long) file_size != (long long) finfo.st_size || + file_size + 1 < file_size) { if (file >= 0) close (file); diff --git a/cmd-line-utils/readline/histfile.c b/cmd-line-utils/readline/histfile.c index 7d340b346d4..f1822b105a4 100644 --- a/cmd-line-utils/readline/histfile.c +++ b/cmd-line-utils/readline/histfile.c @@ -184,7 +184,8 @@ read_history_range (filename, from, to) file_size = (size_t)finfo.st_size; /* check for overflow on very large files */ - if (file_size != finfo.st_size || file_size + 1 < file_size) + if ((long long) file_size != (long long) finfo.st_size || + file_size + 1 < file_size) { errno = overflow_errno; goto error_and_exit; @@ -333,7 +334,8 @@ history_truncate_file (fname, lines) file_size = (size_t)finfo.st_size; /* check for overflow on very large files */ - if (file_size != finfo.st_size || file_size + 1 < file_size) + if ((long long) file_size != (long long) finfo.st_size || + file_size + 1 < file_size) { close (file); #if defined (EFBIG) diff --git a/include/my_global.h b/include/my_global.h index 
a7ec41068b3..c182ef7b799 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -869,9 +869,8 @@ typedef long my_ptrdiff_t; typedef long long my_ptrdiff_t; #endif -#if HAVE_SIZE_T -typedef size_t my_size_t; -#elif SIZEOF_CHARP <= SIZEOF_LONG +/* We can't set my_size_t to size_t as we want my_size_t to be unsigned */ +#if SIZEOF_CHARP <= SIZEOF_LONG typedef unsigned long my_size_t; #else typedef unsigned long long my_size_t; @@ -886,6 +885,22 @@ typedef unsigned long long my_size_t; #define ADD_TO_PTR(ptr,size,type) (type) ((byte*) (ptr)+size) #define PTR_BYTE_DIFF(A,B) (my_ptrdiff_t) ((byte*) (A) - (byte*) (B)) +/* + Custom version of standard offsetof() macro which can be used to get + offsets of members in class for non-POD types (according to the current + version of C++ standard offsetof() macro can't be used in such cases and + attempt to do so causes warnings to be emitted, OTOH in many cases it is + still OK to assume that all instances of the class has the same offsets + for the same members). + + This is temporary solution which should be removed once File_parser class + and related routines are refactored. +*/ + +#define my_offsetof(TYPE, MEMBER) \ + ((size_t)((char *)&(((TYPE *)0x10)->MEMBER) - (char*)0x10)) + + #define NullS (char *) 0 /* Nowdays we do not support MessyDos */ #ifndef NEAR diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index 397688d3bff..0de90e4145b 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -1218,7 +1218,8 @@ Event_queue_element::compute_next_execution_time() my_tz_UTC->gmt_sec_to_TIME(&time_now, current_thd->query_start()); - DBUG_PRINT("info",("NOW=[%llu]", TIME_to_ulonglong_datetime(&time_now))); + DBUG_PRINT("info",("NOW: [%lu]", + (ulong) TIME_to_ulonglong_datetime(&time_now))); /* if time_now is after ends don't execute anymore */ if (!ends_null && (tmp= my_time_compare(&ends, &time_now)) == -1) @@ -1300,7 +1301,8 @@ Event_queue_element::compute_next_execution_time() } else { - DBUG_PRINT("info",("Next[%llu]",TIME_to_ulonglong_datetime(&next_exec))); + DBUG_PRINT("info",("Next[%lu]", + (ulong) TIME_to_ulonglong_datetime(&next_exec))); execute_at= next_exec; execute_at_null= FALSE; } @@ -1322,7 +1324,8 @@ Event_queue_element::compute_next_execution_time() expression, interval)) goto err; execute_at= next_exec; - DBUG_PRINT("info",("Next[%llu]",TIME_to_ulonglong_datetime(&next_exec))); + DBUG_PRINT("info",("Next[%lu]", + (ulong) TIME_to_ulonglong_datetime(&next_exec))); } else { @@ -1356,7 +1359,8 @@ Event_queue_element::compute_next_execution_time() expression, interval)) goto err; execute_at= next_exec; - DBUG_PRINT("info",("Next[%llu]",TIME_to_ulonglong_datetime(&next_exec))); + DBUG_PRINT("info",("Next[%lu]", + (ulong) TIME_to_ulonglong_datetime(&next_exec))); } execute_at_null= FALSE; } @@ -1393,8 +1397,8 @@ Event_queue_element::compute_next_execution_time() } else { - DBUG_PRINT("info", ("Next[%llu]", - TIME_to_ulonglong_datetime(&next_exec))); + DBUG_PRINT("info", ("Next[%lu]", + (ulong) TIME_to_ulonglong_datetime(&next_exec))); execute_at= next_exec; execute_at_null= FALSE; } diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc index b1a82477c3c..9be2f2d1125 100644 --- a/sql/event_scheduler.cc +++ b/sql/event_scheduler.cc @@ -776,7 +776,7 @@ Event_scheduler::dump_internal_status() mutex_last_unlocked_at_line); printf("WOC : %s\n", waiting_on_cond? 
"YES":"NO"); printf("Workers : %u\n", workers_count()); - printf("Executed : %llu\n", started_events); + printf("Executed : %lu\n", (ulong) started_events); printf("Data locked: %s\n", mutex_scheduler_data_locked ? "YES":"NO"); DBUG_VOID_RETURN; diff --git a/sql/field.h b/sql/field.h index 2a8257ed606..433f5c6bfbf 100644 --- a/sql/field.h +++ b/sql/field.h @@ -239,7 +239,7 @@ public: */ my_size_t last_null_byte() const { my_size_t bytes= do_last_null_byte(); - DBUG_PRINT("debug", ("last_null_byte() ==> %d", bytes)); + DBUG_PRINT("debug", ("last_null_byte() ==> %ld", (long) bytes)); DBUG_ASSERT(bytes <= table->s->null_bytes); return bytes; } diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index cb2ac56e828..865fa0bde94 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -3563,9 +3563,11 @@ restart: if (do_ndbcluster_binlog_close_connection) { DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection: %d, " - "ndb_latest_handled_binlog_epoch: %llu, " - "*p_latest_trans_gci: %llu", do_ndbcluster_binlog_close_connection, - ndb_latest_handled_binlog_epoch, *p_latest_trans_gci)); + "ndb_latest_handled_binlog_epoch: %lu, " + "*p_latest_trans_gci: %lu", + do_ndbcluster_binlog_close_connection, + (ulong) ndb_latest_handled_binlog_epoch, + (ulong) *p_latest_trans_gci)); } #endif #ifdef RUN_NDB_BINLOG_TIMER @@ -3653,9 +3655,10 @@ restart: do_ndbcluster_binlog_close_connection= BCCC_restart; if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running) { - sql_print_error("NDB Binlog: latest transaction in epoch %lld not in binlog " - "as latest received epoch is %lld", - *p_latest_trans_gci, ndb_latest_received_binlog_epoch); + sql_print_error("NDB Binlog: latest transaction in epoch %lu not in binlog " + "as latest received epoch is %lu", + (ulong) *p_latest_trans_gci, + (ulong) ndb_latest_received_binlog_epoch); } } } @@ -3841,9 +3844,10 @@ restart: do_ndbcluster_binlog_close_connection= BCCC_restart; if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running) { - sql_print_error("NDB Binlog: latest transaction in epoch %lld not in binlog " - "as latest received epoch is %lld", - *p_latest_trans_gci, ndb_latest_received_binlog_epoch); + sql_print_error("NDB Binlog: latest transaction in epoch %lu not in binlog " + "as latest received epoch is %lu", + (ulong) *p_latest_trans_gci, + (ulong) ndb_latest_received_binlog_epoch); } } } @@ -3875,7 +3879,7 @@ restart: row.master_log_file= start.file_name(); row.master_log_pos= start.file_pos(); - DBUG_PRINT("info", ("COMMIT gci: %lld", gci)); + DBUG_PRINT("info", ("COMMIT gci: %lu", (ulong) gci)); if (ndb_update_binlog_index) ndb_add_binlog_index(thd, &row); ndb_latest_applied_binlog_epoch= gci; diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 82b43ce578f..7cd33dd5726 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -2427,7 +2427,7 @@ repeat: do { DBUG_PRINT("info", ("external_lock(thd, %d) iteration %d", - lock_type, (file - m_file))); + lock_type, (int) (file - m_file))); if ((error= (*file)->external_lock(thd, lock_type))) { if (F_UNLCK != lock_type) @@ -2508,7 +2508,7 @@ THR_LOCK_DATA **ha_partition::store_lock(THD *thd, file= m_file; do { - DBUG_PRINT("info", ("store lock %d iteration", (file - m_file))); + DBUG_PRINT("info", ("store lock %d iteration", (int) (file - m_file))); to= (*file)->store_lock(thd, to, lock_type); } while (*(++file)); DBUG_RETURN(to); diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 
7237b4e37ae..2a022d4af71 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -978,8 +978,8 @@ String *Item_func_insert::val_str(String *str) if (length > res->length() - start) length= res->length() - start; - if (res->length() - length + res2->length() > - current_thd->variables.max_allowed_packet) + if ((ulonglong) (res->length() - length + res2->length()) > + (ulonglong) current_thd->variables.max_allowed_packet) { push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, @@ -2426,7 +2426,7 @@ String *Item_func_lpad::val_str(String *str) pad_char_length= pad->numchars(); byte_count= count * collation.collation->mbmaxlen; - if (byte_count > current_thd->variables.max_allowed_packet) + if ((ulonglong) byte_count > current_thd->variables.max_allowed_packet) { push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, diff --git a/sql/log_event.cc b/sql/log_event.cc index 0f9e10e37c1..112f4aee135 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -5376,7 +5376,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, const byte* const ptr_rows_data= var_start + byte_count + 1; my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf); - DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %u", + DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %lu", m_table_id, m_flags, m_width, data_size)); m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME)); @@ -5416,8 +5416,8 @@ int Rows_log_event::do_add_row_data(byte *const row_data, would save binlog space. TODO */ DBUG_ENTER("Rows_log_event::do_add_row_data"); - DBUG_PRINT("enter", ("row_data: 0x%lx length: %u", (ulong) row_data, - length)); + DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data, + (ulong) length)); /* Don't print debug messages when running valgrind since they can trigger false warnings. 
@@ -5597,7 +5597,8 @@ unpack_row(RELAY_LOG_INFO *rli, uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG; Field *const f= *field_ptr; - DBUG_PRINT("info", ("processing column '%s' @ 0x%lx", f->field_name, f->ptr)); + DBUG_PRINT("info", ("processing column '%s' @ 0x%lx", f->field_name, + (long) f->ptr)); if (event_type == WRITE_ROWS_EVENT && (f->flags & mask) == mask) { slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD, @@ -6121,7 +6122,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len, uchar *ptr_after_colcnt= (uchar*) ptr_colcnt; m_colcnt= net_field_length(&ptr_after_colcnt); - DBUG_PRINT("info",("m_dblen: %d off: %ld m_tbllen: %d off: %ld m_colcnt: %lu off: %ld", + DBUG_PRINT("info",("m_dblen: %lu off: %ld m_tbllen: %lu off: %ld m_colcnt: %lu off: %ld", m_dblen, (long) (ptr_dblen-(const byte*)vpart), m_tbllen, (long) (ptr_tbllen-(const byte*)vpart), m_colcnt, (long) (ptr_colcnt-(const byte*)vpart))); @@ -6527,10 +6528,10 @@ copy_extra_record_fields(TABLE *table, my_ptrdiff_t master_fields) { DBUG_PRINT("info", ("Copying to 0x%lx " - "from field %ld at offset %u " + "from field %lu at offset %lu " "to field %d at offset %lu", (long) table->record[0], - master_fields, master_reclength, + (ulong) master_fields, (ulong) master_reclength, table->s->fields, table->s->reclength)); /* Copying the extra fields of the slave that does not exist on diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 4acc7e4ee54..40e24fb5800 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1610,7 +1610,7 @@ static void network_init(void) if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1)) { sql_print_error("The socket file path is too long (> %u): %s", - sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port); + (uint) sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port); unireg_abort(1); } if ((unix_sock= socket(AF_UNIX, SOCK_STREAM, 0)) < 0) diff --git a/sql/parse_file.h b/sql/parse_file.h index 5fb65b4c7ec..0a02bf7eb75 100644 --- a/sql/parse_file.h +++ b/sql/parse_file.h @@ -106,21 +106,4 @@ public: MEM_ROOT *mem_root, bool bad_format_errors); }; - - -/* - Custom version of standard offsetof() macro which can be used to get - offsets of members in class for non-POD types (according to the current - version of C++ standard offsetof() macro can't be used in such cases and - attempt to do so causes warnings to be emitted, OTOH in many cases it is - still OK to assume that all instances of the class has the same offsets - for the same members). - - This is temporary solution which should be removed once File_parser class - and related routines are refactored. 
-*/ - -#define my_offsetof(TYPE, MEMBER) \ - ((size_t)((char *)&(((TYPE *)0x10)->MEMBER) - (char*)0x10)) - #endif /* _PARSE_FILE_H_ */ diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index c80b6dc3f69..4bed1343e55 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -25,7 +25,7 @@ field_length_from_packed(enum_field_types const field_type, switch (field_type) { case MYSQL_TYPE_DECIMAL: case MYSQL_TYPE_NEWDECIMAL: - length= ~0UL; + length= ~(uint32) 0; break; case MYSQL_TYPE_YEAR: case MYSQL_TYPE_TINY: @@ -71,7 +71,7 @@ field_length_from_packed(enum_field_types const field_type, break; break; case MYSQL_TYPE_BIT: - length= ~0UL; + length= ~(uint32) 0; break; default: /* This case should never be chosen */ @@ -85,7 +85,7 @@ field_length_from_packed(enum_field_types const field_type, case MYSQL_TYPE_SET: case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_VARCHAR: - length= ~0UL; // NYI + length= ~(uint32) 0; // NYI break; case MYSQL_TYPE_TINY_BLOB: @@ -93,7 +93,7 @@ field_length_from_packed(enum_field_types const field_type, case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_BLOB: case MYSQL_TYPE_GEOMETRY: - length= ~0UL; // NYI + length= ~(uint32) 0; // NYI break; } @@ -131,7 +131,8 @@ table_def::compatible_with(RELAY_LOG_INFO *rli, TABLE *table) slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF, "Table width mismatch - " "received %u columns, %s.%s has %u columns", - size(), tsh->db.str, tsh->table_name.str, tsh->fields); + (uint) size(), tsh->db.str, tsh->table_name.str, + tsh->fields); } for (uint col= 0 ; col < cols_to_check ; ++col) diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc index b28d8189631..37094b992e5 100644 --- a/sql/sql_binlog.cc +++ b/sql/sql_binlog.cc @@ -114,8 +114,8 @@ void mysql_client_binlog_statement(THD* thd) order to be able to read exactly what is necessary. 
*/ - DBUG_PRINT("info",("binlog base64 decoded_len=%d, bytes_decoded=%d", - decoded_len, bytes_decoded)); + DBUG_PRINT("info",("binlog base64 decoded_len: %lu bytes_decoded: %d", + (ulong) decoded_len, bytes_decoded)); /* Now we start to read events of the buffer, until there are no @@ -161,7 +161,7 @@ void mysql_client_binlog_statement(THD* thd) (long) (bufptr+EVENT_TYPE_OFFSET))); DBUG_PRINT("info", ("bytes_decoded: %d bufptr: 0x%lx buf[EVENT_LEN_OFFSET]: %lu", bytes_decoded, (long) bufptr, - uint4korr(bufptr+EVENT_LEN_OFFSET))); + (ulong) uint4korr(bufptr+EVENT_LEN_OFFSET))); #endif ev->thd= thd; if (int err= ev->exec_event(thd->rli_fake)) diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 9fc39685407..3362ec76fc2 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1227,9 +1227,9 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu", if (engine_data != table->engine_data()) { DBUG_PRINT("qcache", - ("Handler require invalidation queries of %s.%s %lld-%lld", - table_list.db, table_list.alias, - engine_data, table->engine_data())); + ("Handler require invalidation queries of %s.%s %lu-%lu", + table_list.db, table_list.alias, + (ulong) engine_data, (ulong) table->engine_data())); invalidate_table((byte *) table->db(), table->key_length()); } else @@ -1250,10 +1250,10 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu", #ifndef EMBEDDED_LIBRARY do { - DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %u)", + DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %lu)", result_block->length, result_block->used, - result_block->headers_len()+ - ALIGN_SIZE(sizeof(Query_cache_result)))); + (ulong) (result_block->headers_len()+ + ALIGN_SIZE(sizeof(Query_cache_result))))); Query_cache_result *result = result_block->result(); if (net_real_write(&thd->net, result->data(), @@ -2469,11 +2469,11 @@ Query_cache::insert_table(uint key_len, char *key, table_block->table()->engine_data() != engine_data) { DBUG_PRINT("qcache", - ("Handler require invalidation queries of %s.%s %lld-%lld", + ("Handler require invalidation queries of %s.%s %lu-%lu", table_block->table()->db(), table_block->table()->table(), - engine_data, - table_block->table()->engine_data())); + (ulong) engine_data, + (ulong) table_block->table()->engine_data())); /* as far as we delete all queries with this table, table block will be deleted, too @@ -3759,7 +3759,7 @@ my_bool Query_cache::check_integrity(bool locked) { DBUG_PRINT("error", ("block 0x%lx do not aligned by %d", (ulong) block, - ALIGN_SIZE(1))); + (int) ALIGN_SIZE(1))); result = 1; } // Check memory allocation diff --git a/sql/tztime.cc b/sql/tztime.cc index 5af16005f8f..6acf17520d9 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1743,8 +1743,8 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) tz_leapcnt++; DBUG_PRINT("info", - ("time_zone_leap_second table: tz_leapcnt=%u tt_time=%lld offset=%ld", - tz_leapcnt, (longlong)tz_lsis[tz_leapcnt-1].ls_trans, + ("time_zone_leap_second table: tz_leapcnt:%u tt_time: %lu offset: %ld", + tz_leapcnt, (ulong) tz_lsis[tz_leapcnt-1].ls_trans, tz_lsis[tz_leapcnt-1].ls_corr)); res= table->file->index_next(table->record[0]); @@ -2057,8 +2057,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) tz_info->timecnt++; DBUG_PRINT("info", - ("time_zone_transition table: tz_id=%u tt_time=%lld tt_id=%u", - tzid, (longlong)ttime, ttid)); + ("time_zone_transition table: tz_id: %u tt_time:%lu tt_id: %u", + tzid, (ulong) ttime, ttid)); res= 
table->file->index_next_same(table->record[0], (byte*)table->field[0]->ptr, 4); diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index 172f0b524fb..f85c3fb4adf 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -329,10 +329,12 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows, DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0])); DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1])); - DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", *rows)); - DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point)); - DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment)); - DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", *forced_flushes)); + DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lu", (ulong) *rows)); + DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lu", (ulong) check_point)); + DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %lu", + (ulong) *auto_increment)); + DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %lu", + (ulong) *forced_flushes)); DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path)); DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr))); @@ -385,12 +387,12 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows, (uint)ARCHIVE_CHECK_HEADER)); DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION)); - DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows)); - DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point)); - DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu", - auto_increment)); - DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu", - forced_flushes)); + DBUG_PRINT("ha_archive::write_meta_file", ("Rows %lu", (ulong) rows)); + DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %lu", (ulong) check_point)); + DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %lu", + (ulong) auto_increment)); + DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %lu", + (ulong) forced_flushes)); DBUG_PRINT("ha_archive::write_meta_file", ("Real path %s", real_path)); DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty)); @@ -1057,7 +1059,7 @@ int ha_archive::rnd_init(bool scan) if (scan) { scan_rows= share->rows_recorded; - DBUG_PRINT("info", ("archive will retrieve %llu rows", scan_rows)); + DBUG_PRINT("info", ("archive will retrieve %lu rows", (ulong) scan_rows)); stats.records= 0; /* @@ -1318,7 +1320,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) share->rows_recorded++; } } - DBUG_PRINT("info", ("recovered %llu archive rows", share->rows_recorded)); + DBUG_PRINT("info", ("recovered %lu archive rows", (ulong) share->rows_recorded)); my_free((char*)buf, MYF(0)); if (rc && rc != HA_ERR_END_OF_FILE) diff --git a/storage/heap/hp_write.c b/storage/heap/hp_write.c index 16f02999c93..c83ae65c966 100644 --- a/storage/heap/hp_write.c +++ b/storage/heap/hp_write.c @@ -68,7 +68,7 @@ int heap_write(HP_INFO *info, const byte *record) DBUG_RETURN(0); err: - DBUG_PRINT("info",("Duplicate key: %d", keydef - share->keydef)); + DBUG_PRINT("info",("Duplicate key: %d", (int) (keydef - share->keydef))); info->errkey= keydef - share->keydef; if (keydef->algorithm == HA_KEY_ALG_BTREE) { diff --git a/storage/innobase/os/os0file.c b/storage/innobase/os/os0file.c index a4acb0cd485..c4d051ec771 100644 --- 
a/storage/innobase/os/os0file.c +++ b/storage/innobase/os/os0file.c @@ -1733,7 +1733,7 @@ os_file_set_size( } /* Print about progress for each 100 MB written */ - if ((current_size + n_bytes) / (ib_longlong)(100 * 1024 * 1024) + if ((ib_longlong) (current_size + n_bytes) / (ib_longlong)(100 * 1024 * 1024) != current_size / (ib_longlong)(100 * 1024 * 1024)) { fprintf(stderr, " %lu00", diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c index 810c12327a0..98121cc6d90 100644 --- a/storage/myisam/myisampack.c +++ b/storage/myisam/myisampack.c @@ -1105,18 +1105,18 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts) my_off_t total_count; char llbuf[32]; - DBUG_PRINT("info", ("column: %3u", count - huff_counts + 1)); + DBUG_PRINT("info", ("column: %3u", (uint) (count - huff_counts) + 1)); if (verbose >= 2) - VOID(printf("column: %3u\n", count - huff_counts + 1)); + VOID(printf("column: %3u\n", (uint) (count - huff_counts) + 1)); if (count->tree_buff) { DBUG_PRINT("info", ("number of distinct values: %u", - (count->tree_pos - count->tree_buff) / - count->field_length)); + (uint) ((count->tree_pos - count->tree_buff) / + count->field_length))); if (verbose >= 2) VOID(printf("number of distinct values: %u\n", - (count->tree_pos - count->tree_buff) / - count->field_length)); + (uint) ((count->tree_pos - count->tree_buff) / + count->field_length))); } total_count= 0; for (idx= 0; idx < 256; idx++) @@ -2279,8 +2279,8 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees) if (bits > 8 * sizeof(code)) { VOID(fflush(stdout)); - VOID(fprintf(stderr, "error: Huffman code too long: %u/%u\n", - bits, 8 * sizeof(code))); + VOID(fprintf(stderr, "error: Huffman code too long: %u/%lu\n", + bits, (ulong) (8 * sizeof(code)))); errors++; break; } diff --git a/storage/myisammrg/myrg_rkey.c b/storage/myisammrg/myrg_rkey.c index f87b264081e..8d3c0a4699a 100644 --- a/storage/myisammrg/myrg_rkey.c +++ b/storage/myisammrg/myrg_rkey.c @@ -87,8 +87,8 @@ int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key, mi=(info->current_table=(MYRG_TABLE *)queue_top(&(info->by_key)))->table; mi->once_flags|= RRND_PRESERVE_LASTINX; - DBUG_PRINT("info", ("using table no: %d", - info->current_table - info->open_tables + 1)); + DBUG_PRINT("info", ("using table no: %u", + (uint) (info->current_table - info->open_tables) + 1)); DBUG_DUMP("result key", (byte*) mi->lastkey, mi->lastkey_length); DBUG_RETURN(_myrg_mi_read_record(mi,buf)); } diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp index 86186929394..6e76840fc5f 100644 --- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -47,17 +47,17 @@ inline int my_decimal_get_binary_size(uint precision, uint scale) #endif #define DTIMAP(x, y, z) \ - { DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 } + { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 } #define DTIMAP2(x, y, z, u, v) \ - { DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 } + { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 } #define DTIMAPS(x, y, z, u, v) \ - { DictTabInfo::y, offsetof(x, z), SimpleProperties::StringValue, u, v, 0 } + { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::StringValue, u, v, 0 } #define DTIMAPB(x, y, z, u, v, l) \ - { DictTabInfo::y, offsetof(x, z), SimpleProperties::BinaryValue, u, v, \ - 
offsetof(x, l) } + { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::BinaryValue, u, v, \ + my_offsetof(x, l) } #define DTIBREAK(x) \ { DictTabInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 } @@ -602,17 +602,17 @@ public: }; #define DFGIMAP(x, y, z) \ - { DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 } + { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 } #define DFGIMAP2(x, y, z, u, v) \ - { DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 } + { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 } #define DFGIMAPS(x, y, z, u, v) \ - { DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::StringValue, u, v, 0 } + { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::StringValue, u, v, 0 } #define DFGIMAPB(x, y, z, u, v, l) \ - { DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::BinaryValue, u, v, \ - offsetof(x, l) } + { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::BinaryValue, u, v, \ + my_offsetof(x, l) } #define DFGIBREAK(x) \ { DictFilegroupInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 } diff --git a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp index d3a4e72c3f0..9c66636980a 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp @@ -68,7 +68,7 @@ print_head(const char * filename, const SchemaFile * sf) if (! checkonly) { ndbout << "----- Schemafile: " << filename << " -----" << endl; ndbout_c("Magic: %.*s ByteOrder: %.8x NdbVersion: %s FileSize: %d", - sizeof(sf->Magic), + (int) sizeof(sf->Magic), sf->Magic, sf->ByteOrder, version(sf->NdbVersion), diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp index 677eff53559..5d4115c1d2d 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp @@ -1066,6 +1066,7 @@ Dbtup::updateVarSizeNotNULL(Uint32* in_buffer, terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } + return false; } bool @@ -1485,6 +1486,7 @@ Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer, terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } + return false; } bool diff --git a/storage/ndb/src/kernel/vm/Rope.cpp b/storage/ndb/src/kernel/vm/Rope.cpp index 0c90d8f65d5..b6bce864caf 100644 --- a/storage/ndb/src/kernel/vm/Rope.cpp +++ b/storage/ndb/src/kernel/vm/Rope.cpp @@ -30,8 +30,8 @@ ConstRope::copy(char* buf) const { int ConstRope::compare(const char * str, size_t len) const { if(DEBUG_ROPE) - ndbout_c("ConstRope[ %d 0x%x 0x%x ]::compare(%s, %d)", - head.used, head.firstItem, head.lastItem, str, len); + ndbout_c("ConstRope[ %d 0x%x 0x%x ]::compare(%s, %d)", + head.used, head.firstItem, head.lastItem, str, (int) len); Uint32 left = head.used > len ? len : head.used; Ptr curr; curr.i = head.firstItem; @@ -60,7 +60,7 @@ ConstRope::compare(const char * str, size_t len) const { } } if(DEBUG_ROPE) - ndbout_c("ConstRope::compare(%s, %d) -> %d", str, len, head.used > len); + ndbout_c("ConstRope::compare(%s, %d) -> %d", str, (int) len, head.used > len); return head.used > len; } @@ -91,7 +91,7 @@ Rope::copy(char* buf) const { int Rope::compare(const char * str, size_t len) const { if(DEBUG_ROPE) - ndbout_c("Rope::compare(%s, %d)", str, len); + ndbout_c("Rope::compare(%s, %d)", str, (int) len); Uint32 left = head.used > len ? 
len : head.used; Ptr curr; curr.i = head.firstItem; @@ -100,7 +100,7 @@ Rope::compare(const char * str, size_t len) const { int res = memcmp(str, (const char*)curr.p->data, 4 * getSegmentSize()); if(res != 0){ if(DEBUG_ROPE) - ndbout_c("Rope::compare(%s, %d, %s) -> %d", str, len, + ndbout_c("Rope::compare(%s, %d, %s) -> %d", str, (int) len, (const char*)curr.p->data, res); return res; } @@ -120,14 +120,14 @@ Rope::compare(const char * str, size_t len) const { } } if(DEBUG_ROPE) - ndbout_c("Rope::compare(%s, %d) -> %d", str, len, head.used > len); + ndbout_c("Rope::compare(%s, %d) -> %d", str, (int) len, head.used > len); return head.used > len; } bool Rope::assign(const char * s, size_t len, Uint32 hash){ if(DEBUG_ROPE) - ndbout_c("Rope::assign(%s, %d, 0x%x)", s, len, hash); + ndbout_c("Rope::assign(%s, %d, 0x%x)", s, (int) len, hash); m_hash = hash; head.used = (head.used + 3) / 4; release(); diff --git a/storage/ndb/src/mgmapi/mgmapi.cpp b/storage/ndb/src/mgmapi/mgmapi.cpp index 90a863402e8..b64b24aa3cf 100644 --- a/storage/ndb/src/mgmapi/mgmapi.cpp +++ b/storage/ndb/src/mgmapi/mgmapi.cpp @@ -184,7 +184,7 @@ ndb_mgm_create_handle() h->mgmd_version_minor= -1; h->mgmd_version_build= -1; - DBUG_PRINT("info", ("handle=0x%x", (UintPtr)h)); + DBUG_PRINT("info", ("handle: 0x%lx", (ulong) h)); DBUG_RETURN(h); } @@ -201,7 +201,7 @@ int ndb_mgm_set_connectstring(NdbMgmHandle handle, const char * mgmsrv) { DBUG_ENTER("ndb_mgm_set_connectstring"); - DBUG_PRINT("info", ("handle=0x%x", (UintPtr)handle)); + DBUG_PRINT("info", ("handle: 0x%lx", (ulong) handle)); handle->cfg.~LocalConfig(); new (&(handle->cfg)) LocalConfig; if (!handle->cfg.init(mgmsrv, 0) || @@ -243,7 +243,7 @@ ndb_mgm_destroy_handle(NdbMgmHandle * handle) DBUG_ENTER("ndb_mgm_destroy_handle"); if(!handle) DBUG_VOID_RETURN; - DBUG_PRINT("info", ("handle=0x%x", (UintPtr)(* handle))); + DBUG_PRINT("info", ("handle: 0x%lx", (ulong) (* handle))); /** * important! 
only disconnect if connected * other code relies on this diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp index 843cb7668e6..ca5fd07d724 100644 --- a/storage/ndb/src/ndbapi/Ndb.cpp +++ b/storage/ndb/src/ndbapi/Ndb.cpp @@ -768,7 +768,7 @@ Ndb::getAutoIncrementValue(const char* aTableName, TupleIdRange & range = info->m_tuple_id_range; if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong) tupleId)); DBUG_RETURN(0); } @@ -791,7 +791,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, TupleIdRange & range = info->m_tuple_id_range; if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); DBUG_RETURN(0); } @@ -806,7 +806,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); DBUG_RETURN(0); } @@ -819,7 +819,7 @@ Ndb::getTupleIdFromNdb(const NdbTableImpl* table, { assert(range.m_first_tuple_id < range.m_last_tuple_id); tupleId = ++range.m_first_tuple_id; - DBUG_PRINT("info", ("next cached value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("next cached value %lu", (ulong)tupleId)); } else { @@ -856,7 +856,7 @@ Ndb::readAutoIncrementValue(const char* aTableName, TupleIdRange & range = info->m_tuple_id_range; if (readTupleIdFromNdb(table, range, tupleId) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); DBUG_RETURN(0); } @@ -879,7 +879,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable, TupleIdRange & range = info->m_tuple_id_range; if (readTupleIdFromNdb(table, range, tupleId) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); DBUG_RETURN(0); } @@ -893,7 +893,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable, if (readTupleIdFromNdb(table, range, tupleId) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); DBUG_RETURN(0); } @@ -994,8 +994,8 @@ Ndb::setTupleIdInNdb(const NdbTableImpl* table, { range.m_first_tuple_id = tupleId - 1; DBUG_PRINT("info", - ("Setting next auto increment cached value to %llu", - (ulonglong)tupleId)); + ("Setting next auto increment cached value to %lu", + (ulong)tupleId)); DBUG_RETURN(0); } } @@ -1049,7 +1049,8 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, { DBUG_ENTER("Ndb::opTupleIdOnNdb"); Uint32 aTableId = table->m_id; - DBUG_PRINT("enter", ("table=%u value=%llu op=%u", aTableId, opValue, op)); + DBUG_PRINT("enter", ("table: %u value: %lu op: %u", + aTableId, (ulong) opValue, op)); NdbTransaction* tConnection = NULL; NdbOperation* tOperation = NULL; @@ -1117,8 +1118,8 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, else { DBUG_PRINT("info", - ("Setting next auto increment value (db) to %llu", - (ulonglong)opValue)); + ("Setting next auto increment value (db) to %lu", + (ulong) opValue)); range.m_first_tuple_id = range.m_last_tuple_id = opValue - 1; } break; @@ -1244,9 +1245,9 @@ int Ndb::setDatabaseAndSchemaName(const NdbDictionary::Table* t) if (s2 && s2 != s1 + 1) { char 
buf[NAME_LEN + 1]; if (s1 - s0 <= NAME_LEN && s2 - (s1 + 1) <= NAME_LEN) { - sprintf(buf, "%.*s", s1 - s0, s0); + sprintf(buf, "%.*s", (int) (s1 - s0), s0); setDatabaseName(buf); - sprintf(buf, "%.*s", s2 - (s1 + 1), s1 + 1); + sprintf(buf, "%.*s", (int) (s2 - (s1 + 1)), s1 + 1); setDatabaseSchemaName(buf); return 0; } diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 0b12b9d2f0f..1996dec024a 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -1508,9 +1508,9 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep) else { /** out of order something */ - ndbout_c("out of order bucket: %d gci: %lld m_latestGCI: %lld", - bucket-(Gci_container*)m_active_gci.getBase(), - gci, m_latestGCI); + ndbout_c("out of order bucket: %d gci: %ld m_latestGCI: %ld", + (int) (bucket-(Gci_container*)m_active_gci.getBase()), + (long) gci, (long) m_latestGCI); bucket->m_state = Gci_container::GC_COMPLETE; bucket->m_gcp_complete_rep_count = 1; // Prevent from being reused m_latest_complete_GCI = gci; diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp index 64dc544b226..3e2081b6018 100644 --- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp @@ -669,9 +669,9 @@ NdbScanOperation::doSend(int ProcessorId) void NdbScanOperation::close(bool forceSend, bool releaseOp) { DBUG_ENTER("NdbScanOperation::close"); - DBUG_PRINT("enter", ("this=%x tcon=%x con=%x force=%d release=%d", - (UintPtr)this, - (UintPtr)m_transConnection, (UintPtr)theNdbCon, + DBUG_PRINT("enter", ("this: 0x%lx tcon: 0x%lx con: 0x%lx force: %d release: %d", + (long) this, + (long) m_transConnection, (long) theNdbCon, forceSend, releaseOp)); if(m_transConnection){ diff --git a/storage/ndb/src/ndbapi/NdbTransaction.cpp b/storage/ndb/src/ndbapi/NdbTransaction.cpp index 4d7a6a59371..0c59746c1e9 100644 --- a/storage/ndb/src/ndbapi/NdbTransaction.cpp +++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp @@ -1010,7 +1010,7 @@ void NdbTransaction::releaseExecutedScanOperation(NdbIndexScanOperation* cursorOp) { DBUG_ENTER("NdbTransaction::releaseExecutedScanOperation"); - DBUG_PRINT("enter", ("this=0x%x op=0x%x", (UintPtr)this, (UintPtr)cursorOp)); + DBUG_PRINT("enter", ("this: 0x%lx op: 0x%lx", (ulong) this, (ulong) cursorOp)); releaseScanOperation(&m_firstExecutedScanOp, 0, cursorOp); diff --git a/storage/ndb/src/ndbapi/Ndblist.cpp b/storage/ndb/src/ndbapi/Ndblist.cpp index f82348fc91d..a0d22466db4 100644 --- a/storage/ndb/src/ndbapi/Ndblist.cpp +++ b/storage/ndb/src/ndbapi/Ndblist.cpp @@ -361,7 +361,7 @@ void Ndb::releaseScanOperation(NdbIndexScanOperation* aScanOperation) { DBUG_ENTER("Ndb::releaseScanOperation"); - DBUG_PRINT("enter", ("op=%x", (UintPtr)aScanOperation)); + DBUG_PRINT("enter", ("op: 0x%lx", (ulong) aScanOperation)); #ifdef ndb_release_check_dup { NdbIndexScanOperation* tOp = theScanOpIdleList; while (tOp != NULL) { From 13bb3fa823245bddd051482b2efb455557b8b726 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 27 Nov 2006 23:19:53 +0200 Subject: [PATCH 54/57] Disabled syncronization test at it fails on multiple platforms. (Bug #24529) Fixed that test_thr_alarm works mysql-test/mysql-test-run.pl: Fixed typo mysql-test/r/synchronization.result: Updated results mysql-test/t/disabled.def: Disabled syncronization test at it fails on multiple platforms. 
(Bug #24529) mysql-test/t/synchronization.test: Drop used tables mysys/Makefile.am: Added 'debug' to be able to build test cases mysys/thr_alarm.c: Fixed test case --- mysql-test/mysql-test-run.pl | 2 +- mysql-test/r/synchronization.result | 2 +- mysql-test/t/disabled.def | 1 + mysql-test/t/synchronization.test | 2 +- mysys/Makefile.am | 2 +- mysys/thr_alarm.c | 7 ++++--- 6 files changed, 9 insertions(+), 7 deletions(-) diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index cec3b4a3225..b8ae774629c 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -1235,7 +1235,7 @@ sub command_line_setup () { # But a fairly safe range seems to be 5001 - 32767 # -sub set_mtr_build_thread_ports() { +sub set_mtr_build_thread_ports($) { my $mtr_build_thread= shift; # Up to two masters, up to three slaves diff --git a/mysql-test/r/synchronization.result b/mysql-test/r/synchronization.result index 4543a829494..5d8585f1f88 100644 --- a/mysql-test/r/synchronization.result +++ b/mysql-test/r/synchronization.result @@ -1,4 +1,4 @@ -drop table if exists t1; +drop table if exists t1,t2; CREATE TABLE t1 (x1 int); ALTER TABLE t1 CHANGE x1 x2 int; CREATE TABLE t2 LIKE t1; diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index c21f3661667..e48108af9ce 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -30,6 +30,7 @@ rpl_ndb_myisam2ndb : Bug #19710 Cluster replication to partition table fa rpl_row_blob_innodb : BUG#18980 2006-04-10 kent Test fails randomly rpl_sp : BUG#16456 2006-02-16 jmiller rpl_multi_engine : BUG#22583 2006-09-23 lars +synchronization : Bug#24529 Test 'synchronization' fails on Mac pushbuild; Also on Linux 64 bit. # the below testcase have been reworked to avoid the bug, test contains comment, keep bug open #ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events diff --git a/mysql-test/t/synchronization.test b/mysql-test/t/synchronization.test index c7696195ee0..71e13a65ec3 100644 --- a/mysql-test/t/synchronization.test +++ b/mysql-test/t/synchronization.test @@ -4,7 +4,7 @@ # --disable_warnings -drop table if exists t1; +drop table if exists t1,t2; --enable_warnings connect (con1,localhost,root,,); diff --git a/mysys/Makefile.am b/mysys/Makefile.am index 79d79d41c34..e115739b421 100644 --- a/mysys/Makefile.am +++ b/mysys/Makefile.am @@ -20,7 +20,7 @@ MYSQLBASEdir= $(prefix) INCLUDES = @ZLIB_INCLUDES@ -I$(top_builddir)/include \ -I$(top_srcdir)/include -I$(srcdir) pkglib_LIBRARIES = libmysys.a -LDADD = libmysys.a $(top_builddir)/strings/libmystrings.a +LDADD = libmysys.a $(top_builddir)/strings/libmystrings.a $(top_builddir)/dbug/libdbug.a noinst_HEADERS = mysys_priv.h my_static.h libmysys_a_SOURCES = my_init.c my_getwd.c mf_getdate.c my_mmap.c \ mf_path.c mf_loadpath.c my_file.c \ diff --git a/mysys/thr_alarm.c b/mysys/thr_alarm.c index c55cc32b47d..dbadc45ae9f 100644 --- a/mysys/thr_alarm.c +++ b/mysys/thr_alarm.c @@ -276,7 +276,7 @@ sig_handler process_alarm(int sig __attribute__((unused))) if (!pthread_equal(pthread_self(),alarm_thread)) { #if defined(MAIN) && !defined(__bsdi__) - printf("thread_alarm\n"); fflush(stdout); + printf("thread_alarm in process_alarm\n"); fflush(stdout); #endif #ifdef DONT_REMEMBER_SIGNAL my_sigset(THR_CLIENT_ALARM,process_alarm); /* int. 
thread system calls */ @@ -848,8 +848,9 @@ int main(int argc __attribute__((unused)),char **argv __attribute__((unused))) MY_INIT(argv[0]); if (argc > 1 && argv[1][0] == '-' && argv[1][1] == '#') + { DBUG_PUSH(argv[1]+2); - + } pthread_mutex_init(&LOCK_thread_count,MY_MUTEX_INIT_FAST); pthread_cond_init(&COND_thread_count,NULL); @@ -917,8 +918,8 @@ int main(int argc __attribute__((unused)),char **argv __attribute__((unused))) } } pthread_mutex_unlock(&LOCK_thread_count); - end_thr_alarm(1); thr_alarm_info(&alarm_info); + end_thr_alarm(1); printf("Main_thread: Alarms: %u max_alarms: %u next_alarm_time: %lu\n", alarm_info.active_alarms, alarm_info.max_used_alarms, alarm_info.next_alarm_time); From 76b4ccbdf8d4913993eb64ef08986654ac5f0423 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 28 Nov 2006 05:43:30 +0200 Subject: [PATCH 55/57] Fixed portablity problem Removed warnings from mysqltest scripts cmd-line-utils/readline/undo.c: Fixed portability problem mysql-test/include/mix2.inc: Removed mysqltest warnings mysql-test/include/query_cache.inc: Removed mysqltest warnings mysql-test/r/ndb_binlog_ddl_multi.result: Update tests after fixing wrong connection command in test mysql-test/t/greedy_optimizer.test: Removed mysqltest warnings mysql-test/t/join.test: Removed mysqltest warnings mysql-test/t/ndb_binlog_ddl_multi.test: Removed mysqltest warnings mysql-test/t/ndb_blob_partition.test: Removed mysqltest warnings mysql-test/t/ndb_index_ordered.test: Removed mysqltest warnings storage/ndb/src/kernel/vm/Rope.cpp: Removed compiler warning --- cmd-line-utils/readline/undo.c | 2 +- mysql-test/include/mix2.inc | 2 +- mysql-test/include/query_cache.inc | 2 +- mysql-test/r/ndb_binlog_ddl_multi.result | 2 +- mysql-test/t/greedy_optimizer.test | 4 ++-- mysql-test/t/join.test | 4 ++-- mysql-test/t/ndb_binlog_ddl_multi.test | 2 +- mysql-test/t/ndb_blob_partition.test | 18 +++++++++--------- mysql-test/t/ndb_index_ordered.test | 2 +- storage/ndb/src/kernel/vm/Rope.cpp | 2 +- 10 files changed, 20 insertions(+), 20 deletions(-) diff --git a/cmd-line-utils/readline/undo.c b/cmd-line-utils/readline/undo.c index b4b5a6511ba..4d256f492b8 100644 --- a/cmd-line-utils/readline/undo.c +++ b/cmd-line-utils/readline/undo.c @@ -175,7 +175,7 @@ _rl_fix_last_undo_of_type (type, start, end) for (rl = rl_undo_list; rl; rl = rl->next) { - if (rl->what == (uint) type) + if (rl->what == (unsigned int) type) { rl->start = start; rl->end = end; diff --git a/mysql-test/include/mix2.inc b/mysql-test/include/mix2.inc index d3980b17d91..8c11f094907 100644 --- a/mysql-test/include/mix2.inc +++ b/mysql-test/include/mix2.inc @@ -1588,7 +1588,7 @@ INSERT INTO t1 (id) VALUES (NULL); SELECT * FROM t1; DROP TABLE t2, t1; --- Test that foreign keys in temporary tables are not accepted (bug #12084) +# Test that foreign keys in temporary tables are not accepted (bug #12084) eval CREATE TABLE t1 ( id INT PRIMARY KEY diff --git a/mysql-test/include/query_cache.inc b/mysql-test/include/query_cache.inc index 70249a9a5aa..0cf2f7cdfe9 100644 --- a/mysql-test/include/query_cache.inc +++ b/mysql-test/include/query_cache.inc @@ -111,7 +111,7 @@ SET @@autocommit=1; connection default; --echo connection default --- This should be 'YES'. +# This should be 'YES'. 
SHOW VARIABLES LIKE 'have_query_cache'; SET GLOBAL query_cache_size = 200000; diff --git a/mysql-test/r/ndb_binlog_ddl_multi.result b/mysql-test/r/ndb_binlog_ddl_multi.result index b278bb25d25..ff9c3bdc3e4 100644 --- a/mysql-test/r/ndb_binlog_ddl_multi.result +++ b/mysql-test/r/ndb_binlog_ddl_multi.result @@ -23,7 +23,7 @@ reset master; alter table t2 add column (b int); show binlog events from ; Log_name Pos Event_type Server_id End_log_pos Info -master-bin1.000001 # Query # # use `test`; alter table t2 add column (b int) +master-bin.000001 # Query # # use `test`; alter table t2 add column (b int) reset master; reset master; ALTER DATABASE mysqltest CHARACTER SET latin1; diff --git a/mysql-test/t/greedy_optimizer.test b/mysql-test/t/greedy_optimizer.test index 049d0ab09f7..4feca43ae1a 100644 --- a/mysql-test/t/greedy_optimizer.test +++ b/mysql-test/t/greedy_optimizer.test @@ -145,11 +145,11 @@ select @@optimizer_prune_level; # # These are the values for the parameters that control the greedy optimizer # (total 6 combinations - 3 for optimizer_search_depth, 2 for optimizer_prune_level): --- +# # set optimizer_search_depth=0; - automatic # set optimizer_search_depth=1; - min # set optimizer_search_depth=62; - max (default) --- +# # set optimizer_prune_level=0 - exhaustive; # set optimizer_prune_level=1 - heuristic; # default diff --git a/mysql-test/t/join.test b/mysql-test/t/join.test index 712e0b64e88..f39938ec52c 100644 --- a/mysql-test/t/join.test +++ b/mysql-test/t/join.test @@ -489,9 +489,9 @@ select * from v1a join (t3 natural join t4) on a = y; # Negative tests (tests for errors) #-------------------------------------------------------------------- -- error 1052 -select * from t1 natural join (t3 cross join t4); -- works in Oracle - bug +select * from t1 natural join (t3 cross join t4); # works in Oracle - bug -- error 1052 -select * from (t3 cross join t4) natural join t1; -- works in Oracle - bug +select * from (t3 cross join t4) natural join t1; # works in Oracle - bug -- error 1052 select * from t1 join (t2, t3) using (b); -- error 1052 diff --git a/mysql-test/t/ndb_binlog_ddl_multi.test b/mysql-test/t/ndb_binlog_ddl_multi.test index 78cec137159..064bd88764a 100644 --- a/mysql-test/t/ndb_binlog_ddl_multi.test +++ b/mysql-test/t/ndb_binlog_ddl_multi.test @@ -45,7 +45,7 @@ reset master; --connection server2 alter table t2 add column (b int); ---connections server1 +--connection server1 --source include/show_binlog_events.inc # alter database diff --git a/mysql-test/t/ndb_blob_partition.test b/mysql-test/t/ndb_blob_partition.test index 6173c9d9851..35df57b96ba 100644 --- a/mysql-test/t/ndb_blob_partition.test +++ b/mysql-test/t/ndb_blob_partition.test @@ -36,15 +36,15 @@ set @s0 = 'rggurloniukyehuxdbfkkyzlceixzrehqhvxvxbpwizzvjzpucqmzrhzxzfau'; set @s1 = 'ykyymbzqgqlcjhlhmyqelfoaaohvtbekvifukdtnvcrrjveevfakxarxexomz'; set @s2 = 'dbnfqyzgtqxalcrwtfsqabknvtfcbpoonxsjiqvmhnfikxxhcgoexlkoezvah'; -set @v1 = repeat(@s0, 100); -- 1d42dd9090cf78314a06665d4ea938c35cc760f4 -set @v2 = repeat(@s1, 200); -- 10d3c783026b310218d10b7188da96a2401648c6 -set @v3 = repeat(@s2, 300); -- a33549d9844092289a58ac348dd59f09fc28406a -set @v4 = repeat(@s0, 400); -- daa61c6de36a0526f0d47dc29d6b9de7e6d2630c -set @v5 = repeat(@s1, 500); -- 70fc9a7d08beebc522258bfb02000a30c77a8f1d -set @v6 = repeat(@s2, 600); -- 090565c580809efed3d369481a4bbb168b20713e -set @v7 = repeat(@s0, 700); -- 1e0070bec426871a46291de27b9bd6e4255ab4e5 -set @v8 = repeat(@s1, 800); -- acbaba01bc2e682f015f40e79d9cbe475db3002e -set @v9 = 
repeat(@s2, 900); -- 9ee30d99162574f79c66ae95cdf132dcf9cbc259 +set @v1 = repeat(@s0, 100); # 1d42dd9090cf78314a06665d4ea938c35cc760f4 +set @v2 = repeat(@s1, 200); # 10d3c783026b310218d10b7188da96a2401648c6 +set @v3 = repeat(@s2, 300); # a33549d9844092289a58ac348dd59f09fc28406a +set @v4 = repeat(@s0, 400); # daa61c6de36a0526f0d47dc29d6b9de7e6d2630c +set @v5 = repeat(@s1, 500); # 70fc9a7d08beebc522258bfb02000a30c77a8f1d +set @v6 = repeat(@s2, 600); # 090565c580809efed3d369481a4bbb168b20713e +set @v7 = repeat(@s0, 700); # 1e0070bec426871a46291de27b9bd6e4255ab4e5 +set @v8 = repeat(@s1, 800); # acbaba01bc2e682f015f40e79d9cbe475db3002e +set @v9 = repeat(@s2, 900); # 9ee30d99162574f79c66ae95cdf132dcf9cbc259 --enable_query_log # -- insert -- diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test index a03e0729ece..b9a47725b85 100644 --- a/mysql-test/t/ndb_index_ordered.test +++ b/mysql-test/t/ndb_index_ordered.test @@ -50,7 +50,7 @@ update t1 set c = 13 where b <= 3; select * from t1 order by a; update t1 set b = b + 1 where b > 4 and b < 7; select * from t1 order by a; --- Update primary key +# Update primary key update t1 set a = a + 10 where b > 1 and b < 7; select * from t1 order by a; diff --git a/storage/ndb/src/kernel/vm/Rope.cpp b/storage/ndb/src/kernel/vm/Rope.cpp index b6bce864caf..afe08e063a9 100644 --- a/storage/ndb/src/kernel/vm/Rope.cpp +++ b/storage/ndb/src/kernel/vm/Rope.cpp @@ -115,7 +115,7 @@ Rope::compare(const char * str, size_t len) const { int res = memcmp(str, (const char*)curr.p->data, left); if(res){ if(DEBUG_ROPE) - ndbout_c("Rope::compare(%s, %d) -> %d", str, len, res); + ndbout_c("Rope::compare(%s, %d) -> %d", str, (int) len, res); return res; } } From 7fe3f31345313d8580d1b5e976fcfed67160e06a Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 28 Nov 2006 13:10:23 +0100 Subject: [PATCH 56/57] opt_range.cc: dummy commit to trigger pushbuild sql/opt_range.cc: dummy commit to trigger pushbuild --- sql/opt_range.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 79b3e023a5f..b23c37704e0 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -11056,7 +11056,7 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose) #endif /* NOT_USED */ /***************************************************************************** -** Instantiate templates +** Instantiate templates *****************************************************************************/ #ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION From 76833ac489e01efa336fc12b2e565dc1d3ad3fd8 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 28 Nov 2006 21:35:20 +0300 Subject: [PATCH 57/57] A fix for Bug#24486 "Valgrind warnings: sp_head(), deadlock_innodb:events_grant". This was a memory leak introduced by the patch for Bug 22830. Post-review fixes. sql/sql_parse.cc: A fix for Bug#24486 "Valgrind warnings: sp_head(), deadlock_innodb:events_grant": delete the sphead object before returning with an error. 
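For readers unfamiliar with the idiom, here is a minimal standalone sketch (not the server code) of the cleanup pattern the fix below applies: the fallible steps run inside do { ... } while (0), so a break taken on an error path still falls through to the delete that frees the parsed object instead of skipping it. All names here (Parsed_object, run_command, fail_early) are illustrative only.

  #include <cstdio>

  struct Parsed_object {};            // stand-in for the object that leaked

  static bool run_command(bool fail_early)
  {
    Parsed_object *obj= new Parsed_object();
    bool error= false;
    do
    {
      if (fail_early)
      {
        error= true;                  // error path: leave the block early
        break;
      }
      std::printf("command executed\n");
    } while (0);
    delete obj;                       // always reached, success or error
    obj= NULL;
    return error;
  }

  int main()
  {
    return run_command(true) ? 0 : 1;
  }

The do { ... } while (0) wrapper acts as a structured goto: every break lands on the shared cleanup, which is why the actual patch moves the delete of lex->sphead outside the block.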
--- sql/sql_parse.cc | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index afd242d922e..1c0ae7b8e2c 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -3925,6 +3925,7 @@ end_with_restore_list: } case SQLCOM_CREATE_EVENT: case SQLCOM_ALTER_EVENT: + do { DBUG_ASSERT(lex->event_parse_data); if (lex->table_or_sp_used()) @@ -3950,16 +3951,15 @@ end_with_restore_list: if (!res) send_ok(thd); - /* Don't do it, if we are inside a SP */ - if (!thd->spcont) - { - delete lex->sphead; - lex->sphead= NULL; - } - - /* lex->unit.cleanup() is called outside, no need to call it here */ - break; + } while (0); + /* Don't do it, if we are inside a SP */ + if (!thd->spcont) + { + delete lex->sphead; + lex->sphead= NULL; } + /* lex->unit.cleanup() is called outside, no need to call it here */ + break; case SQLCOM_DROP_EVENT: case SQLCOM_SHOW_CREATE_EVENT: {