Cleanups & indentation changes

- Only indentation changes in sql_rename.cc
- Ignore some WSREP error messages when there isn't an internet connection
- Force restart of stat_tables_par.test to make the result stable
- Fixed compiler warnings in CONNECT
Monty 2019-12-30 14:11:31 +02:00
parent 6a9e24d046
commit 736998cb75
15 changed files with 348 additions and 331 deletions

View File

@@ -1,4 +1,4 @@
#
# suite.pm will make sure that all tests including this file
# will be skipped unless this is on Windows
# will be skipped if run under Windows
#

View File

@@ -0,0 +1,275 @@
# Note that this test requires a fresh restart to not have problems with the
# old status values
set @save_use_stat_tables=@@use_stat_tables;
set use_stat_tables='preferably';
--disable_warnings
DROP DATABASE IF EXISTS dbt3_s001;
--enable_warnings
CREATE DATABASE dbt3_s001;
use dbt3_s001;
set @save_optimizer_switch=@@optimizer_switch;
set optimizer_switch='extended_keys=off';
--disable_query_log
--disable_result_log
--disable_warnings
--source include/dbt3_s001.inc
delete from mysql.table_stats;
delete from mysql.column_stats;
delete from mysql.index_stats;
ANALYZE TABLE
customer, lineitem, nation, orders, part, partsupp, region, supplier;
--enable_warnings
--enable_result_log
--enable_query_log
select * from mysql.table_stats;
select * from mysql.index_stats;
#
# Test for parallel memory allocation for statistical data
#
# assumes that the start of the memory allocation code for stats data has these lines:
#
# DEBUG_SYNC(thd, "statistics_mem_alloc_start1");
# DEBUG_SYNC(thd, "statistics_mem_alloc_start2");
#
let $Q6=
select round(sum(l_extendedprice*l_discount),4) as revenue
from lineitem
where l_shipdate >= date '1994-01-01'
and l_shipdate < date '1994-01-01' + interval '1' year
and l_discount between 0.06 - 0.01 and 0.06 + 0.01
and l_quantity < 24;
flush table lineitem;
set use_stat_tables='never';
eval $Q6;
connect (con1, localhost, root,,);
connect (con2, localhost, root,,);
connection con1;
set debug_sync='statistics_mem_alloc_start1 WAIT_FOR second_thread_started_too';
set debug_sync='statistics_mem_alloc_start2 SIGNAL first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
--send_eval $Q6
connection con2;
set debug_sync='statistics_mem_alloc_start1 SIGNAL second_thread_started_too';
set debug_sync='statistics_mem_alloc_start2 WAIT_FOR first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
--send_eval $Q6
connection con1;
--reap
connection con2;
--reap
connection default;
set use_stat_tables='preferably';
disconnect con1;
disconnect con2;
set debug_sync='RESET';
#
# Test for parallel statistics collection
#
# assumes that the start of the stats collection code has these lines:
#
# DEBUG_SYNC(thd, "statistics_collection_start1");
# DEBUG_SYNC(thd, "statistics_collection_start2");
#
select * from mysql.index_stats where table_name='lineitem' order by index_name;
delete from mysql.index_stats
where table_name='lineitem' and
index_name in ('i_l_shipdate', 'i_l_receiptdate');
select * from mysql.index_stats where table_name='lineitem' order by index_name;
--disable_result_log
--disable_warnings
analyze table lineitem persistent for columns() indexes (i_l_shipdate);
--enable_warnings
--enable_result_log
select * from mysql.index_stats where table_name='lineitem' order by index_name;
delete from mysql.index_stats
where table_name='lineitem' and index_name= 'i_l_shipdate';
select * from mysql.index_stats where table_name='lineitem' order by index_name;
connect (con1, localhost, root,,);
connect (con2, localhost, root,,);
connection con1;
set debug_sync='statistics_collection_start1 WAIT_FOR second_thread_started_too';
set debug_sync='statistics_collection_start2 SIGNAL first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
--send analyze table lineitem persistent for columns() indexes (i_l_shipdate)
connection con2;
set debug_sync='statistics_collection_start1 SIGNAL second_thread_started_too';
set debug_sync='statistics_collection_start2 WAIT_FOR first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
--send analyze table lineitem persistent for columns() indexes (i_l_receiptdate)
connection con1;
--disable_result_log
--disable_warnings
--reap
--enable_warnings
--enable_result_log
connection con2;
--disable_result_log
--disable_warnings
--reap
--enable_warnings
--enable_result_log
connection default;
disconnect con1;
disconnect con2;
set debug_sync='RESET';
select * from mysql.index_stats where table_name='lineitem' order by index_name, prefix_arity;
#
# Test for parallel statistics collection and update (innodb)
#
select * from mysql.index_stats where table_name='lineitem'
order by index_name, prefix_arity;
set debug_sync='RESET';
let $innodb_storage_engine= 0;
if (`SELECT UPPER(@@default_storage_engine) = 'INNODB'`)
{
let $innodb_storage_engine= 1;
}
connect (con1, localhost, root,,);
connect (con2, localhost, root,,);
connection con1;
set debug_sync='statistics_collection_start SIGNAL parked WAIT_FOR finish';
use dbt3_s001;
set use_stat_tables='preferably';
--send analyze table lineitem persistent for all
connection con2;
set debug_sync='now WAIT_FOR parked';
use dbt3_s001;
set use_stat_tables='never';
if ($innodb_storage_engine)
{
select * from lineitem where l_orderkey=1 and l_partkey=156;
delete from lineitem where l_orderkey=1 and l_partkey=156;
select * from lineitem where l_orderkey=1 and l_partkey=156;
}
set debug_sync='now SIGNAL finish';
connection con1;
--disable_result_log
--disable_warnings
--reap
--enable_warnings
--enable_result_log
connection default;
disconnect con1;
disconnect con2;
set debug_sync='RESET';
select * from mysql.index_stats where table_name='lineitem'
order by index_name, prefix_arity;
#
# Bug mdev-3891: deadlock for ANALYZE and SELECT over mysql.index_stats
#
set @save_global_use_stat_tables=@@global.use_stat_tables;
set global use_stat_tables='preferably';
set debug_sync='RESET';
connect (con1, localhost, root,,);
connect (con2, localhost, root,,);
connection con1;
set debug_sync='statistics_update_start SIGNAL parker WAIT_FOR go1 EXECUTE 1';
set debug_sync='thr_multi_lock_before_thr_lock SIGNAL go2 EXECUTE 3';
use dbt3_s001;
--send analyze table lineitem persistent for all
connection con2;
set debug_sync='open_and_process_table WAIT_FOR parker';
set debug_sync='statistics_read_start SIGNAL go1 WAIT_FOR go2';
use dbt3_s001;
--send select * from mysql.index_stats, lineitem where index_name= 'i_l_shipdate' and l_orderkey=1 and l_partkey=68 order by prefix_arity;
connection con1;
--disable_result_log
--disable_warnings
--reap
--enable_warnings
--enable_result_log
connection con2;
--disable_warnings
--reap
--enable_warnings
connection default;
disconnect con1;
disconnect con2;
set debug_sync='RESET';
set global use_stat_tables=@save_global_use_stat_tables;
DROP DATABASE dbt3_s001;
use test;
#
# Bug mdev-4019: crash when executing in parallel ANALYZE and
# SELECT * FROM information_schema.statistics
#
set @save_global_use_stat_tables=@@global.use_stat_tables;
set global use_stat_tables='preferably';
set debug_sync='RESET';
create table t1 (a int, b int, key(a));
insert t1 values (1,1),(2,2);
analyze table t1;
SET debug_sync='after_open_table_ignore_flush WAIT_FOR go';
send select * from information_schema.statistics where table_schema='test';
connect(con1, localhost, root);
connection con1;
select * from t1;
SET DEBUG_SYNC= "now SIGNAL go";
connection default;
reap;
connection default;
disconnect con1;
set debug_sync='RESET';
drop table t1;
set global use_stat_tables=@save_global_use_stat_tables;
set use_stat_tables=@save_use_stat_tables;

View File

@@ -1,3 +1,4 @@
# restart
set @save_use_stat_tables=@@use_stat_tables;
set use_stat_tables='preferably';
DROP DATABASE IF EXISTS dbt3_s001;

View File

@@ -1,278 +1,9 @@
# Note that this test requires a fresh restart to not have problems with the
# old status values
--source include/have_stat_tables.inc
--source include/have_debug_sync.inc
--source include/not_embedded.inc
--source include/restart_mysqld.inc
set @save_use_stat_tables=@@use_stat_tables;
set use_stat_tables='preferably';
--disable_warnings
DROP DATABASE IF EXISTS dbt3_s001;
--enable_warnings
CREATE DATABASE dbt3_s001;
use dbt3_s001;
set @save_optimizer_switch=@@optimizer_switch;
set optimizer_switch='extended_keys=off';
--disable_query_log
--disable_result_log
--disable_warnings
--source include/dbt3_s001.inc
delete from mysql.table_stats;
delete from mysql.column_stats;
delete from mysql.index_stats;
ANALYZE TABLE
customer, lineitem, nation, orders, part, partsupp, region, supplier;
--enable_warnings
--enable_result_log
--enable_query_log
select * from mysql.table_stats;
select * from mysql.index_stats;
#
# Test for parallel memory allocation for statistical data
#
# assumes that the start of the memory allocation code for stats data has these lines:
#
# DEBUG_SYNC(thd, "statistics_mem_alloc_start1");
# DEBUG_SYNC(thd, "statistics_mem_alloc_start2");
#
let $Q6=
select round(sum(l_extendedprice*l_discount),4) as revenue
from lineitem
where l_shipdate >= date '1994-01-01'
and l_shipdate < date '1994-01-01' + interval '1' year
and l_discount between 0.06 - 0.01 and 0.06 + 0.01
and l_quantity < 24;
flush table lineitem;
set use_stat_tables='never';
eval $Q6;
connect (con1, localhost, root,,);
connect (con2, localhost, root,,);
connection con1;
set debug_sync='statistics_mem_alloc_start1 WAIT_FOR second_thread_started_too';
set debug_sync='statistics_mem_alloc_start2 SIGNAL first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
--send_eval $Q6
connection con2;
set debug_sync='statistics_mem_alloc_start1 SIGNAL second_thread_started_too';
set debug_sync='statistics_mem_alloc_start2 WAIT_FOR first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
--send_eval $Q6
connection con1;
--reap
connection con2;
--reap
connection default;
set use_stat_tables='preferably';
disconnect con1;
disconnect con2;
set debug_sync='RESET';
#
# Test for parallel statistics collection
#
# assumes that the start of the stats collection code has these lines:
#
# DEBUG_SYNC(thd, "statistics_collection_start1");
# DEBUG_SYNC(thd, "statistics_collection_start2");
#
select * from mysql.index_stats where table_name='lineitem' order by index_name;
delete from mysql.index_stats
where table_name='lineitem' and
index_name in ('i_l_shipdate', 'i_l_receiptdate');
select * from mysql.index_stats where table_name='lineitem' order by index_name;
--disable_result_log
--disable_warnings
analyze table lineitem persistent for columns() indexes (i_l_shipdate);
--enable_warnings
--enable_result_log
select * from mysql.index_stats where table_name='lineitem' order by index_name;
delete from mysql.index_stats
where table_name='lineitem' and index_name= 'i_l_shipdate';
select * from mysql.index_stats where table_name='lineitem' order by index_name;
connect (con1, localhost, root,,);
connect (con2, localhost, root,,);
connection con1;
set debug_sync='statistics_collection_start1 WAIT_FOR second_thread_started_too';
set debug_sync='statistics_collection_start2 SIGNAL first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
--send analyze table lineitem persistent for columns() indexes (i_l_shipdate)
connection con2;
set debug_sync='statistics_collection_start1 SIGNAL second_thread_started_too';
set debug_sync='statistics_collection_start2 WAIT_FOR first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
--send analyze table lineitem persistent for columns() indexes (i_l_receiptdate)
connection con1;
--disable_result_log
--disable_warnings
--reap
--enable_warnings
--enable_result_log
connection con2;
--disable_result_log
--disable_warnings
--reap
--enable_warnings
--enable_result_log
connection default;
disconnect con1;
disconnect con2;
set debug_sync='RESET';
select * from mysql.index_stats where table_name='lineitem' order by index_name, prefix_arity;
#
# Test for parallel statistics collection and update (innodb)
#
select * from mysql.index_stats where table_name='lineitem'
order by index_name, prefix_arity;
set debug_sync='RESET';
let $innodb_storage_engine= 0;
if (`SELECT UPPER(@@default_storage_engine) = 'INNODB'`)
{
let $innodb_storage_engine= 1;
}
connect (con1, localhost, root,,);
connect (con2, localhost, root,,);
connection con1;
set debug_sync='statistics_collection_start SIGNAL parked WAIT_FOR finish';
use dbt3_s001;
set use_stat_tables='preferably';
--send analyze table lineitem persistent for all
connection con2;
set debug_sync='now WAIT_FOR parked';
use dbt3_s001;
set use_stat_tables='never';
if ($innodb_storage_engine)
{
select * from lineitem where l_orderkey=1 and l_partkey=156;
delete from lineitem where l_orderkey=1 and l_partkey=156;
select * from lineitem where l_orderkey=1 and l_partkey=156;
}
set debug_sync='now SIGNAL finish';
connection con1;
--disable_result_log
--disable_warnings
--reap
--enable_warnings
--enable_result_log
connection default;
disconnect con1;
disconnect con2;
set debug_sync='RESET';
select * from mysql.index_stats where table_name='lineitem'
order by index_name, prefix_arity;
#
# Bug mdev-3891: deadlock for ANALYZE and SELECT over mysql.index_stats
#
set @save_global_use_stat_tables=@@global.use_stat_tables;
set global use_stat_tables='preferably';
set debug_sync='RESET';
connect (con1, localhost, root,,);
connect (con2, localhost, root,,);
connection con1;
set debug_sync='statistics_update_start SIGNAL parker WAIT_FOR go1 EXECUTE 1';
set debug_sync='thr_multi_lock_before_thr_lock SIGNAL go2 EXECUTE 3';
use dbt3_s001;
--send analyze table lineitem persistent for all
connection con2;
set debug_sync='open_and_process_table WAIT_FOR parker';
set debug_sync='statistics_read_start SIGNAL go1 WAIT_FOR go2';
use dbt3_s001;
--send select * from mysql.index_stats, lineitem where index_name= 'i_l_shipdate' and l_orderkey=1 and l_partkey=68 order by prefix_arity;
connection con1;
--disable_result_log
--disable_warnings
--reap
--enable_warnings
--enable_result_log
connection con2;
--disable_warnings
--reap
--enable_warnings
connection default;
disconnect con1;
disconnect con2;
set debug_sync='RESET';
set global use_stat_tables=@save_global_use_stat_tables;
DROP DATABASE dbt3_s001;
use test;
#
# Bug mdev-4019: crash when executing in parallel ANALYZE and
# SELECT * FROM information_schema.statistics
#
set @save_global_use_stat_tables=@@global.use_stat_tables;
set global use_stat_tables='preferably';
set debug_sync='RESET';
create table t1 (a int, b int, key(a));
insert t1 values (1,1),(2,2);
analyze table t1;
SET debug_sync='after_open_table_ignore_flush WAIT_FOR go';
send select * from information_schema.statistics where table_schema='test';
connect(con1, localhost, root);
connection con1;
select * from t1;
SET DEBUG_SYNC= "now SIGNAL go";
connection default;
reap;
connection default;
disconnect con1;
set debug_sync='RESET';
drop table t1;
set global use_stat_tables=@save_global_use_stat_tables;
set use_stat_tables=@save_use_stat_tables;
--source stat_tables_par.inc

View File

@@ -1,3 +1,4 @@
# restart
SET SESSION DEFAULT_STORAGE_ENGINE='InnoDB';
set @save_optimizer_switch_for_stat_tables_test=@@optimizer_switch;
set optimizer_switch='extended_keys=on';

View File

@@ -1,11 +1,18 @@
# Note that this test requires a fresh restart to not have problems with the
# old status values
--source include/have_stat_tables.inc
--source include/have_debug_sync.inc
--source include/not_embedded.inc
--source include/have_innodb.inc
--source include/restart_mysqld.inc
SET SESSION DEFAULT_STORAGE_ENGINE='InnoDB';
set @save_optimizer_switch_for_stat_tables_test=@@optimizer_switch;
set optimizer_switch='extended_keys=on';
--source stat_tables_par.test
--source stat_tables_par.inc
set optimizer_switch=@save_optimizer_switch_for_stat_tables_test;

View File

@@ -4779,7 +4779,9 @@ sub extract_warning_lines ($$) {
qr/InnoDB: Table .*mysql.*innodb_table_stats.* not found./,
qr/InnoDB: User stopword table .* does not exist./,
qr/Dump thread [0-9]+ last sent to server [0-9]+ binlog file:pos .+/,
qr/Detected table cache mutex contention at instance .* waits. Additional table cache instance cannot be activated: consider raising table_open_cache_instances. Number of active instances/
qr/Detected table cache mutex contention at instance .* waits. Additional table cache instance cannot be activated: consider raising table_open_cache_instances. Number of active instances/,
qr/WSREP: Failed to guess base node address/,
qr/WSREP: Guessing address for incoming client/,
);
my $matched_lines= [];

View File

@@ -5050,7 +5050,7 @@ public:
INFORMATION_SCHEMA.TABLES without ORDER BY.
*/
void sort_desc();
#endif
#endif /* DBUG_OFF */
};
int ha_discover_table(THD *thd, TABLE_SHARE *share);
@@ -5059,7 +5059,7 @@ int ha_discover_table_names(THD *thd, LEX_CSTRING *db, MY_DIR *dirp,
bool ha_table_exists(THD *thd, const LEX_CSTRING *db, const LEX_CSTRING *table_name,
handlerton **hton= 0, bool *is_sequence= 0);
bool ha_check_if_updates_are_ignored(THD *thd, handlerton *hton, const char *op);
#endif
#endif /* MYSQL_SERVER */
/* key cache */
extern "C" int ha_init_key_cache(const char *name, KEY_CACHE *key_cache, void *);

View File

@@ -321,7 +321,6 @@ do_rename(THD *thd, TABLE_LIST *ren_table, const LEX_CSTRING *new_db,
DBUG_RETURN(1); // This can't be skipped
}
{
DBUG_ASSERT(!thd->locked_tables_mode);
#ifdef WITH_WSREP
@@ -364,7 +363,7 @@ do_rename(THD *thd, TABLE_LIST *ren_table, const LEX_CSTRING *new_db,
else
{
/*
change of schema is not allowed
Change of schema is not allowed
except of ALTER ...UPGRADE DATA DIRECTORY NAME command
because a view has valid internal db&table names in this case.
*/
@@ -374,7 +373,6 @@ do_rename(THD *thd, TABLE_LIST *ren_table, const LEX_CSTRING *new_db,
else
rc= mysql_rename_view(thd, new_db, &new_alias, ren_table);
}
}
DBUG_RETURN(rc && !skip_error ? 1 : 0);
}

View File

@@ -1220,7 +1220,8 @@ bool FILTER::Eval(PGLOBAL g)
int i; // n = 0;
//PSUBQ subp = NULL;
PARRAY ap = NULL;
PDBUSER dup __attribute__((unused)) = PlgGetUser(g);
(void) PlgGetUser(g);
if (Opc <= OP_XX)
{
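The warning fix here swaps an unused variable for an explicit discard of the function's return value. A minimal sketch of both idioms outside the CONNECT code (side_effect_only() is a made-up stand-in for PlgGetUser(g); the attribute form is a GCC/Clang extension):

#include <stdio.h>

static int side_effect_only(void)    /* made-up stand-in for PlgGetUser(g) */
{
  puts("side effect happened");
  return 42;
}

int main(void)
{
  /* Old pattern: keep a named variable but mark it unused so the
     compiler stays quiet (GCC/Clang extension). */
  int kept __attribute__((unused)) = side_effect_only();

  /* New pattern: call the function for its side effect and discard the
     result explicitly -- no variable, no unused-variable warning. */
  (void) side_effect_only();
  return 0;
}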

View File

@@ -173,9 +173,9 @@ extern "C" {
char version[]= "Version 1.07.0001 November 12, 2019";
#if defined(__WIN__)
char compver[]= "Version 1.07.0001 " __DATE__ " " __TIME__;
char slash= '\\';
static char slash= '\\';
#else // !__WIN__
char slash= '/';
static char slash= '/';
#endif // !__WIN__
} // extern "C"

View File

@@ -766,7 +766,6 @@ void JDBConn::AddJars(PSTRG jpop, char sep)
/***********************************************************************/
bool JDBConn::Connect(PJPARM sop)
{
int irc = RC_FX;
bool err = false;
jint rc;
PGLOBAL& g = m_G;

View File

@@ -167,7 +167,7 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
#endif // !MARIADB
// We used the file name relative to recorded datapath
strcat(strcat(strcat(strcpy(filename, "."), slash), db), slash);
strcat(strcat(strcat(strcpy(filename, "."), dir_slash), db), dir_slash);
strncat(filename, fn, _MAX_PATH - strlen(filename));
// Retrieve the file from the web and copy it locally

View File

@@ -5,10 +5,11 @@
/***********************************************************************/
#pragma once
extern PCSZ slash;
#if defined(__WIN__)
static PCSZ slash = "\\";
#define dir_slash "\\"
#else // !__WIN__
static PCSZ slash = "/";
#define dir_slash "/"
#define stricmp strcasecmp
#endif // !__WIN__
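Two effects of this header change are worth noting: slash now has internal linkage, so every translation unit that includes the header carries its own copy and no extern definition is needed elsewhere in CONNECT, and dir_slash is a string-literal macro, which is what the strcat() chain in the REST code above builds paths with. A rough self-contained sketch of the same pattern, with _WIN32 and const char * standing in for the tree's __WIN__ and PCSZ, and "db" as a placeholder database name:

#include <stdio.h>
#include <string.h>

/* Header-style definitions: a per-translation-unit pointer plus a literal macro. */
#if defined(_WIN32)
static const char *slash = "\\";
#define dir_slash "\\"
#else
static const char *slash = "/";
#define dir_slash "/"
#endif

int main(void)
{
  char filename[64];

  /* Run-time concatenation through the pointer, as the old code did. */
  strcat(strcat(strcat(strcpy(filename, "."), slash), "db"), slash);
  printf("%s\n", filename);                       /* ./db/ on POSIX */

  /* The macro also works as a compile-time literal. */
  printf("%s\n", "." dir_slash "db" dir_slash);
  return 0;
}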

View File

@@ -72,6 +72,7 @@ static char *s3_wrap_strdup(const char *str)
static void s3_wrap_free(void *ptr)
{
if (ptr) /* Avoid tracing of null */
my_free(ptr);
}
@@ -494,7 +495,7 @@ static my_bool copy_to_file(ms3_st *s3_client, const char *aws_bucket,
if (s3_get_object(s3_client, aws_bucket, aws_path, &block, compression, 1))
goto err;
error= my_write(file, block.str, block.length, MYF(MY_WME | MY_WME));
error= my_write(file, block.str, block.length, MYF(MY_WME | MY_FNABP));
s3_free(&block);
if (error == MY_FILE_ERROR)
goto err;
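The second hunk also repairs the flag mask passed to my_write(): the old call or-ed MY_WME with itself, so no not-all-bytes flag was requested even though the line below checks for MY_FILE_ERROR. As a rough illustration of the all-or-nothing contract the corrected flags ask for, here is a plain POSIX sketch; write_all() and FILE_ERROR are illustrative names, not mysys API:

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

#define FILE_ERROR ((size_t) -1)     /* stand-in for MY_FILE_ERROR */

/* Write the whole buffer or report failure -- roughly the behaviour the
   not-all-bytes-written flag requests from my_write(). */
static size_t write_all(int fd, const char *buf, size_t len)
{
  size_t done = 0;
  while (done < len)
  {
    ssize_t n = write(fd, buf + done, len - done);
    if (n > 0)
    {
      done += (size_t) n;
      continue;
    }
    if (n < 0 && errno == EINTR)
      continue;                      /* interrupted: retry */
    return FILE_ERROR;               /* hard error or no progress */
  }
  return 0;                          /* success: every byte written */
}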