Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.0
into dl145b.mysql.com:/home/ndbdev/tomas/mysql-5.1

BitKeeper/etc/logging_ok: auto-union
configure.in: Auto merged
sql/mysql_priv.h: Auto merged
storage/ndb/src/cw/cpcd/APIService.cpp: Auto merged
storage/ndb/src/cw/cpcd/CPCD.hpp: Auto merged
storage/ndb/src/cw/cpcd/Process.cpp: Auto merged
storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp: Auto merged
storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp: Auto merged
storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp: Auto merged
storage/ndb/src/kernel/error/ErrorReporter.cpp: Auto merged
storage/ndb/src/mgmapi/mgmapi.cpp: Auto merged
storage/ndb/src/ndbapi/ClusterMgr.cpp: Auto merged
storage/ndb/src/ndbapi/ClusterMgr.hpp: Auto merged
storage/ndb/src/ndbapi/NdbIndexOperation.cpp: Auto merged
storage/ndb/src/ndbapi/ndberror.c: Auto merged
storage/ndb/test/include/CpcClient.hpp: Auto merged
storage/ndb/test/ndbapi/testNodeRestart.cpp: Auto merged
storage/ndb/test/run-test/Makefile.am: Auto merged
storage/ndb/test/run-test/main.cpp: Auto merged
storage/ndb/test/run-test/make-config.sh: Auto merged
storage/ndb/test/run-test/ndb-autotest.sh: Auto merged
storage/ndb/test/run-test/run-test.hpp: Auto merged
storage/ndb/test/src/CpcClient.cpp: Auto merged

commit beaedea1ee
@@ -33,6 +33,9 @@ else
 esac
 fi
 
+cpu_flag=""
+cpu_flag_old=""
+
 case "$cpu_family--$model_name" in
 Alpha*EV6*)
 cpu_flag="ev6";
@@ -40,11 +43,21 @@ case "$cpu_family--$model_name" in
 *Xeon*)
 cpu_flag="nocona";
 ;;
+*Pentium*4*Mobile*CPU*)
+cpu_flag="pentium4m";
+;;
 *Pentium*4*CPU*)
 cpu_flag="pentium4";
 ;;
+*Pentium*III*Mobile*CPU*)
+cpu_flag="pentium3m";
+;;
+*Pentium*III*CPU*)
+cpu_flag="pentium3";
+;;
 *Athlon*64*)
 cpu_flag="athlon64";
+cpu_flag_old="athlon";
 ;;
 *Athlon*)
 cpu_flag="athlon";
@@ -84,6 +97,10 @@ case "$cc_ver--$cc_verno" in
 check_cpu_cflags="-mtune=$cpu_flag -march=$cpu_flag"
 ;;
 *GCC*)
+# Fix for older compiler versions
+if test -n "$cpu_flag_old"; then
+cpu_flag="$cpu_flag_old"
+fi
 check_cpu_cflags="-mcpu=$cpu_flag -march=$cpu_flag"
 ;;
 *)
@@ -308,9 +308,15 @@ C_MODE_END
 #ifndef CONFIG_SMP
 #define CONFIG_SMP
 #endif
+#if defined(__ia64__)
+#define new my_arg_new
+#endif
 C_MODE_START
 #include <asm/atomic.h>
 C_MODE_END
+#if defined(__ia64__)
+#undef new
+#endif
 #endif
 #include <errno.h> /* Recommended by debian */
 /* We need the following to go around a problem with openssl on solaris */
@@ -817,6 +817,9 @@ drop table t1;
 select 'c' like '\_' as want0;
 want0
 0
+SELECT SUBSTR('вася',-2);
+SUBSTR('вася',-2)
+ся
 create table t1 (id integer, a varchar(100) character set utf8 collate utf8_unicode_ci);
 insert into t1 values (1, 'Test');
 select * from t1 where soundex(a) = soundex('Test');
@@ -2476,3 +2476,13 @@ x
 NULL
 1.0000
 drop table t1;
+create table t1 (a int(11));
+select all all * from t1;
+a
+select distinct distinct * from t1;
+a
+select all distinct * from t1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'distinct * from t1' at line 1
+select distinct all * from t1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'all * from t1' at line 1
+drop table t1;
@@ -666,6 +666,12 @@ drop table t1;
 #
 select 'c' like '\_' as want0;
 
+#
+# SUBSTR with negative offset didn't work with multi-byte strings
+#
+SELECT SUBSTR('вася',-2);
+
+
 #
 # Bug #7730 Server crash using soundex on an utf8 table
 #
@@ -2056,3 +2056,19 @@ create table t1 (s1 int);
 insert into t1 values (null),(1);
 select distinct avg(s1) as x from t1 group by s1 with rollup;
 drop table t1;
+
+
+#
+# Bug#8733 server accepts malformed query (multiply mentioned distinct)
+#
+create table t1 (a int(11));
+select all all * from t1;
+select distinct distinct * from t1;
+--error 1064
+select all distinct * from t1;
+--error 1064
+select distinct all * from t1;
+drop table t1;
+
+
+#
ndb/test/run-test/conf-daily-basic-dl145a.txt (new file, 19 lines)
@@ -0,0 +1,19 @@
+baseport: 14000
+basedir: /home/ndbdev/autotest/run
+mgm: CHOOSE_host1
+ndb: CHOOSE_host2 CHOOSE_host3
+api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /home/ndbdev/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 14000
+ArbitrationRank: 1
+DataDir: .

ndb/test/run-test/conf-daily-basic-ndbmaster.txt (new file, 19 lines)
@@ -0,0 +1,19 @@
+baseport: 14000
+basedir: /space/autotest
+mgm: CHOOSE_host1
+ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
+api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /space/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 14000
+ArbitrationRank: 1
+DataDir: .

ndb/test/run-test/conf-daily-basic-shark.txt (new file, 19 lines)
@@ -0,0 +1,19 @@
+baseport: 14000
+basedir: /space/autotest
+mgm: CHOOSE_host1
+ndb: CHOOSE_host1 CHOOSE_host1
+api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /space/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 14000
+ArbitrationRank: 1
+DataDir: .

ndb/test/run-test/conf-daily-devel-ndbmaster.txt (new file, 19 lines)
@@ -0,0 +1,19 @@
+baseport: 16000
+basedir: /space/autotest
+mgm: CHOOSE_host1
+ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
+api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /space/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 16000
+ArbitrationRank: 1
+DataDir: .

ndb/test/run-test/conf-daily-sql-ndbmaster.txt (new file, 20 lines)
@@ -0,0 +1,20 @@
+baseport: 16000
+basedir: /space/autotest
+mgm: CHOOSE_host1
+ndb: CHOOSE_host2 CHOOSE_host3
+mysqld: CHOOSE_host1 CHOOSE_host4
+mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /space/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 16000
+ArbitrationRank: 1
+DataDir: .
@@ -1013,7 +1013,7 @@ String *Item_func_substr::val_str(String *str)
 if ((null_value=(args[0]->null_value || args[1]->null_value ||
 (arg_count == 3 && args[2]->null_value))))
 return 0; /* purecov: inspected */
-start= (int32)((start < 0) ? res->length() + start : start -1);
+start= (int32)((start < 0) ? res->numchars() + start : start -1);
 start=res->charpos(start);
 length=res->charpos(length,start);
 if (start < 0 || (uint) start+1 > res->length() || length <= 0)
@@ -268,6 +268,9 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset;
 #define OPTION_SCHEMA_TABLE (1L << 29)
 /* Flag set if setup_tables already done */
 #define OPTION_SETUP_TABLES_DONE (1L << 30)
+/* Thr following is used to detect a conflict with DISTINCT
+ in the user query has requested */
+#define SELECT_ALL (ULL(1) << 32)
 
 /*
 Maximum length of time zone name that we support
@@ -4000,7 +4000,15 @@ select_option:
 YYABORT;
 Lex->lock_option= TL_READ_HIGH_PRIORITY;
 }
-| DISTINCT { Select->options|= SELECT_DISTINCT; }
+| DISTINCT
+{
+if (Select->options & SELECT_ALL)
+{
+yyerror(ER(ER_SYNTAX_ERROR));
+YYABORT;
+}
+Select->options|= SELECT_DISTINCT;
+}
 | SQL_SMALL_RESULT { Select->options|= SELECT_SMALL_RESULT; }
 | SQL_BIG_RESULT { Select->options|= SELECT_BIG_RESULT; }
 | SQL_BUFFER_RESULT
@@ -4020,7 +4028,15 @@ select_option:
 {
 Lex->select_lex.options|= OPTION_TO_QUERY_CACHE;
 }
-| ALL {}
+| ALL
+{
+if (Select->options & SELECT_DISTINCT)
+{
+yyerror(ER(ER_SYNTAX_ERROR));
+YYABORT;
+}
+Select->options|= SELECT_ALL;
+}
 ;
 
 select_lock_type:
sql/table.cc (17 lines changed)
@@ -59,6 +59,7 @@ static byte* get_field_name(Field **buff,uint *length,
 3 Wrong data in .frm file
 4 Error (see frm_error)
 5 Error (see frm_error: charset unavailable)
+6 Unknown .frm version
 */
 
 int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
@@ -135,10 +136,14 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
 *fn_ext(share->table_name)='\0'; // Remove extension
 *fn_ext(share->path)='\0'; // Remove extension
 
-if (head[0] != (uchar) 254 || head[1] != 1 ||
-(head[2] != FRM_VER && head[2] != FRM_VER+1 &&
-! (head[2] >= FRM_VER+3 && head[2] <= FRM_VER+4)))
+if (head[0] != (uchar) 254 || head[1] != 1)
 goto err; /* purecov: inspected */
+if (head[2] != FRM_VER && head[2] != FRM_VER+1 &&
+! (head[2] >= FRM_VER+3 && head[2] <= FRM_VER+4))
+{
+error= 6;
+goto err; /* purecov: inspected */
+}
 new_field_pack_flag=head[27];
 new_frm_ver= (head[2] - FRM_VER);
 field_pack_length= new_frm_ver < 2 ? 11 : 17;
@@ -1084,6 +1089,12 @@ static void frm_error(int error, TABLE *form, const char *name,
 MYF(0), csname, real_name);
 break;
 }
+case 6:
+my_printf_error(ER_NOT_FORM_FILE,
+"Table '%-.64s' was created with a different version "
+"of MySQL and cannot be read",
+MYF(0), name);
+break;
 default: /* Better wrong error than none */
 case 4:
 my_error(ER_NOT_FORM_FILE, errortype,
@@ -122,6 +122,7 @@ ParserRow<CPCDAPISession> commands[] =
 CPCD_ARG("stderr", String, Optional, "Redirection of stderr"),
 CPCD_ARG("stdin", String, Optional, "Redirection of stderr"),
 CPCD_ARG("ulimit", String, Optional, "ulimit"),
+CPCD_ARG("shutdown", String, Optional, "shutdown options"),
 
 CPCD_CMD("undefine process", &CPCDAPISession::undefineProcess, ""),
 CPCD_CMD_ALIAS("undef", "undefine process", 0),
@@ -243,6 +243,12 @@ public:
 * @desc Format c:unlimited d:0 ...
 */
 BaseString m_ulimit;
+
+/**
+ * @brief shutdown options
+ */
+BaseString m_shutdown_options;
+
 private:
 class CPCD *m_cpcd;
 void do_exec();
@@ -44,6 +44,8 @@ CPCD::Process::print(FILE * f){
 fprintf(f, "stdout: %s\n", m_stdout.c_str() ? m_stdout.c_str() : "");
 fprintf(f, "stderr: %s\n", m_stderr.c_str() ? m_stderr.c_str() : "");
 fprintf(f, "ulimit: %s\n", m_ulimit.c_str() ? m_ulimit.c_str() : "");
+fprintf(f, "shutdown: %s\n", m_shutdown_options.c_str() ?
+m_shutdown_options.c_str() : "");
 }
 
 CPCD::Process::Process(const Properties & props, class CPCD *cpcd) {
@@ -64,6 +66,7 @@ CPCD::Process::Process(const Properties & props, class CPCD *cpcd) {
 props.get("stdout", m_stdout);
 props.get("stderr", m_stderr);
 props.get("ulimit", m_ulimit);
+props.get("shutdown", m_shutdown_options);
 m_status = STOPPED;
 
 if(strcasecmp(m_type.c_str(), "temporary") == 0){
@@ -454,7 +457,11 @@ CPCD::Process::stop() {
 m_status = STOPPING;
 
 errno = 0;
-int ret = kill(-m_pid, SIGTERM);
+int signo= SIGTERM;
+if(m_shutdown_options == "SIGKILL")
+signo= SIGKILL;
+
+int ret = kill(-m_pid, signo);
 switch(ret) {
 case 0:
 logger.debug("Sent SIGTERM to pid %d", (int)-m_pid);
@@ -585,34 +585,8 @@ public:
 */
 ArrayPool<TcIndexOperation> c_theIndexOperationPool;
 
-/**
- * The list of index operations
- */
-ArrayList<TcIndexOperation> c_theIndexOperations;
-
 UintR c_maxNumberOfIndexOperations;
 
-struct TcSeizedIndexOperation {
-/**
- * Next ptr (used in pool/list)
- */
-union {
-Uint32 nextPool;
-Uint32 nextList;
-};
-/**
- * Prev pointer (used in list)
- */
-Uint32 prevList;
-};
-
-/**
- * Pool of seized index operations
- */
-ArrayPool<TcSeizedIndexOperation> c_theSeizedIndexOperationPool;
-
-typedef Ptr<TcSeizedIndexOperation> TcSeizedIndexOperationPtr;
-
 /************************** API CONNECT RECORD ***********************
 * The API connect record contains the connection record to which the
 * application connects.
@@ -650,7 +624,7 @@ public:
 
 struct ApiConnectRecord {
 ApiConnectRecord(ArrayPool<TcFiredTriggerData> & firedTriggerPool,
-ArrayPool<TcSeizedIndexOperation> & seizedIndexOpPool):
+ArrayPool<TcIndexOperation> & seizedIndexOpPool):
 theFiredTriggers(firedTriggerPool),
 isIndexOp(false),
 theSeizedIndexOperations(seizedIndexOpPool)
@@ -763,7 +737,7 @@ public:
 UintR accumulatingIndexOp;
 UintR executingIndexOp;
 UintR tcIndxSendArray[6];
-ArrayList<TcSeizedIndexOperation> theSeizedIndexOperations;
+ArrayList<TcIndexOperation> theSeizedIndexOperations;
 };
 
 typedef Ptr<ApiConnectRecord> ApiConnectRecordPtr;
@@ -65,7 +65,6 @@ void Dbtc::initData()
 c_theFiredTriggerPool.setSize(c_maxNumberOfFiredTriggers);
 c_theIndexPool.setSize(c_maxNumberOfIndexes);
 c_theIndexOperationPool.setSize(c_maxNumberOfIndexOperations);
-c_theSeizedIndexOperationPool.setSize(c_maxNumberOfIndexOperations);
 c_theAttributeBufferPool.setSize(c_transactionBufferSpace);
 c_firedTriggerHash.setSize((c_maxNumberOfFiredTriggers+10)/10);
 }//Dbtc::initData()
@@ -85,7 +84,7 @@ void Dbtc::initRecords()
 for(unsigned i = 0; i<capiConnectFilesize; i++) {
 p = &apiConnectRecord[i];
 new (p) ApiConnectRecord(c_theFiredTriggerPool,
-c_theSeizedIndexOperationPool);
+c_theIndexOperationPool);
 }
 // Init all fired triggers
 DLFifoList<TcFiredTriggerData> triggers(c_theFiredTriggerPool);
@@ -177,7 +176,6 @@ Dbtc::Dbtc(const class Configuration & conf):
 c_maxNumberOfFiredTriggers(0),
 c_theIndexes(c_theIndexPool),
 c_maxNumberOfIndexes(0),
-c_theIndexOperations(c_theIndexOperationPool),
 c_maxNumberOfIndexOperations(0),
 m_commitAckMarkerHash(m_commitAckMarkerPool)
 {
|
|||||||
jam();
|
jam();
|
||||||
// This is a newly started transaction, clean-up
|
// This is a newly started transaction, clean-up
|
||||||
releaseAllSeizedIndexOperations(regApiPtr);
|
releaseAllSeizedIndexOperations(regApiPtr);
|
||||||
|
|
||||||
|
regApiPtr->transid[0] = tcIndxReq->transId1;
|
||||||
|
regApiPtr->transid[1] = tcIndxReq->transId2;
|
||||||
}//if
|
}//if
|
||||||
if (!seizeIndexOperation(regApiPtr, indexOpPtr)) {
|
|
||||||
|
if (ERROR_INSERTED(8036) || !seizeIndexOperation(regApiPtr, indexOpPtr)) {
|
||||||
jam();
|
jam();
|
||||||
// Failed to allocate index operation
|
// Failed to allocate index operation
|
||||||
TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
|
terrorCode = 288;
|
||||||
|
regApiPtr->m_exec_flag |= TcKeyReq::getExecuteFlag(tcIndxRequestInfo);
|
||||||
tcIndxRef->connectPtr = tcIndxReq->senderData;
|
apiConnectptr = transPtr;
|
||||||
tcIndxRef->transId[0] = regApiPtr->transid[0];
|
abortErrorLab(signal);
|
||||||
tcIndxRef->transId[1] = regApiPtr->transid[1];
|
|
||||||
tcIndxRef->errorCode = 4000;
|
|
||||||
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
|
|
||||||
TcKeyRef::SignalLength, JBB);
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
TcIndexOperation* indexOp = indexOpPtr.p;
|
TcIndexOperation* indexOp = indexOpPtr.p;
|
||||||
@ -11517,15 +11517,17 @@ void Dbtc::execINDXKEYINFO(Signal* signal)
|
|||||||
TcIndexOperationPtr indexOpPtr;
|
TcIndexOperationPtr indexOpPtr;
|
||||||
TcIndexOperation* indexOp;
|
TcIndexOperation* indexOp;
|
||||||
|
|
||||||
indexOpPtr.i = regApiPtr->accumulatingIndexOp;
|
if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
|
||||||
indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
|
{
|
||||||
if (saveINDXKEYINFO(signal,
|
indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
|
||||||
indexOp,
|
if (saveINDXKEYINFO(signal,
|
||||||
src,
|
indexOp,
|
||||||
keyInfoLength)) {
|
src,
|
||||||
jam();
|
keyInfoLength)) {
|
||||||
// We have received all we need
|
jam();
|
||||||
readIndexTable(signal, regApiPtr, indexOp);
|
// We have received all we need
|
||||||
|
readIndexTable(signal, regApiPtr, indexOp);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -11548,15 +11550,17 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
|
|||||||
TcIndexOperationPtr indexOpPtr;
|
TcIndexOperationPtr indexOpPtr;
|
||||||
TcIndexOperation* indexOp;
|
TcIndexOperation* indexOp;
|
||||||
|
|
||||||
indexOpPtr.i = regApiPtr->accumulatingIndexOp;
|
if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
|
||||||
indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
|
{
|
||||||
if (saveINDXATTRINFO(signal,
|
indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
|
||||||
indexOp,
|
if (saveINDXATTRINFO(signal,
|
||||||
src,
|
indexOp,
|
||||||
attrInfoLength)) {
|
src,
|
||||||
jam();
|
attrInfoLength)) {
|
||||||
// We have received all we need
|
jam();
|
||||||
readIndexTable(signal, regApiPtr, indexOp);
|
// We have received all we need
|
||||||
|
readIndexTable(signal, regApiPtr, indexOp);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -11581,7 +11585,7 @@ bool Dbtc::saveINDXKEYINFO(Signal* signal,
|
|||||||
releaseIndexOperation(apiConnectptr.p, indexOp);
|
releaseIndexOperation(apiConnectptr.p, indexOp);
|
||||||
terrorCode = 4000;
|
terrorCode = 4000;
|
||||||
abortErrorLab(signal);
|
abortErrorLab(signal);
|
||||||
return true;
|
return false;
|
||||||
}
|
}
|
||||||
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
|
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
|
||||||
jam();
|
jam();
|
||||||
@ -11614,7 +11618,7 @@ bool Dbtc::saveINDXATTRINFO(Signal* signal,
|
|||||||
releaseIndexOperation(apiConnectptr.p, indexOp);
|
releaseIndexOperation(apiConnectptr.p, indexOp);
|
||||||
terrorCode = 4000;
|
terrorCode = 4000;
|
||||||
abortErrorLab(signal);
|
abortErrorLab(signal);
|
||||||
return true;
|
return false;
|
||||||
}
|
}
|
||||||
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
|
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
|
||||||
jam();
|
jam();
|
||||||
@ -11674,7 +11678,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
|
|||||||
|
|
||||||
jamEntry();
|
jamEntry();
|
||||||
indexOpPtr.i = tcKeyConf->apiConnectPtr;
|
indexOpPtr.i = tcKeyConf->apiConnectPtr;
|
||||||
TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
|
TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
|
||||||
Uint32 confInfo = tcKeyConf->confInfo;
|
Uint32 confInfo = tcKeyConf->confInfo;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -11763,7 +11767,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
|
|||||||
|
|
||||||
jamEntry();
|
jamEntry();
|
||||||
indexOpPtr.i = tcKeyRef->connectPtr;
|
indexOpPtr.i = tcKeyRef->connectPtr;
|
||||||
TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
|
TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
|
||||||
indexOpPtr.p = indexOp;
|
indexOpPtr.p = indexOp;
|
||||||
if (!indexOp) {
|
if (!indexOp) {
|
||||||
jam();
|
jam();
|
||||||
@ -11864,7 +11868,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
|
|||||||
jamEntry();
|
jamEntry();
|
||||||
TcIndexOperationPtr indexOpPtr;
|
TcIndexOperationPtr indexOpPtr;
|
||||||
indexOpPtr.i = transIdAI->connectPtr;
|
indexOpPtr.i = transIdAI->connectPtr;
|
||||||
TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
|
TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
|
||||||
indexOpPtr.p = indexOp;
|
indexOpPtr.p = indexOp;
|
||||||
if (!indexOp) {
|
if (!indexOp) {
|
||||||
jam();
|
jam();
|
||||||
@ -11972,7 +11976,7 @@ void Dbtc::execTCROLLBACKREP(Signal* signal)
|
|||||||
jamEntry();
|
jamEntry();
|
||||||
TcIndexOperationPtr indexOpPtr;
|
TcIndexOperationPtr indexOpPtr;
|
||||||
indexOpPtr.i = tcRollbackRep->connectPtr;
|
indexOpPtr.i = tcRollbackRep->connectPtr;
|
||||||
TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
|
TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
|
||||||
indexOpPtr.p = indexOp;
|
indexOpPtr.p = indexOp;
|
||||||
tcRollbackRep = (TcRollbackRep *)signal->getDataPtrSend();
|
tcRollbackRep = (TcRollbackRep *)signal->getDataPtrSend();
|
||||||
tcRollbackRep->connectPtr = indexOp->tcIndxReq.senderData;
|
tcRollbackRep->connectPtr = indexOp->tcIndxReq.senderData;
|
||||||
@ -12300,16 +12304,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
|
|||||||
bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
|
bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
|
||||||
TcIndexOperationPtr& indexOpPtr)
|
TcIndexOperationPtr& indexOpPtr)
|
||||||
{
|
{
|
||||||
bool seizeOk;
|
return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr);
|
||||||
|
|
||||||
seizeOk = c_theIndexOperations.seize(indexOpPtr);
|
|
||||||
if (seizeOk) {
|
|
||||||
jam();
|
|
||||||
TcSeizedIndexOperationPtr seizedIndexOpPtr;
|
|
||||||
seizeOk &= regApiPtr->theSeizedIndexOperations.seizeId(seizedIndexOpPtr,
|
|
||||||
indexOpPtr.i);
|
|
||||||
}
|
|
||||||
return seizeOk;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
|
void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
|
||||||
@ -12323,18 +12318,16 @@ void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
|
|||||||
indexOp->expectedTransIdAI = 0;
|
indexOp->expectedTransIdAI = 0;
|
||||||
indexOp->transIdAI.release();
|
indexOp->transIdAI.release();
|
||||||
regApiPtr->theSeizedIndexOperations.release(indexOp->indexOpId);
|
regApiPtr->theSeizedIndexOperations.release(indexOp->indexOpId);
|
||||||
c_theIndexOperations.release(indexOp->indexOpId);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Dbtc::releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr)
|
void Dbtc::releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr)
|
||||||
{
|
{
|
||||||
TcSeizedIndexOperationPtr seizedIndexOpPtr;
|
TcIndexOperationPtr seizedIndexOpPtr;
|
||||||
|
|
||||||
regApiPtr->theSeizedIndexOperations.first(seizedIndexOpPtr);
|
regApiPtr->theSeizedIndexOperations.first(seizedIndexOpPtr);
|
||||||
while(seizedIndexOpPtr.i != RNIL) {
|
while(seizedIndexOpPtr.i != RNIL) {
|
||||||
jam();
|
jam();
|
||||||
TcIndexOperation* indexOp =
|
TcIndexOperation* indexOp = seizedIndexOpPtr.p;
|
||||||
c_theIndexOperations.getPtr(seizedIndexOpPtr.i);
|
|
||||||
|
|
||||||
indexOp->indexOpState = IOS_NOOP;
|
indexOp->indexOpState = IOS_NOOP;
|
||||||
indexOp->expectedKeyInfo = 0;
|
indexOp->expectedKeyInfo = 0;
|
||||||
@ -12343,7 +12336,6 @@ void Dbtc::releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr)
|
|||||||
indexOp->attrInfo.release();
|
indexOp->attrInfo.release();
|
||||||
indexOp->expectedTransIdAI = 0;
|
indexOp->expectedTransIdAI = 0;
|
||||||
indexOp->transIdAI.release();
|
indexOp->transIdAI.release();
|
||||||
c_theIndexOperations.release(seizedIndexOpPtr.i);
|
|
||||||
regApiPtr->theSeizedIndexOperations.next(seizedIndexOpPtr);
|
regApiPtr->theSeizedIndexOperations.next(seizedIndexOpPtr);
|
||||||
}
|
}
|
||||||
regApiPtr->theSeizedIndexOperations.release();
|
regApiPtr->theSeizedIndexOperations.release();
|
||||||
|
@@ -130,7 +130,7 @@ ErrorReporter::formatMessage(ErrorCategory type,
 "Date/Time: %s\nType of error: %s\n"
 "Message: %s\nFault ID: %d\nProblem data: %s"
 "\nObject of reference: %s\nProgramName: %s\n"
-"ProcessID: %d\nTraceFile: %s\n***EOM***\n",
+"ProcessID: %d\nTraceFile: %s\n%s\n***EOM***\n",
 formatTimeStampString() ,
 errorType[type],
 lookupErrorMessage(faultID),
@@ -139,7 +139,8 @@ ErrorReporter::formatMessage(ErrorCategory type,
 objRef,
 my_progname,
 processId,
-theNameOfTheTraceFile ? theNameOfTheTraceFile : "<no tracefile>");
+theNameOfTheTraceFile ? theNameOfTheTraceFile : "<no tracefile>",
+NDB_VERSION_STRING);
 
 // Add trailing blanks to get a fixed lenght of the message
 while (strlen(messptr) <= MESSAGE_LENGTH-3){
@@ -892,7 +892,10 @@ ndb_mgm_restart2(NdbMgmHandle handle, int no_of_nodes, const int * node_list,
 args.put("initialstart", initial);
 args.put("nostart", nostart);
 const Properties *reply;
+const int timeout = handle->read_timeout;
+handle->read_timeout= 5*60*1000; // 5 minutes
 reply = ndb_mgm_call(handle, restart_reply, "restart all", &args);
+handle->read_timeout= timeout;
 CHECK_REPLY(reply, -1);
 
 BaseString result;
@@ -925,7 +928,10 @@ ndb_mgm_restart2(NdbMgmHandle handle, int no_of_nodes, const int * node_list,
 args.put("nostart", nostart);
 
 const Properties *reply;
+const int timeout = handle->read_timeout;
+handle->read_timeout= 5*60*1000; // 5 minutes
 reply = ndb_mgm_call(handle, restart_reply, "restart node", &args);
+handle->read_timeout= timeout;
 if(reply != NULL) {
 BaseString result;
 reply->get("result", result);
@@ -66,6 +66,7 @@ ClusterMgr::ClusterMgr(TransporterFacade & _facade):
 {
 ndbSetOwnVersion();
 clusterMgrThreadMutex = NdbMutex_Create();
+noOfAliveNodes= 0;
 noOfConnectedNodes= 0;
 theClusterMgrThread= 0;
 }
@@ -336,9 +337,9 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
 node.m_state = apiRegConf->nodeState;
 if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED ||
 node.m_state.startLevel == NodeState::SL_SINGLEUSER)){
-node.m_alive = true;
+set_node_alive(node, true);
 } else {
-node.m_alive = false;
+set_node_alive(node, false);
 }//if
 node.hbSent = 0;
 node.hbCounter = 0;
@@ -361,7 +362,7 @@ ClusterMgr::execAPI_REGREF(const Uint32 * theData){
 assert(node.defined == true);
 
 node.compatible = false;
-node.m_alive = false;
+set_node_alive(node, false);
 node.m_state = NodeState::SL_NOTHING;
 node.m_info.m_version = ref->version;
 
@@ -446,7 +447,7 @@ ClusterMgr::reportNodeFailed(NodeId nodeId){
 
 Node & theNode = theNodes[nodeId];
 
-theNode.m_alive = false;
+set_node_alive(theNode, false);
 theNode.m_info.m_connectCount ++;
 
 if(theNode.connected)
@@ -462,8 +463,7 @@ ClusterMgr::reportNodeFailed(NodeId nodeId){
 }
 
 theNode.nfCompleteRep = false;
-
-if(noOfConnectedNodes == 0){
+if(noOfAliveNodes == 0){
 NFCompleteRep rep;
 for(Uint32 i = 1; i<MAX_NODES; i++){
 if(theNodes[i].defined && theNodes[i].nfCompleteRep == false){
@@ -80,6 +80,7 @@ public:
 Uint32 getNoOfConnectedNodes() const;
 
 private:
+Uint32 noOfAliveNodes;
 Uint32 noOfConnectedNodes;
 Node theNodes[MAX_NODES];
 NdbThread* theClusterMgrThread;
@@ -100,6 +101,19 @@ private:
 void execAPI_REGREF (const Uint32 * theData);
 void execNODE_FAILREP (const Uint32 * theData);
 void execNF_COMPLETEREP(const Uint32 * theData);
+
+inline void set_node_alive(Node& node, bool alive){
+if(node.m_alive && !alive)
+{
+assert(noOfAliveNodes);
+noOfAliveNodes--;
+}
+else if(!node.m_alive && alive)
+{
+noOfAliveNodes++;
+}
+node.m_alive = alive;
+}
 };
 
 inline
@@ -173,7 +173,7 @@ ErrorBundle ErrorCodes[] = {
 { 4021, TR, "Out of Send Buffer space in NDB API" },
 { 4022, TR, "Out of Send Buffer space in NDB API" },
 { 4032, TR, "Out of Send Buffer space in NDB API" },
+{ 288, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
 /**
 * InsufficientSpace
 */
@@ -56,6 +56,7 @@ public:
 BaseString m_stdout;
 BaseString m_stderr;
 BaseString m_ulimit;
+BaseString m_shutdown_options;
 };
 
 private:
@@ -359,7 +359,7 @@ int runLateCommit(NDBT_Context* ctx, NDBT_Step* step){
 if(hugoOps.startTransaction(pNdb) != 0)
 return NDBT_FAILED;
 
-if(hugoOps.pkUpdateRecord(pNdb, 1) != 0)
+if(hugoOps.pkUpdateRecord(pNdb, 1, 128) != 0)
 return NDBT_FAILED;
 
 if(hugoOps.execute_NoCommit(pNdb) != 0)
@@ -6,7 +6,12 @@ include $(top_srcdir)/storage/ndb/config/type_util.mk.am
 include $(top_srcdir)/storage/ndb/config/type_mgmapiclient.mk.am
 
 test_PROGRAMS = atrt
-test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt
+test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
+conf-daily-basic-ndbmaster.txt \
+conf-daily-basic-shark.txt \
+conf-daily-devel-ndbmaster.txt \
+conf-daily-sql-ndbmaster.txt \
+conf-daily-basic-dl145a.txt
 test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
 atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh
 
@@ -116,10 +116,7 @@ main(int argc, const char ** argv){
 */
 if(restart){
 g_logger.info("(Re)starting ndb processes");
-if(!stop_processes(g_config, atrt_process::NDB_MGM))
-goto end;
-
-if(!stop_processes(g_config, atrt_process::NDB_DB))
+if(!stop_processes(g_config, ~0))
 goto end;
 
 if(!start_processes(g_config, atrt_process::NDB_MGM))
@@ -142,6 +139,9 @@ main(int argc, const char ** argv){
 goto end;
 
 started:
+if(!start_processes(g_config, p_servers))
+goto end;
+
 g_logger.info("Ndb start completed");
 }
 
@@ -158,9 +158,6 @@ main(int argc, const char ** argv){
 if(!setup_test_case(g_config, test_case))
 goto end;
 
-if(!start_processes(g_config, p_servers))
-goto end;
-
 if(!start_processes(g_config, p_clients))
 goto end;
 
@@ -201,9 +198,6 @@ main(int argc, const char ** argv){
 if(!stop_processes(g_config, p_clients))
 goto end;
 
-if(!stop_processes(g_config, p_servers))
-goto end;
-
 if(!gather_result(g_config, &result))
 goto end;
 
@@ -454,6 +448,7 @@ setup_config(atrt_config& config){
 proc.m_proc.m_runas = proc.m_host->m_user;
 proc.m_proc.m_ulimit = "c:unlimited";
 proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir.c_str());
+proc.m_proc.m_shutdown_options = "";
 proc.m_hostname = proc.m_host->m_hostname;
 proc.m_ndb_mgm_port = g_default_base_port;
 if(split1[0] == "mgm"){
@@ -476,21 +471,19 @@ setup_config(atrt_config& config){
 proc.m_proc.m_path.assign(dir).append("/libexec/mysqld");
 proc.m_proc.m_args = "--core-file --ndbcluster";
 proc.m_proc.m_cwd.appfmt("%d.mysqld", index);
-if(mysql_port_offset > 0 || g_mysqld_use_base){
-// setup mysql specific stuff
-const char * basedir = proc.m_proc.m_cwd.c_str();
-proc.m_proc.m_args.appfmt("--datadir=%s", basedir);
-proc.m_proc.m_args.appfmt("--pid-file=%s/mysql.pid", basedir);
-proc.m_proc.m_args.appfmt("--socket=%s/mysql.sock", basedir);
-proc.m_proc.m_args.appfmt("--port=%d",
-g_default_base_port-(++mysql_port_offset));
-}
+proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice
 } else if(split1[0] == "api"){
 proc.m_type = atrt_process::NDB_API;
 proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_api");
 proc.m_proc.m_path = "";
 proc.m_proc.m_args = "";
 proc.m_proc.m_cwd.appfmt("%d.ndb_api", index);
+} else if(split1[0] == "mysql"){
+proc.m_type = atrt_process::MYSQL_CLIENT;
+proc.m_proc.m_name.assfmt("%d-%s", index, "mysql");
+proc.m_proc.m_path = "";
+proc.m_proc.m_args = "";
+proc.m_proc.m_cwd.appfmt("%d.mysql", index);
 } else {
 g_logger.critical("%s:%d: Unhandled process type: %s",
 g_process_config_filename, lineno,
@@ -913,6 +906,11 @@ read_test_case(FILE * file, atrt_testcase& tc, int& line){
 tc.m_report= true;
 else
 tc.m_report= false;
 
+if(p.get("run-all", &mt) && strcmp(mt, "yes") == 0)
+tc.m_run_all= true;
+else
+tc.m_run_all= false;
+
 return true;
 }
@@ -928,16 +926,17 @@ setup_test_case(atrt_config& config, const atrt_testcase& tc){
 size_t i = 0;
 for(; i<config.m_processes.size(); i++){
 atrt_process & proc = config.m_processes[i];
-if(proc.m_type == atrt_process::NDB_API){
+if(proc.m_type == atrt_process::NDB_API || proc.m_type == atrt_process::MYSQL_CLIENT){
 proc.m_proc.m_path.assfmt("%s/bin/%s", proc.m_host->m_base_dir.c_str(),
 tc.m_command.c_str());
 proc.m_proc.m_args.assign(tc.m_args);
-break;
+if(!tc.m_run_all)
+break;
 }
 }
 for(i++; i<config.m_processes.size(); i++){
 atrt_process & proc = config.m_processes[i];
-if(proc.m_type == atrt_process::NDB_API){
+if(proc.m_type == atrt_process::NDB_API || proc.m_type == atrt_process::MYSQL_CLIENT){
 proc.m_proc.m_path.assign("");
 proc.m_proc.m_args.assign("");
 }
@@ -1,465 +1,101 @@
 #!/bin/sh
-# NAME
-# make-config.sh - Makes a config file for mgm server
-#
-# SYNOPSIS
-# make-config.sh [ -t <template> ] [-s] [ -m <machine conf> [ -d <directory> ]
-#
-# DESCRIPTION
-#
-# OPTIONS
-#
-# EXAMPLES
-#
-#
-# ENVIRONMENT
-# NDB_PROJ_HOME Home dir for ndb
-#
-# FILES
-# $NDB_PROJ_HOME/lib/funcs.sh general shell script functions
-#
-#
-# SEE ALSO
-#
-# DIAGNOSTICTS
-#
-# VERSION
-# 1.0
-# 1.1 021112 epesson: Adapted for new mgmt server in NDB 2.00
-#
-# AUTHOR
-# Jonas Oreland
-#
-# CHANGES
-# also generate ndbnet config
-#
 
-progname=`basename $0`
+baseport=""
-synopsis="make-config.sh [ -t template ] [ -m <machine conf> ] [ -d <dst directory> ][-s] [<mgm host>]"
+basedir=""
+proc_no=1
+node_id=1
 
-#: ${NDB_PROJ_HOME:?} # If undefined, exit with error message
+d_file=/tmp/d.$$
+dir_file=/tmp/dirs.$$
+config_file=/tmp/config.$$
+cluster_file=/tmp/cluster.$$
 
-#: ${NDB_LOCAL_BUILD_OPTIONS:=--} # If undef, set to --. Keeps getopts happy.
+add_procs(){
-# You may have to experiment a bit
-# to get quoting right (if you need it).
 
 
-#. $NDB_PROJ_HOME/lib/funcs.sh # Load some good stuff
-trace() {
-echo $* 1>&2
-}
-syndie() {
-trace $*
-exit 1
-}
 
-# defaults for options related variables
-#
 
-mgm_nodes=0
-ndb_nodes=0
-api_nodes=0
-uniq_id=$$.$$
-own_host=`hostname`
-dst_dir=""
-template=/dev/null
-machines=/dev/null
-verbose=yes
 
-# used if error when parsing the options environment variable
-#
-env_opterr="options environment variable: <<$options>>"
 
-# Option parsing, for the options variable as well as the command line.
-#
-# We want to be able to set options in an environment variable,
-# as well as on the command line. In order not to have to repeat
-# the same getopts information twice, we loop two times over the
-# getopts while loop. The first time, we process options from
-# the options environment variable, the second time we process
-# options from the command line.
-#
-# The things to change are the actual options and what they do.
-#
-add_node(){
-no=$1; shift
 type=$1; shift
-echo $* | awk 'BEGIN{FS=":";}{h=$1; if(h=="localhost") h="'$own_host'";
+while [ $# -ne 0 ]
-printf("%s_%d_host=%s\n", "'$type'", "'$no'", h);
+do
-if(NF>1 && $2!="") printf("%s_%d_port=%d\n",
+add_proc $type $1
-"'$type'", "'$no'", $2);
+shift
-if(NF>2 && $3!="") printf("%s_%d_dir=%s\n",
+done
-"'$type'", "'$no'", $3);
-}'
 }
 
+add_proc (){
-add_mgm_node(){
+dir=""
-mgm_nodes=`cat /tmp/mgm_nodes.$uniq_id | grep "_host=" | wc -l`
+conf=""
-mgm_nodes=`expr $mgm_nodes + 1`
+case $type in
-while [ $# -gt 0 ]
+mgm)
-do
+dir="ndb_mgmd"
-add_node ${mgm_nodes} mgm_node $1 >> /tmp/mgm_nodes.$uniq_id
+conf="[ndb_mgmd]\nId: $node_id\nHostName: $2\n"
-shift
+node_id=`expr $node_id + 1`
-mgm_nodes=`expr $mgm_nodes + 1`
+;;
-done
+api)
-}
+dir="ndb_api"
+conf="[api]\nId: $node_id\nHostName: $2\n"
-add_ndb_node(){
+node_id=`expr $node_id + 1`
-ndb_nodes=`cat /tmp/ndb_nodes.$uniq_id | grep "_host=" | wc -l`
+;;
-ndb_nodes=`expr $ndb_nodes + 1`
+ndb)
-while [ $# -gt 0 ]
+dir="ndbd"
-do
+conf="[ndbd]\nId: $node_id\nHostName: $2\n"
-add_node ${ndb_nodes} ndb_node $1 >> /tmp/ndb_nodes.$uniq_id
+node_id=`expr $node_id + 1`
-shift
+;;
-ndb_nodes=`expr $ndb_nodes + 1`
+mysqld)
-done
+dir="mysqld"
-}
+conf="[mysqld]\nId: $node_id\nHostName: $2\n"
+node_id=`expr $node_id + 1`
-add_api_node(){
+;;
-api_nodes=`cat /tmp/api_nodes.$uniq_id | grep "_host=" |wc -l`
+mysql)
-api_nodes=`expr $api_nodes + 1`
+dir="mysql"
-while [ $# -gt 0 ]
+;;
-do
-add_node ${api_nodes} api_node $1 >> /tmp/api_nodes.$uniq_id
-shift
-api_nodes=`expr $api_nodes + 1`
-done
-}
 
-rm -rf /tmp/mgm_nodes.$uniq_id ; touch /tmp/mgm_nodes.$uniq_id
-rm -rf /tmp/ndb_nodes.$uniq_id ; touch /tmp/ndb_nodes.$uniq_id
-rm -rf /tmp/api_nodes.$uniq_id ; touch /tmp/api_nodes.$uniq_id
 
-for optstring in "$options" "" # 1. options variable 2. cmd line
-do
 
-while getopts d:m:t:n:o:a:b:p:s i $optstring # optstring empty => no arg => cmd line
-do
-case $i in
 
-q) verbose="";; # echo important things
-t) template=$OPTARG;; # Template
-d) dst_dir=$OPTARG;; # Destination directory
-m) machines=$OPTARG;; # Machine configuration
-s) mgm_start=yes;; # Make mgm start script
-\?) syndie $env_opterr;; # print synopsis and exit
 
 esac
-done
+dir="$proc_no.$dir"
+proc_no=`expr $proc_no + 1`
-[ -n "$optstring" ] && OPTIND=1 # Reset for round 2, cmdline options
+echo -e $dir >> $dir_file
+if [ "$conf" ]
-env_opterr= # Round 2 should not use the value
 
-done
-shift `expr $OPTIND - 1`
 
-if [ -z "$dst_dir" ]
-then
-verbose=
-fi
 
-skip(){
-no=$1; shift
-shift $no
-echo $*
-}
 
-# --- option parsing done ---
-grep "^ndb: " $machines | while read node
-do
-node=`skip 1 $node`
-add_ndb_node $node
-done
 
-grep "^api: " $machines | while read node
-do
-node=`skip 1 $node`
-add_api_node $node
-done
 
-grep "^mgm: " $machines | while read node
-do
-node=`skip 1 $node`
-add_mgm_node $node
-done
 
-tmp=`grep "^baseport: " $machines | tail -1 | cut -d ":" -f 2`
-if [ "$tmp" ]
-then
-baseport=`echo $tmp`
-else
-syndie "Unable to find baseport"
-fi
 
-trim(){
-echo $*
-}
-tmp=`grep "^basedir: " $machines | tail -1 | cut -d ":" -f 2`
-if [ "$tmp" ]
-then
-basedir=`trim $tmp`
-fi
 
-# -- Load enviroment --
-ndb_nodes=`cat /tmp/ndb_nodes.$uniq_id | grep "_host=" | wc -l`
-api_nodes=`cat /tmp/api_nodes.$uniq_id | grep "_host=" | wc -l`
-mgm_nodes=`cat /tmp/mgm_nodes.$uniq_id | grep "_host=" | wc -l`
-. /tmp/ndb_nodes.$uniq_id
-. /tmp/api_nodes.$uniq_id
-. /tmp/mgm_nodes.$uniq_id
-rm -f /tmp/ndb_nodes.$uniq_id /tmp/api_nodes.$uniq_id /tmp/mgm_nodes.$uniq_id
 
-# -- Verify
-trace "Verifying arguments"
 
-if [ ! -r $template ]
-then
-syndie "Unable to read template file: $template"
-fi
 
-if [ $ndb_nodes -le 0 ]
-then
-syndie "No ndb nodes specified"
-fi
 
-if [ $api_nodes -le 0 ]
-then
-syndie "No api nodes specified"
-fi
 
-if [ $mgm_nodes -gt 1 ]
-then
-syndie "More than one mgm node specified"
-fi
 
-if [ $mgm_nodes -eq 0 ]
-then
-trace "No managment server specified using `hostname`"
-mgm_nodes=1
-mgm_node_1=`hostname`
-fi
 
-if [ -n "$dst_dir" ]
-then
-mkdir -p $dst_dir
-if [ ! -d $dst_dir ]
-then
-syndie "Unable to create dst dir: $dst_dir"
-fi
-DST=/tmp/$uniq_id
-fi
 
-# --- option verifying done ---
 
-# Find uniq computers
-i=1
-while [ $i -le $mgm_nodes ]
-do
-echo `eval echo "\$"mgm_node_${i}_host` >> /tmp/hosts.$uniq_id
-i=`expr $i + 1`
-done
 
-i=1
-while [ $i -le $ndb_nodes ]
-do
-echo `eval echo "\$"ndb_node_${i}_host` >> /tmp/hosts.$uniq_id
-i=`expr $i + 1`
-done
 
-i=1
-while [ $i -le $api_nodes ]
-do
-echo `eval echo "\$"api_node_${i}_host` >> /tmp/hosts.$uniq_id
-i=`expr $i + 1`
-done
 
-sort -u -o /tmp/hosts.$uniq_id /tmp/hosts.$uniq_id
 
-get_computer_id(){
-grep -w -n $1 /tmp/hosts.$uniq_id | cut -d ":" -f 1
-}
 
-get_mgm_computer_id(){
-a=`eval echo "\$"mgm_node_${1}_host`
-get_computer_id $a
-}
 
-get_ndb_computer_id(){
-a=`eval echo "\$"ndb_node_${1}_host`
-get_computer_id $a
-}
 
-get_api_computer_id(){
-a=`eval echo "\$"api_node_${1}_host`
-get_computer_id $a
-}
 
-# -- Write config files --
 
-mgm_port=$baseport
 
-(
-i=1
-#echo "COMPUTERS"
-cat /tmp/hosts.$uniq_id | while read host
-do
-echo "[COMPUTER]"
-echo "Id: $i"
-echo "ByteOrder: Big"
-echo "HostName: $host"
-echo
-i=`expr $i + 1`
-done
 
-node_id=1
-echo
 
-# Mgm process
-echo
-echo "[MGM]"
-echo "Id: $node_id"
-echo "ExecuteOnComputer: `get_mgm_computer_id 1`"
-echo "PortNumber: $mgm_port"
-node_id=`expr $node_id + 1`
 
-# Ndb processes
-i=1
-ndb_nodes=`trim $ndb_nodes`
-while [ $i -le $ndb_nodes ]
-do
-echo
-echo "[DB]"
-echo "Id: $node_id"
-echo "ExecuteOnComputer: `get_ndb_computer_id $i`"
-echo "FileSystemPath: $basedir/run/node-${node_id}-fs"
-i=`expr $i + 1`
-node_id=`expr $node_id + 1`
-done
 
-# API processes
-i=1
-while [ $i -le $api_nodes ]
-do
-echo
-echo "[API]"
-echo "Id: $node_id"
-echo "ExecuteOnComputer: `get_api_computer_id $i`"
-i=`expr $i + 1`
-node_id=`expr $node_id + 1`
-done
 
-# Connections
-current_port=`expr $mgm_port + 1`
-echo
 
-# Connect Mgm with all ndb-nodes
-i=1
-while [ $i -le $ndb_nodes ]
-do
-echo
-echo "[TCP]"
-echo "NodeId1: 1"
-echo "NodeId2: `expr $i + 1`"
-echo "PortNumber: $current_port"
-i=`expr $i + 1`
-current_port=`expr $current_port + 1`
-done
 
-# Connect All ndb processes with all ndb processes
-i=1
-while [ $i -le $ndb_nodes ]
-do
-j=`expr $i + 1`
-while [ $j -le $ndb_nodes ]
-do
-echo
-echo "[TCP]"
-echo "NodeId1: `expr $i + 1`"
-echo "NodeId2: `expr $j + 1`"
-echo "PortNumber: $current_port"
-j=`expr $j + 1`
-current_port=`expr $current_port + 1`
-done
-i=`expr $i + 1`
-done
 
-# Connect all ndb-nodes with all api nodes
-i=1
-while [ $i -le $ndb_nodes ]
-do
-j=1
-while [ $j -le $api_nodes ]
-do
-echo
-echo "[TCP]"
-echo "NodeId1: `expr $i + 1`"
-echo "NodeId2: `expr $j + $ndb_nodes + 1`"
-echo "PortNumber: $current_port"
-j=`expr $j + 1`
-current_port=`expr $current_port + 1`
-done
-i=`expr $i + 1`
-done
-echo
-) > $DST
 
-trace "Init config file done"
 
-if [ -z "$dst_dir" ]
-then
-cat $DST
-rm -f $DST
-rm -f /tmp/hosts.$uniq_id
-exit 0
-fi
 
-###
-# Create Ndb.cfg files
 
-# nodeid=2;host=localhost:2200
 
-# Mgm node
-mkcfg(){
-mkdir -p $dst_dir/${2}.ndb_${1}
-(
-echo "OwnProcessId $2"
-echo "host://${mgm_node_1_host}:${mgm_port}"
|
|
||||||
) > $dst_dir/${2}.ndb_${1}/Ndb.cfg
|
|
||||||
if [ $1 = "db" ]
|
|
 then
-mkdir $dst_dir/node-${2}-fs
+echo -e $conf >> $config_file
 fi
 }

-mkcfg mgm 1
-cat $DST > $dst_dir/1.ndb_mgm/initconfig.txt

-trace "Creating Ndb.cfg for ndb nodes"
-current_node=2
-i=1
-while [ $i -le $ndb_nodes ]
+cnf=/dev/null
+cat $1 | while read line
 do
-mkcfg db ${current_node}
-i=`expr $i + 1`
-current_node=`expr $current_node + 1`
+case $line in
+baseport:*) baseport=`echo $line | sed 's/baseport[ ]*:[ ]*//g'`;;
+basedir:*) basedir=`echo $line | sed 's/basedir[ ]*:[ ]*//g'`;;
+mgm:*) add_procs mgm `echo $line | sed 's/mgm[ ]*:[ ]*//g'`;;
+api:*) add_procs api `echo $line | sed 's/api[ ]*:[ ]*//g'`;;
+ndb:*) add_procs ndb `echo $line | sed 's/ndb[ ]*:[ ]*//g'`;;
+mysqld:*) add_procs mysqld `echo $line | sed 's/mysqld[ ]*:[ ]*//g'`;;
+mysql:*) add_procs mysql `echo $line | sed 's/mysql[ ]*:[ ]*//g'`;;
+"-- cluster config")
+if [ "$cnf" = "/dev/null" ]
+then
+cnf=$cluster_file
+else
+cnf=/dev/null
+fi
+line="";;
+*) echo $line >> $cnf; line="";;
+esac
+if [ "$line" ]
+then
+echo $line >> $d_file
+fi
 done

-trace "Creating Ndb.cfg for api nodes"
+cat $dir_file | xargs mkdir -p

-i=1
-while [ $i -le $api_nodes ]
-do
-mkcfg api ${current_node}
-i=`expr $i + 1`
-current_node=`expr $current_node + 1`
+if [ -f $cluster_file ]
+then
+cat $cluster_file $config_file >> /tmp/config2.$$
+mv /tmp/config2.$$ $config_file
+fi
+for i in `find . -type d -name '*.ndb_mgmd'`
+do
+cp $config_file $i/config.ini
 done

-rm -f $DST
-rm -f /tmp/hosts.$uniq_id
+mv $d_file d.txt
+rm -f $config_file $dir_file $cluster_file


-exit 0
-# vim: set sw=4:
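
Note on the rewritten script above: instead of the old $machines file, the new make-config.sh reads a single machine-description file passed as its first argument and only recognizes the directives visible in the case statement of the diff (baseport, basedir, mgm, ndb, api, mysqld, mysql, plus optional "-- cluster config" sections). A minimal sketch of such an input is shown below; the host names, port, directory, and the settings inside the cluster-config section are hypothetical placeholders, not values taken from this commit.

# Hypothetical machine-description file for the rewritten make-config.sh
# (all concrete values below are illustrative assumptions).
cat > d.tmp <<'EOF'
baseport: 14000
basedir: /space/autotest
mgm: host1
ndb: host2 host3
api: host4
mysqld: host4
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
-- cluster config
EOF

# ndb-autotest.sh (further down in this diff) then feeds such a file to the
# script via its $mkconfig variable:
#   $mkconfig d.tmp
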
@@ -1,7 +1,7 @@
 #!/bin/sh

 save_args=$*
-VERSION="ndb-autotest.sh version 1.0"
+VERSION="ndb-autotest.sh version 1.04"

 DATE=`date '+%Y-%m-%d'`
 export DATE
@@ -71,11 +71,18 @@ then
 cd $dst_place
 rm -rf $run_dir/*
 aclocal; autoheader; autoconf; automake
-(cd innobase; aclocal; autoheader; autoconf; automake)
-(cd bdb/dist; sh s_all)
+if [ -d storage ]
+then
+(cd storage/innobase; aclocal; autoheader; autoconf; automake)
+(cd storage/bdb/dist; sh s_all)
+else
+(cd innobase; aclocal; autoheader; autoconf; automake)
+(cd bdb/dist; sh s_all)
+fi
 eval $configure --prefix=$run_dir
 make
 make install
+(cd $run_dir; ./bin/mysql_install_db)
 fi

 ###
@@ -103,7 +110,9 @@ fi
 test_dir=$run_dir/mysql-test/ndb
 atrt=$test_dir/atrt
 html=$test_dir/make-html-reports.sh
-PATH=$test_dir:$PATH
+mkconfig=$run_dir/mysql-test/ndb/make-config.sh
+
+PATH=$run_dir/bin:$test_dir:$PATH
 export PATH

 filter(){
@@ -125,20 +134,16 @@ hosts=`cat /tmp/hosts.$DATE`

 if [ "$deploy" ]
 then
-(cd / && tar cfz /tmp/build.$DATE.tgz $run_dir )
-for i in $hosts
-do
-ok=0
-scp /tmp/build.$DATE.tgz $i:/tmp/build.$DATE.$$.tgz && \
-ssh $i "rm -rf /space/autotest/*" && \
-ssh $i "cd / && tar xfz /tmp/build.$DATE.$$.tgz" && \
-ssh $i "rm /tmp/build.$DATE.$$.tgz" && ok=1
-if [ $ok -eq 0 ]
-then
-echo "$i failed during scp/ssh, excluding"
-echo $i >> /tmp/failed.$DATE
-fi
-done
+for i in $hosts
+do
+rsync -a --delete --force --ignore-errors $run_dir/ $i:$run_dir
+ok=$?
+if [ $ok -ne 0 ]
+then
+echo "$i failed during rsync, excluding"
+echo $i >> /tmp/failed.$DATE
+fi
+done
 fi
 rm -f /tmp/build.$DATE.tgz

@@ -170,6 +175,18 @@ choose(){
 cat $TMP1
 rm -f $TMP1
 }

+choose_conf(){
+host=`hostname -s`
+if [ -f $test_dir/conf-$1-$host.txt ]
+then
+echo "$test_dir/conf-$1-$host.txt"
+elif [ -f $test_dir/conf-$1.txt ]
+then
+echo "$test_dir/conf-$1.txt"
+fi
+}
+
 start(){
 rm -rf report.txt result* log.txt
 $atrt -v -v -r -R --log-file=log.txt --testcase-file=$test_dir/$2-tests.txt &
@@ -186,11 +203,17 @@ start(){
 p2=`pwd`
 cd ..
 tar cfz /tmp/res.$$.tgz `basename $p2`/$DATE
-scp /tmp/res.$$.tgz $result_host:$result_path
-ssh $result_host "cd $result_path && tar xfz res.$$.tgz && rm -f res.$$.tgz"
+scp /tmp/res.$$.tgz $result_host:$result_path/res.$DATE.`hostname -s`.$2.$$.tgz
 rm -f /tmp/res.$$.tgz
 }

+count_hosts(){
+cnt=`grep "CHOOSE_host" $1 |
+awk '{for(i=1; i<=NF;i++) if(match($i, "CHOOSE_host") > 0) print $i;}' |
+sort | uniq | wc -l`
+echo $cnt
+}
+
 p=`pwd`
 for dir in $RUN
 do
@@ -199,10 +222,11 @@ do
 run_dir=$base_dir/run-$dir-mysql-$clone-$target
 res_dir=$base_dir/result-$dir-mysql-$clone-$target/$DATE

-mkdir -p $res_dir
-rm -rf $res_dir/*
+mkdir -p $run_dir $res_dir
+rm -rf $res_dir/* $run_dir/*

-count=`grep -c "COMPUTER" $run_dir/1.ndb_mgmd/initconfig.template`
+conf=`choose_conf $dir`
+count=`count_hosts $conf`
 avail_hosts=`filter /tmp/filter_hosts.$$ $hosts`
 avail=`echo $avail_hosts | wc -w`
 if [ $count -gt $avail ]
@@ -212,12 +236,12 @@ do
 break;
 fi

-run_hosts=`echo $avail_hosts| awk '{for(i=1;i<='$count';i++)print $i;}'`
-choose $run_dir/d.template $run_hosts > $run_dir/d.txt
-choose $run_dir/1.ndb_mgmd/initconfig.template $run_hosts > $run_dir/1.ndb_mgmd/config.ini
+run_hosts=`echo $avail_hosts|awk '{for(i=1;i<='$count';i++)print $i;}'`
 echo $run_hosts >> /tmp/filter_hosts.$$

 cd $run_dir
+choose $conf $run_hosts > d.tmp
+$mkconfig d.tmp
 start $dir-mysql-$clone-$target $dir $res_dir &
 done
 cd $p
@@ -69,6 +69,7 @@ struct atrt_config {

 struct atrt_testcase {
 bool m_report;
+bool m_run_all;
 time_t m_max_time;
 BaseString m_command;
 BaseString m_args;
@@ -282,6 +282,7 @@ convert(const Properties & src, SimpleCpcClient::Process & dst){
 b &= src.get("stdout", dst.m_stdout);
 b &= src.get("stderr", dst.m_stderr);
 b &= src.get("ulimit", dst.m_ulimit);
+b &= src.get("shutdown", dst.m_shutdown_options);

 return b;
 }
@@ -305,6 +306,7 @@ convert(const SimpleCpcClient::Process & src, Properties & dst ){
 b &= dst.put("stdout", src.m_stdout.c_str());
 b &= dst.put("stderr", src.m_stderr.c_str());
 b &= dst.put("ulimit", src.m_ulimit.c_str());
+b &= dst.put("shutdown", src.m_shutdown_options.c_str());

 return b;
 }
@@ -372,6 +374,7 @@ SimpleCpcClient::list_processes(Vector<Process> &procs, Properties& reply) {
 CPC_ARG("stdout",String, Mandatory, "Redirect stdout"),
 CPC_ARG("stderr",String, Mandatory, "Redirect stderr"),
 CPC_ARG("ulimit",String, Mandatory, "ulimit"),
+CPC_ARG("shutdown",String, Mandatory, "shutdown"),

 CPC_END()
 };
@@ -4591,7 +4591,7 @@ uint my_well_formed_len_sjis(CHARSET_INFO *cs __attribute__((unused)),
 {
 const char *b0= b;
 *error= 0;
-while (pos && b < e)
+while (pos-- && b < e)
 {
 if ((uchar) b[0] < 128)
 {