Merge jamppa@bk-internal.mysql.com:/home/bk/mysql-5.1-new
into a193-229-222-105.elisa-laajakaista.fi:/home/my/bk/mysql-5.1-new

configure.in: Auto merged
sql/mysqld.cc: Auto merged
sql/set_var.cc: Auto merged

Commit: fd03e915ea
@ -48,6 +48,7 @@
50
=6
BUILD/compile-pentium-maintainer
BitKeeper/etc/RESYNC_TREE
BitKeeper/etc/config
BitKeeper/etc/csets
BitKeeper/etc/csets-in
@ -295,6 +296,7 @@ libmysql/*.ds?
libmysql/*.vcproj
libmysql/conf_to_src
libmysql/debug/libmysql.exp
libmysql/libmysql.ver
libmysql/my_static.h
libmysql/my_time.c
libmysql/mysys_priv.h
@ -1611,4 +1613,3 @@ vio/viotest-sslconnect.cpp
vio/viotest.cpp
zlib/*.ds?
zlib/*.vcproj
BitKeeper/etc/RESYNC_TREE
@ -45,22 +45,22 @@ CLEAN_FILES: $(TXT_FILES)
GT = $(srcdir)/Support/generate-text-files.pl

../INSTALL-SOURCE: mysql.info $(GT)
perl -w $(GT) $< "installing-source" "windows-source-build" > $@
perl -w $(GT) mysql.info "installing-source" "windows-source-build" > $@

../INSTALL-WIN-SOURCE: mysql.info $(GT)
perl -w $(GT) $< "windows-source-build" "post-installation" > $@
perl -w $(GT) mysql.info "windows-source-build" "post-installation" > $@

# We put the description for the binary installation here so that
# people who download source wont have to see it. It is moved up to
# the toplevel by the script that makes the binary tar files.
INSTALL-BINARY: mysql.info $(GT)
perl -w $(GT) $< "installing-binary" "installing-source" > $@
perl -w $(GT) mysql.info "installing-binary" "installing-source" > $@

../EXCEPTIONS-CLIENT: mysql.info $(GT)
perl -w $(GT) $< "mysql-floss-license-exception" "function-index" > $@
perl -w $(GT) mysql.info "mysql-floss-license-exception" "function-index" > $@

../support-files/MacOSX/ReadMe.txt: mysql.info $(GT)
perl -w $(GT) $< "mac-os-x-installation" "netware-installation" > $@
perl -w $(GT) mysql.info "mac-os-x-installation" "netware-installation" > $@

# Don't update the files from bitkeeper
%::SCCS/s.%
@ -569,6 +569,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
return -1;
}
mysql_close(mysql); /* Close connection to avoid error messages */
argc=1; /* force SHUTDOWN to be the last command */
if (got_pidfile)
{
if (opt_verbose)
@ -1293,12 +1293,13 @@ at offset %lu ; this could be a log format error or read error",
}
else if (buf[4] == ROTATE_EVENT)
{
Log_event *ev;
my_b_seek(file, tmp_pos); /* seek back to event's start */
if (!Log_event::read_log_event(file, *description_event))
if (!(ev= Log_event::read_log_event(file, *description_event)))
/* EOF can't be hit here normally, so it's a real error */
die("Could not read a Rotate_log_event event \
at offset %lu ; this could be a log format error or read error",
tmp_pos);
die("Could not read a Rotate_log_event event at offset %lu ;"
" this could be a log format error or read error", tmp_pos);
delete ev;
}
else
break;
@ -1316,7 +1316,7 @@ static uint dump_routines_for_db(char *db)
fprintf(sql_file, "DELIMITER ;\n");
if (lock_tables)
mysql_query_with_error_report(sock, 0, "UNLOCK TABLES");
VOID(mysql_query_with_error_report(sock, 0, "UNLOCK TABLES"));
DBUG_RETURN(0);
}
@ -2139,7 +2139,10 @@ static void dump_table(char *table, char *db)
else
res=mysql_store_result(sock);
if (!res)
{
DB_error(sock, "when retrieving data from server");
goto err;
}
if (verbose)
fprintf(stderr, "-- Retrieving rows...\n");
if (mysql_num_fields(res) != num_fields)
@ -2794,7 +2797,7 @@ static int dump_all_tables_in_db(char *database)
check_io(md_result_file);
}
if (lock_tables)
mysql_query_with_error_report(sock, 0, "UNLOCK TABLES");
VOID(mysql_query_with_error_report(sock, 0, "UNLOCK TABLES"));
return 0;
} /* dump_all_tables_in_db */
@ -2849,23 +2852,23 @@ static my_bool dump_all_views_in_db(char *database)
check_io(md_result_file);
}
if (lock_tables)
mysql_query(sock,"UNLOCK TABLES");
VOID(mysql_query_with_error_report(sock, 0, "UNLOCK TABLES"));
return 0;
} /* dump_all_tables_in_db */

/*
get_actual_table_name -- executes a SHOW TABLES LIKE '%s' to get the actual
table name from the server for the table name given on the command line.
we do this because the table name given on the command line may be a
get_actual_table_name -- executes a SHOW TABLES LIKE '%s' to get the actual
table name from the server for the table name given on the command line.
we do this because the table name given on the command line may be a
different case (e.g. T1 vs t1)

RETURN
int - 0 if a tablename was retrieved. 1 if not
*/

static int get_actual_table_name(const char *old_table_name,
char *new_table_name,
static int get_actual_table_name(const char *old_table_name,
char *new_table_name,
int buf_size)
{
int retval;
@ -2877,7 +2880,7 @@ static int get_actual_table_name(const char *old_table_name,
/* Check memory for quote_for_like() */
DBUG_ASSERT(2*sizeof(old_table_name) < sizeof(show_name_buff));
my_snprintf(query, sizeof(query), "SHOW TABLES LIKE %s",
my_snprintf(query, sizeof(query), "SHOW TABLES LIKE %s",
quote_for_like(old_table_name, show_name_buff));

if (mysql_query_with_error_report(sock, 0, query))
@ -2886,7 +2889,7 @@ static int get_actual_table_name(const char *old_table_name,
}

retval = 1;

if ((table_res= mysql_store_result(sock)))
{
my_ulonglong num_rows= mysql_num_rows(table_res);
@ -3008,7 +3011,7 @@ static int dump_selected_tables(char *db, char **table_names, int tables)
check_io(md_result_file);
}
if (lock_tables)
mysql_query_with_error_report(sock, 0, "UNLOCK TABLES");
VOID(mysql_query_with_error_report(sock, 0, "UNLOCK TABLES"));
DBUG_RETURN(0);
} /* dump_selected_tables */
@ -676,8 +676,8 @@ history_load(History *h, const char *fname)
(void) strunvis(ptr, line);
line[sz] = c;
if (HENTER(h, &ev, ptr) == -1) {
h_free((ptr_t)ptr);
return -1;
i = -1;
goto oomem;
}
}
oomem:
configure.in (11)
@ -13,7 +13,8 @@ AM_CONFIG_HEADER(config.h)
PROTOCOL_VERSION=10
DOT_FRM_VERSION=6
# See the libtool docs for information on how to do shared lib versions.
SHARED_LIB_VERSION=15:0:0
SHARED_LIB_MAJOR_VERSION=15
SHARED_LIB_VERSION=$SHARED_LIB_MAJOR_VERSION:0:0

# Set all version vars based on $VERSION. How do we do this more elegant ?
# Remember that regexps needs to quote [ and ] since this is run through m4
@ -60,6 +61,7 @@ AC_DEFINE_UNQUOTED([PROTOCOL_VERSION], [$PROTOCOL_VERSION],
AC_SUBST(DOT_FRM_VERSION)
AC_DEFINE_UNQUOTED([DOT_FRM_VERSION], [$DOT_FRM_VERSION],
[Version of .frm files])
AC_SUBST(SHARED_LIB_MAJOR_VERSION)
AC_SUBST(SHARED_LIB_VERSION)
AC_SUBST(AVAILABLE_LANGUAGES)
@ -339,6 +341,13 @@ fi

MYSQL_PROG_AR

# libmysqlclient versioning when linked with GNU ld.
if $LD --version 2>/dev/null|grep -q GNU; then
LD_VERSION_SCRIPT="-Wl,--version-script=\$(top_srcdir)/libmysql/libmysql.ver"
AC_CONFIG_FILES(libmysql/libmysql.ver)
fi
AC_SUBST(LD_VERSION_SCRIPT)

# Avoid bug in fcntl on some versions of linux
AC_MSG_CHECKING("if we should use 'skip-locking' as default for $target_os")
# Any variation of Linux
@ -426,7 +426,8 @@ enum ha_base_keytype {
enum en_fieldtype {
FIELD_LAST=-1,FIELD_NORMAL,FIELD_SKIP_ENDSPACE,FIELD_SKIP_PRESPACE,
FIELD_SKIP_ZERO,FIELD_BLOB,FIELD_CONSTANT,FIELD_INTERVALL,FIELD_ZERO,
FIELD_VARCHAR,FIELD_CHECK
FIELD_VARCHAR,FIELD_CHECK,
FIELD_enum_val_count
};

enum data_file_type {
@ -77,7 +77,7 @@ mysysobjects2 = my_lib.lo
mysysobjects = $(mysysobjects1) $(mysysobjects2)
target_libadd = $(mysysobjects) $(mystringsobjects) $(dbugobjects) \
$(sql_cmn_objects) $(vio_objects) $(sqlobjects)
target_ldflags = -version-info @SHARED_LIB_VERSION@
target_ldflags = -version-info @SHARED_LIB_VERSION@ @LD_VERSION_SCRIPT@
vio_objects= vio.lo viosocket.lo viossl.lo viosslfactories.lo
CLEANFILES = $(target_libadd) $(SHLIBOBJS) \
$(target)
@ -818,7 +818,7 @@ my_bool handle_local_infile(MYSQL *mysql, const char *net_filename)
if ((*options->local_infile_init)(&li_ptr, net_filename,
options->local_infile_userdata))
{
my_net_write(net,"",0); /* Server needs one packet */
VOID(my_net_write(net,"",0)); /* Server needs one packet */
net_flush(net);
strmov(net->sqlstate, unknown_sqlstate);
net->last_errno= (*options->local_infile_error)(li_ptr,
@ -2817,7 +2817,7 @@ my_bool STDCALL mysql_stmt_attr_get(MYSQL_STMT *stmt,
{
switch (attr_type) {
case STMT_ATTR_UPDATE_MAX_LENGTH:
*(ulong*) value= stmt->update_max_length;
*(my_bool*) value= stmt->update_max_length;
break;
case STMT_ATTR_CURSOR_TYPE:
*(ulong*) value= stmt->flags;
libmysql/libmysql.ver.in (new file, 1)
@ -0,0 +1 @@
libmysqlclient_@SHARED_LIB_MAJOR_VERSION@ { global: *; };
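Note (not part of the commit): the one-line file above is a GNU ld version script; every symbol the client library exports gets tagged with the version node libmysqlclient_<major>, which is what the LD_VERSION_SCRIPT flag added in configure.in and Makefile.shared turns on. A minimal, hypothetical illustration of the same mechanism outside the MySQL build — the file names and toy library below are made up for the example:

    # demo.ver -- same shape as libmysql.ver.in after substitution
    echo 'libmysqlclient_15 { global: *; };' > demo.ver
    echo 'int demo_fn(void) { return 0; }' > demo.c

    # Link a shared object with the version script, then inspect the
    # dynamic symbol table: demo_fn now carries the libmysqlclient_15 node.
    cc -shared -fPIC -o libdemo.so demo.c -Wl,--version-script=demo.ver
    objdump -T libdemo.so | grep libmysqlclient_15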
@ -234,5 +234,10 @@ insert into t1 values('test test '),('test'),('test'),('test'),
('test'),('test'),('test'),('test'),('test'),('test'),('test'),('test'),
('test'),('test'),('test'),('test'),('test'),('test'),('test'),('test');
delete from t1 limit 1;
truncate table t1;
insert into t1 values('ab c d');
update t1 set a='ab c d';
select * from t1 where match a against('ab c' in boolean mode);
a
drop table t1;
set names latin1;
@ -1126,6 +1126,19 @@ DROP TABLE t1;
DROP VIEW v1;
DROP FUNCTION func1;
DROP FUNCTION func2;
create database mysqltest;
create table mysqltest.t1(a int);
select table_schema from information_schema.tables where table_schema='mysqltest';
table_schema
drop database mysqltest;
select column_type, group_concat(table_schema, '.', table_name), count(*) as num
from information_schema.columns where
table_schema='information_schema' and
(column_type = 'varchar(7)' or column_type = 'varchar(20)')
group by column_type order by num;
column_type group_concat(table_schema, '.', table_name) num
varchar(7) information_schema.ROUTINES,information_schema.VIEWS 2
varchar(20) information_schema.COLUMNS,information_schema.FILES,information_schema.FILES,information_schema.PLUGINS,information_schema.PLUGINS,information_schema.PLUGINS 6
select * from information_schema.engines WHERE ENGINE="MyISAM";
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
MyISAM ENABLED Default engine as of MySQL 3.23 with great performance NO NO NO
mysql-test/r/rpl_multi_update4.result (new file, 25)
@ -0,0 +1,25 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
drop database if exists d1;
drop database if exists d2;
drop database if exists d2;
create database d1;
create table d1.t0 (id int);
create database d2;
use d2;
create table t1 (id int);
create table t2 (id int);
insert into t1 values (1), (2), (3), (4), (5);
insert into t2 select id + 3 from t1;
update t1 join t2 using (id) set t1.id = 0;
insert into d1.t0 values (0);
use d1;
select * from t0 where id=0;
id
0
drop database d1;
drop database d2;
@ -401,5 +401,18 @@ drop function f1;
select * from t1;
a
1
DROP PROCEDURE IF EXISTS p1;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1(col VARCHAR(10));
CREATE PROCEDURE p1(arg VARCHAR(10))
INSERT INTO t1 VALUES(arg);
CALL p1('test');
SELECT * FROM t1;
col
test
SELECT * FROM t1;
col
test
DROP PROCEDURE p1;
drop table t1;
reset master;
@ -1115,3 +1115,22 @@ ERROR HY000: View 'test.v1' references invalid table(s) or column(s) or function
drop function bug11555_1;
drop table t1;
drop view v1;
drop procedure if exists ` bug15658`;
create procedure ``() select 1;
ERROR 42000: Incorrect routine name ''
create procedure ` `() select 1;
ERROR 42000: Incorrect routine name ' '
create procedure `bug15658 `() select 1;
ERROR 42000: Incorrect routine name 'bug15658 '
create procedure ``.bug15658() select 1;
ERROR 42000: Incorrect database name ''
create procedure `x `.bug15658() select 1;
ERROR 42000: Incorrect database name 'x '
create procedure ` bug15658`() select 1;
call ` bug15658`();
1
1
show procedure status;
Db Name Type Definer Modified Created Security_type Comment
test bug15658 PROCEDURE root@localhost 0000-00-00 00:00:00 0000-00-00 00:00:00 DEFINER
drop procedure ` bug15658`;
@ -263,3 +263,24 @@ CREATE VIEW v1 AS SELECT test.bug12812()|
ERROR 42000: execute command denied to user 'user_bug12812'@'localhost' for routine 'test.bug12812'
DROP USER user_bug12812@localhost|
drop function bug12812|
create database db_bug14834;
create user user1_bug14834@localhost identified by '';
grant all on `db\_bug14834`.* to user1_bug14834@localhost;
create user user2_bug14834@localhost identified by '';
grant all on `db\_bug14834`.* to user2_bug14834@localhost;
create user user3_bug14834@localhost identified by '';
grant all on `db__ug14834`.* to user3_bug14834@localhost;
create procedure p_bug14834() select user(), current_user();
call p_bug14834();
user() current_user()
user1_bug14834@localhost user1_bug14834@localhost
call p_bug14834();
user() current_user()
user2_bug14834@localhost user1_bug14834@localhost
call p_bug14834();
user() current_user()
user3_bug14834@localhost user1_bug14834@localhost
drop user user1_bug14834@localhost;
drop user user2_bug14834@localhost;
drop user user3_bug14834@localhost;
drop database db_bug14834;
@ -1178,8 +1178,8 @@ drop view v2|
delete from t1 |
delete from t2 |
drop table t4|
drop table if exists fac|
create table fac (n int unsigned not null primary key, f bigint unsigned)|
drop table if exists t3|
create table t3 (n int unsigned not null primary key, f bigint unsigned)|
drop procedure if exists ifac|
create procedure ifac(n int unsigned)
begin
@ -1189,13 +1189,13 @@ set n = 20; # bigint overflow otherwise
end if;
while i <= n do
begin
insert into test.fac values (i, fac(i));
insert into test.t3 values (i, fac(i));
set i = i + 1;
end;
end while;
end|
call ifac(20)|
select * from fac|
select * from t3|
n f
1 1
2 2
@ -1217,7 +1217,7 @@ n f
18 6402373705728000
19 121645100408832000
20 2432902008176640000
drop table fac|
drop table t3|
show function status like '%f%'|
Db Name Type Definer Modified Created Security_type Comment
test fac FUNCTION root@localhost 0000-00-00 00:00:00 0000-00-00 00:00:00 DEFINER
@ -1225,12 +1225,12 @@ drop procedure ifac|
drop function fac|
show function status like '%f%'|
Db Name Type Definer Modified Created Security_type Comment
drop table if exists primes|
create table primes (
drop table if exists t3|
create table t3 (
i int unsigned not null primary key,
p bigint unsigned not null
)|
insert into primes values
insert into t3 values
( 0, 3), ( 1, 5), ( 2, 7), ( 3, 11), ( 4, 13),
( 5, 17), ( 6, 19), ( 7, 23), ( 8, 29), ( 9, 31),
(10, 37), (11, 41), (12, 43), (13, 47), (14, 53),
@ -1253,7 +1253,7 @@ set b = b+200, s = 0;
else
begin
declare p bigint unsigned;
select t.p into p from test.primes t where t.i = s;
select t.p into p from test.t3 t where t.i = s;
if b+p > r then
set pp = 1;
leave again;
@ -1278,7 +1278,7 @@ begin
declare pp bool default 0;
call opp(p, pp);
if pp then
insert into test.primes values (i, p);
insert into test.t3 values (i, p);
set i = i+1;
end if;
set p = p+2;
@ -1299,7 +1299,7 @@ set b = b+200, s = 0;
else
begin
declare p bigint unsigned;
select t.p into p from test.primes t where t.i = s;
select t.p into p from test.t3 t where t.i = s;
if b+p > r then
set pp = 1;
leave again;
@ -1318,47 +1318,47 @@ Db Name Type Definer Modified Created Security_type Comment
test ip PROCEDURE root@localhost 0000-00-00 00:00:00 0000-00-00 00:00:00 DEFINER
test opp PROCEDURE root@localhost 0000-00-00 00:00:00 0000-00-00 00:00:00 DEFINER
call ip(200)|
select * from primes where i=45 or i=100 or i=199|
select * from t3 where i=45 or i=100 or i=199|
i p
45 211
100 557
199 1229
drop table primes|
drop table t3|
drop procedure opp|
drop procedure ip|
show procedure status like '%p%'|
Db Name Type Definer Modified Created Security_type Comment
drop table if exists fib|
create table fib ( f bigint unsigned not null )|
drop table if exists t3|
create table t3 ( f bigint unsigned not null )|
drop procedure if exists fib|
create procedure fib(n int unsigned)
begin
if n > 1 then
begin
declare x, y bigint unsigned;
declare c cursor for select f from fib order by f desc limit 2;
declare c cursor for select f from t3 order by f desc limit 2;
open c;
fetch c into y;
fetch c into x;
close c;
insert into fib values (x+y);
insert into t3 values (x+y);
call fib(n-1);
end;
end if;
end|
set @@max_sp_recursion_depth= 20|
insert into fib values (0), (1)|
insert into t3 values (0), (1)|
call fib(3)|
select * from fib order by f asc|
select * from t3 order by f asc|
f
0
1
1
2
delete from fib|
insert into fib values (0), (1)|
call fib(20)|
select * from fib order by f asc|
delete from t3|
insert into t3 values (0), (1)|
call fib(10)|
select * from t3 order by f asc|
f
0
1
@ -1371,17 +1371,7 @@ f
21
34
55
89
144
233
377
610
987
1597
2584
4181
6765
drop table fib|
drop table t3|
drop procedure fib|
set @@max_sp_recursion_depth= 0|
drop procedure if exists bar|
@ -1,4 +1,4 @@
drop table if exists t1, t2;
drop table if exists t1, t2, t3;
drop procedure if exists bug8850|
create table t1 (a int) engine=innodb|
create procedure bug8850()
@ -215,9 +215,9 @@ select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from
a
select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4;
b (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)
8 7.5000
8 4.5000
9 7.5000
8 7.5
8 4.5
9 7.5
explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t4 ALL NULL NULL NULL NULL 3
@ -46,6 +46,7 @@ db CREATE TABLE `db` (
`Alter_routine_priv` enum('N','Y') character set utf8 NOT NULL default 'N',
`Execute_priv` enum('N','Y') character set utf8 NOT NULL default 'N',
`Event_priv` enum('N','Y') character set utf8 NOT NULL default 'N',
`Trigger_priv` enum('N','Y') character set utf8 NOT NULL default 'N',
PRIMARY KEY (`Host`,`Db`,`User`),
KEY `User` (`User`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Database privileges'
@ -71,6 +72,7 @@ host CREATE TABLE `host` (
`Create_routine_priv` enum('N','Y') character set utf8 NOT NULL default 'N',
`Alter_routine_priv` enum('N','Y') character set utf8 NOT NULL default 'N',
`Execute_priv` enum('N','Y') character set utf8 NOT NULL default 'N',
`Trigger_priv` enum('N','Y') character set utf8 NOT NULL default 'N',
PRIMARY KEY (`Host`,`Db`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Host privileges; Merged with database privileges'
show create table user;
@ -106,6 +108,7 @@ user CREATE TABLE `user` (
`Alter_routine_priv` enum('N','Y') character set utf8 NOT NULL default 'N',
`Create_user_priv` enum('N','Y') character set utf8 NOT NULL default 'N',
`Event_priv` enum('N','Y') character set utf8 NOT NULL default 'N',
`Trigger_priv` enum('N','Y') character set utf8 NOT NULL default 'N',
`ssl_type` enum('','ANY','X509','SPECIFIED') character set utf8 NOT NULL default '',
`ssl_cipher` blob NOT NULL,
`x509_issuer` blob NOT NULL,
@ -134,7 +137,7 @@ tables_priv CREATE TABLE `tables_priv` (
`Table_name` char(64) collate utf8_bin NOT NULL default '',
`Grantor` char(77) collate utf8_bin NOT NULL default '',
`Timestamp` timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
`Table_priv` set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view') character set utf8 NOT NULL default '',
`Table_priv` set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger') character set utf8 NOT NULL default '',
`Column_priv` set('Select','Insert','Update','References') character set utf8 NOT NULL default '',
PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`),
KEY `Grantor` (`Grantor`)
@ -240,6 +240,28 @@ t3 CREATE TABLE `t3` (
`d` double(22,9) default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1, t2, t3;
create table t1 select 105213674794682365.00 + 0.0 x;
show warnings;
Level Code Message
desc t1;
Field Type Null Key Default Extra
x decimal(21,2) unsigned NO 0.00
drop table t1;
create table t1 select 0.0 x;
desc t1;
Field Type Null Key Default Extra
x decimal(2,1) unsigned NO 0.0
create table t2 select 105213674794682365.00 y;
desc t2;
Field Type Null Key Default Extra
y decimal(20,2) unsigned NO 0.00
create table t3 select x+y a from t1,t2;
show warnings;
Level Code Message
desc t3;
Field Type Null Key Default Extra
a decimal(21,2) unsigned NO 0.00
drop table t1,t2,t3;
create table t1 (s1 float(0,2));
ERROR 42000: For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column 's1').
create table t1 (s1 float(1,2));
@ -415,3 +415,10 @@ t1 CREATE TABLE `t1` (
KEY `index1` (`f1`(10))
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1(f1 VARCHAR(100) DEFAULT 'test');
INSERT INTO t1 VALUES(SUBSTR(f1, 1, 3));
DROP TABLE IF EXISTS t1;
CREATE TABLE t1(f1 CHAR(100) DEFAULT 'test');
INSERT INTO t1 VALUES(SUBSTR(f1, 1, 3));
DROP TABLE IF EXISTS t1;
@ -29,8 +29,6 @@ rpl_sp : Bug #16456
ndb_autodiscover : Needs to be fixed w.r.t binlog
ndb_autodiscover2 : Needs to be fixed w.r.t binlog
ndb_blob : BLOB replication causes core in master1 (Pekka will fix)
system_mysql_db : Needs fixing
system_mysql_db_fix : Needs fixing
#ndb_alter_table_row : sometimes wrong error 1015!=1046
ndb_gis : garbled msgs from corrupt THD*
ndb_binlog_ddl_multi : Bug #17038
@ -209,6 +209,14 @@ insert into t1 values('test test '),('test'),('test'),('test'),
('test'),('test'),('test'),('test'),('test'),('test'),('test'),('test'),
('test'),('test'),('test'),('test'),('test'),('test'),('test'),('test');
delete from t1 limit 1;

#
# BUG#16489: utf8 + fulltext leads to corrupt index file.
#
truncate table t1;
insert into t1 values('ab c d');
update t1 set a='ab c d';
select * from t1 where match a against('ab c' in boolean mode);
drop table t1;
set names latin1;
@ -121,7 +121,9 @@ drop table t1;
#
# InnoDB is required to reproduce the fault, but it is okay if we default to
# MyISAM when testing.
--disable_warnings
create table t1 (a varchar(90), ts datetime not null, index (a)) engine=innodb default charset=utf8;
--enable_warnings
insert into t1 values ('http://www.foo.com/', now());
select a from t1 where a='http://www.foo.com/' order by abs(timediff(ts, 0));
drop table t1;
@ -794,6 +794,29 @@ DROP TABLE t1;
DROP VIEW v1;
DROP FUNCTION func1;
DROP FUNCTION func2;

#
# Bug #15851 Unlistable directories yield no info from information_schema
#
create database mysqltest;
create table mysqltest.t1(a int);
--exec chmod -r $MYSQL_TEST_DIR/var/master-data/mysqltest
select table_schema from information_schema.tables where table_schema='mysqltest';
--exec chmod +r $MYSQL_TEST_DIR/var/master-data/mysqltest
drop database mysqltest;

#
# Bug#15307 GROUP_CONCAT() with ORDER BY returns empty set on information_schema
#
select column_type, group_concat(table_schema, '.', table_name), count(*) as num
from information_schema.columns where
table_schema='information_schema' and
(column_type = 'varchar(7)' or column_type = 'varchar(20)')
group by column_type order by num;

#
# End of 5.0 tests.
#
# Show engines
#
mysql-test/t/rpl_multi_update4-slave.opt (new file, 1)
@ -0,0 +1 @@
--replicate-wild-do-table=d1.%
mysql-test/t/rpl_multi_update4.test (new file, 44)
@ -0,0 +1,44 @@
# Let's verify that multi-update is not always skipped by slave if
# some replicate-* rules exist.
# (BUG#15699)

source include/master-slave.inc;

### Clean-up

connection master;
--disable_warnings
drop database if exists d1;
drop database if exists d2;

connection slave;
drop database if exists d2;
--enable_warnings

### Test

connection master;
create database d1; # accepted by slave
create table d1.t0 (id int);
create database d2; # ignored by slave
use d2;
create table t1 (id int);
create table t2 (id int);
insert into t1 values (1), (2), (3), (4), (5);
insert into t2 select id + 3 from t1;
# a problematic query which must be filter out by slave
update t1 join t2 using (id) set t1.id = 0;
insert into d1.t0 values (0); # replication works

sync_slave_with_master;
use d1;
#connection slave;
select * from t0 where id=0; # must find

### Clean-up
connection master;
drop database d1;
drop database d2;

# End of test
@ -403,6 +403,42 @@ sync_slave_with_master;
connection slave;
select * from t1;

#
# Bug#16621 "INSERTs in Stored Procedures causes data corruption in the Binary
# Log for 5.0.18"
#

# Prepare environment.

connection master;

--disable_warnings
DROP PROCEDURE IF EXISTS p1;
DROP TABLE IF EXISTS t1;
--enable_warnings

# Test case.

CREATE TABLE t1(col VARCHAR(10));

CREATE PROCEDURE p1(arg VARCHAR(10))
INSERT INTO t1 VALUES(arg);

CALL p1('test');

SELECT * FROM t1;

sync_slave_with_master;
connection slave;

SELECT * FROM t1;

# Cleanup.

connection master;

DROP PROCEDURE p1;

# cleanup
connection master;
drop table t1;
@ -1534,6 +1534,7 @@ drop procedure bug13012_1|
drop function bug13012_2|
delimiter ;|

#
# BUG#11555 "Stored procedures: current SP tables locking make
# impossible view security". We should not expose names of tables
# which are implicitly used by view (via stored routines/triggers).
@ -1594,7 +1595,33 @@ drop function bug11555_1;
drop table t1;
drop view v1;

#
# BUG#15658: Server crashes after creating function as empty string
#
--disable_warnings
drop procedure if exists ` bug15658`;
--enable_warnings

--error ER_SP_WRONG_NAME
create procedure ``() select 1;
--error ER_SP_WRONG_NAME
create procedure ` `() select 1;
--error ER_SP_WRONG_NAME
create procedure `bug15658 `() select 1;
--error ER_WRONG_DB_NAME
create procedure ``.bug15658() select 1;
--error ER_WRONG_DB_NAME
create procedure `x `.bug15658() select 1;

# This should work
create procedure ` bug15658`() select 1;
call ` bug15658`();
--replace_column 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00'
show procedure status;
drop procedure ` bug15658`;

#
# BUG#NNNN: New bug synopsis
#
#--disable_warnings
@ -437,4 +437,48 @@ disconnect test_user_12812|
DROP USER user_bug12812@localhost|
drop function bug12812|
delimiter ;|

#
# BUG#14834: Server denies to execute Stored Procedure
#
# The problem here was with '_' in the database name.
#
create database db_bug14834;

create user user1_bug14834@localhost identified by '';
# The exact name of the database (no wildcard)
grant all on `db\_bug14834`.* to user1_bug14834@localhost;

create user user2_bug14834@localhost identified by '';
# The exact name of the database (no wildcard)
grant all on `db\_bug14834`.* to user2_bug14834@localhost;

create user user3_bug14834@localhost identified by '';
# Wildcards in the database name
grant all on `db__ug14834`.* to user3_bug14834@localhost;

connect (user1_bug14834,localhost,user1_bug14834,,db_bug14834);
# Create the procedure and check that we can call it
create procedure p_bug14834() select user(), current_user();
call p_bug14834();

connect (user2_bug14834,localhost,user2_bug14834,,db_bug14834);
# This didn't work before
call p_bug14834();

connect (user3_bug14834,localhost,user3_bug14834,,db_bug14834);
# Should also work
call p_bug14834();

# Cleanup
connection default;
disconnect user1_bug14834;
disconnect user2_bug14834;
disconnect user3_bug14834;
drop user user1_bug14834@localhost;
drop user user2_bug14834@localhost;
drop user user3_bug14834@localhost;
drop database db_bug14834;

# End of 5.0 bugs.
@ -1419,9 +1419,9 @@ drop table t4|
# fac

--disable_warnings
drop table if exists fac|
drop table if exists t3|
--enable_warnings
create table fac (n int unsigned not null primary key, f bigint unsigned)|
create table t3 (n int unsigned not null primary key, f bigint unsigned)|

--disable_warnings
drop procedure if exists ifac|
@ -1435,15 +1435,15 @@ begin
end if;
while i <= n do
begin
insert into test.fac values (i, fac(i));
insert into test.t3 values (i, fac(i));
set i = i + 1;
end;
end while;
end|

call ifac(20)|
select * from fac|
drop table fac|
select * from t3|
drop table t3|
--replace_column 4 'root@localhost' 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00'
show function status like '%f%'|
drop procedure ifac|
@ -1455,15 +1455,15 @@ show function status like '%f%'|
# primes

--disable_warnings
drop table if exists primes|
drop table if exists t3|
--enable_warnings

create table primes (
create table t3 (
i int unsigned not null primary key,
p bigint unsigned not null
)|

insert into primes values
insert into t3 values
( 0, 3), ( 1, 5), ( 2, 7), ( 3, 11), ( 4, 13),
( 5, 17), ( 6, 19), ( 7, 23), ( 8, 29), ( 9, 31),
(10, 37), (11, 41), (12, 43), (13, 47), (14, 53),
@ -1492,7 +1492,7 @@ begin
begin
declare p bigint unsigned;

select t.p into p from test.primes t where t.i = s;
select t.p into p from test.t3 t where t.i = s;
if b+p > r then
set pp = 1;
leave again;
@ -1523,7 +1523,7 @@ begin

call opp(p, pp);
if pp then
insert into test.primes values (i, p);
insert into test.t3 values (i, p);
set i = i+1;
end if;
set p = p+2;
@ -1545,8 +1545,8 @@ call ip(200)|
# 45 211
# 100 557
# 199 1229
select * from primes where i=45 or i=100 or i=199|
drop table primes|
select * from t3 where i=45 or i=100 or i=199|
drop table t3|
drop procedure opp|
drop procedure ip|
--replace_column 4 'root@localhost' 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00'
@ -1556,9 +1556,9 @@ show procedure status like '%p%'|
# Fibonacci, for recursion test. (Yet Another Numerical series :)
#
--disable_warnings
drop table if exists fib|
drop table if exists t3|
--enable_warnings
create table fib ( f bigint unsigned not null )|
create table t3 ( f bigint unsigned not null )|

# We deliberately do it the awkward way, fetching the last two
# values from the table, in order to exercise various statements
@ -1571,13 +1571,13 @@ begin
if n > 1 then
begin
declare x, y bigint unsigned;
declare c cursor for select f from fib order by f desc limit 2;
declare c cursor for select f from t3 order by f desc limit 2;

open c;
fetch c into y;
fetch c into x;
close c;
insert into fib values (x+y);
insert into t3 values (x+y);
call fib(n-1);
end;
end if;
@ -1588,22 +1588,23 @@ set @@max_sp_recursion_depth= 20|

# Minimum test: recursion of 3 levels

insert into fib values (0), (1)|
insert into t3 values (0), (1)|

call fib(3)|

select * from fib order by f asc|
select * from t3 order by f asc|

delete from fib|
delete from t3|

# Original test: 20 levels (may run into memory limits!)
# The original test, 20 levels, ran into memory limits on some machines
# and builds. Try 10 instead...

insert into fib values (0), (1)|
insert into t3 values (0), (1)|

call fib(20)|
call fib(10)|

select * from fib order by f asc|
drop table fib|
select * from t3 order by f asc|
drop table t3|
drop procedure fib|
set @@max_sp_recursion_depth= 0|
@ -5,7 +5,7 @@
-- source include/have_innodb.inc

--disable_warnings
drop table if exists t1, t2;
drop table if exists t1, t2, t3;
--enable_warnings

delimiter |;
@ -159,6 +159,23 @@ show create table t3;
drop table t1, t2, t3;

#
# Bug #9855 (inconsistent column type for create select
#
create table t1 select 105213674794682365.00 + 0.0 x;
show warnings;
desc t1;
drop table t1;

create table t1 select 0.0 x;
desc t1;
create table t2 select 105213674794682365.00 y;
desc t2;
create table t3 select x+y a from t1,t2;
show warnings;
desc t3;
drop table t1,t2,t3;

# End of 4.1 tests

#
@ -130,3 +130,19 @@ show create table t1;
alter table t1 modify f1 tinytext;
show create table t1;
drop table t1;

#
# BUG#15588: String overrun
#

--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

CREATE TABLE t1(f1 VARCHAR(100) DEFAULT 'test');
INSERT INTO t1 VALUES(SUBSTR(f1, 1, 3));
DROP TABLE IF EXISTS t1;

CREATE TABLE t1(f1 CHAR(100) DEFAULT 'test');
INSERT INTO t1 VALUES(SUBSTR(f1, 1, 3));
DROP TABLE IF EXISTS t1;
@ -197,7 +197,7 @@ int modify_defaults_file(const char *file_location, const char *option,
goto err;
}
if (my_fclose(cnf_file, MYF(MY_WME)))
goto err;
DBUG_RETURN(1);

my_free(file_buffer, MYF(0));
DBUG_RETURN(0);
@ -79,7 +79,7 @@ my_off_t my_b_safe_tell(IO_CACHE *info)

void my_b_seek(IO_CACHE *info,my_off_t pos)
{
my_off_t offset;
my_off_t offset;
DBUG_ENTER("my_b_seek");
DBUG_PRINT("enter",("pos: %lu", (ulong) pos));

@ -91,10 +91,10 @@ void my_b_seek(IO_CACHE *info,my_off_t pos)
b) see if there is a better way to make it work
*/
if (info->type == SEQ_READ_APPEND)
flush_io_cache(info);

VOID(flush_io_cache(info));

offset=(pos - info->pos_in_file);

if (info->type == READ_CACHE || info->type == SEQ_READ_APPEND)
{
/* TODO: explain why this works if pos < info->pos_in_file */
@ -119,7 +119,7 @@ void my_b_seek(IO_CACHE *info,my_off_t pos)
info->write_pos = info->write_buffer + offset;
DBUG_VOID_RETURN;
}
flush_io_cache(info);
VOID(flush_io_cache(info));
/* Correct buffer end so that we write in increments of IO_SIZE */
info->write_end=(info->write_buffer+info->buffer_length-
(pos & (IO_SIZE-1)));
@ -114,7 +114,7 @@ my_bool bitmap_init(MY_BITMAP *map, uint32 *buf, uint n_bits,
#endif
;
if (!(buf= (uint32*) my_malloc(size_in_bytes, MYF(MY_WME))))
return 1;
DBUG_RETURN(1);
}
#ifdef THREAD
else
@ -189,7 +189,10 @@ int my_error_register(const char **errmsgs, int first, int last)

/* Error numbers must be unique. No overlapping is allowed. */
if (*search_meh_pp && ((*search_meh_pp)->meh_first <= last))
{
my_free((gptr)meh_p, MYF(0));
return 1;
}

/* Insert header into the chain. */
meh_p->meh_next= *search_meh_pp;
@ -170,6 +170,8 @@ MY_DIR *my_dir(const char *path, myf MyFlags)
bzero(finfo.mystat, sizeof(MY_STAT));
VOID(strmov(tmp_file,dp->d_name));
VOID(my_stat(tmp_path, finfo.mystat, MyFlags));
if (!(finfo.mystat->st_mode & MY_S_IREAD))
continue;
}
else
finfo.mystat= NULL;
@ -674,5 +674,9 @@ ALTER TABLE event ADD sql_mode
SET @hadTriggerPriv := 0;
SELECT @hadTriggerPriv :=1 FROM user WHERE Trigger_priv LIKE '%';

ALTER TABLE user add Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
ALTER TABLE user ADD Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Event_priv;
ALTER TABLE host ADD Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
ALTER TABLE db ADD Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
ALTER TABLE tables_priv MODIFY Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger') COLLATE utf8_general_ci DEFAULT '' NOT NULL;

UPDATE user SET Trigger_priv=Super_priv WHERE @hadTriggerPriv = 0;
@ -107,7 +107,7 @@ Token shift_token(const char **text, uint *word_len)
int get_text_id(const char **text, uint *word_len, const char **id)
{
get_word(text, word_len);
if (word_len == 0)
if (*word_len == 0)
return 1;
*id= *text;
return 0;
@ -599,7 +599,7 @@ net_safe_read(MYSQL *mysql)
DBUG_PRINT("error",("Wrong connection or packet. fd: %s len: %d",
vio_description(net->vio),len));
#ifdef MYSQL_SERVER
if (vio_was_interrupted(net->vio))
if (net->vio && vio_was_interrupted(net->vio))
return (packet_error);
#endif /*MYSQL_SERVER*/
end_server(mysql);
sql/field.cc (30)
@ -1565,7 +1565,6 @@ Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table,
bool Field::quote_data(String *unquoted_string)
{
char escaped_string[IO_SIZE];
char *unquoted_string_buffer= (char *)(unquoted_string->ptr());
DBUG_ENTER("Field::quote_data");

if (!needs_quotes())
@ -4545,8 +4544,6 @@ int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs)
error= 1;
}
}
if (error > 1)
error= 2;

#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@ -5880,7 +5877,7 @@ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs)
field_length/
field_charset->mbmaxlen,
&well_formed_error);
memcpy(ptr,from,copy_length);
memmove(ptr, from, copy_length);

/* Append spaces if the string was shorter than the field. */
if (copy_length < field_length)
@ -6296,7 +6293,7 @@ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs)
field_length/
field_charset->mbmaxlen,
&well_formed_error);
memcpy(ptr + length_bytes, from, copy_length);
memmove(ptr + length_bytes, from, copy_length);
if (length_bytes == 1)
*ptr= (uchar) copy_length;
else
@ -7113,7 +7110,7 @@ void Field_blob::get_key_image(char *buff, uint length, imagetype type)
}
get_ptr(&blob);
gobj= Geometry::construct(&buffer, blob, blob_length);
if (gobj->get_mbr(&mbr, &dummy))
if (!gobj || gobj->get_mbr(&mbr, &dummy))
bzero(buff, SIZEOF_STORED_DOUBLE*4);
else
{
@ -7442,7 +7439,7 @@ void Field_geom::get_key_image(char *buff, uint length, imagetype type)
}
get_ptr(&blob);
gobj= Geometry::construct(&buffer, blob, blob_length);
if (gobj->get_mbr(&mbr, &dummy))
if (!gobj || gobj->get_mbr(&mbr, &dummy))
bzero(buff, SIZEOF_STORED_DOUBLE*4);
else
{
@ -8239,16 +8236,13 @@ const char *Field_bit::unpack(char *to, const char *from)
*/

Field_bit_as_char::Field_bit_as_char(char *ptr_arg, uint32 len_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
uchar *bit_ptr_arg, uchar bit_ofs_arg,
enum utype unireg_check_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg,
const char *field_name_arg)
:Field_bit(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, bit_ptr_arg,
bit_ofs_arg, unireg_check_arg, field_name_arg),
:Field_bit(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, 0, 0,
unireg_check_arg, field_name_arg),
create_length(len_arg)
{
bit_ptr= 0;
bit_ofs= 0;
bit_len= 0;
field_length= ((len_arg + 7) & ~7) / 8;
}
@ -8950,10 +8944,10 @@ Field *make_field(TABLE_SHARE *share, char *ptr, uint32 field_length,
field_charset);
case FIELD_TYPE_BIT:
return f_bit_as_char(pack_flag) ?
new Field_bit_as_char(ptr, field_length, null_pos, null_bit,
bit_ptr, bit_offset, unireg_check, field_name) :
new Field_bit(ptr, field_length, null_pos, null_bit, bit_ptr,
bit_offset, unireg_check, field_name);
new Field_bit_as_char(ptr, field_length, null_pos, null_bit,
unireg_check, field_name) :
new Field_bit(ptr, field_length, null_pos, null_bit, bit_ptr,
bit_offset, unireg_check, field_name);

default: // Impossible (Wrong version)
break;
@ -1373,12 +1373,12 @@ public:
}
};

class Field_bit_as_char: public Field_bit {
public:
uchar create_length;
Field_bit_as_char(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg);
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
uint32 max_length() { return (uint32) create_length; }
@ -139,6 +139,10 @@ static HASH archive_open_tables;

/* Static declarations for handerton */
static handler *archive_create_handler(TABLE_SHARE *table);
/*
Number of rows that will force a bulk insert.
*/
#define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2

/* dummy handlerton - only to have something to return from archive_db_init */
@ -1302,7 +1306,8 @@ void ha_archive::info(uint flag)
void ha_archive::start_bulk_insert(ha_rows rows)
{
DBUG_ENTER("ha_archive::start_bulk_insert");
bulk_insert= TRUE;
if (!rows || rows >= ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT)
bulk_insert= TRUE;
DBUG_VOID_RETURN;
}
@ -286,7 +286,8 @@ int ha_myisam::dump(THD* thd, int fd)

if (fd < 0)
{
my_net_write(net, "", 0);
if (my_net_write(net, "", 0))
error = errno ? errno : EPIPE;
net_flush(net);
}

@ -420,12 +421,14 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
{
uint old_testflag=param.testflag;
param.testflag|=T_MEDIUM;
init_io_cache(&param.read_cache, file->dfile,
my_default_record_cache_size, READ_CACHE,
share->pack.header_length, 1, MYF(MY_WME));
error |= chk_data_link(&param, file, param.testflag & T_EXTEND);
end_io_cache(&(param.read_cache));
param.testflag=old_testflag;
if (!(error= init_io_cache(&param.read_cache, file->dfile,
my_default_record_cache_size, READ_CACHE,
share->pack.header_length, 1, MYF(MY_WME))))
{
error= chk_data_link(&param, file, param.testflag & T_EXTEND);
end_io_cache(&(param.read_cache));
}
param.testflag= old_testflag;
}
}
if (!error)
@ -985,7 +985,7 @@ Item_case_expr::this_item_addr(THD *thd, Item **)

void Item_case_expr::print(String *str)
{
str->append(STRING_WITH_LEN("case_expr@"));
VOID(str->append(STRING_WITH_LEN("case_expr@")));
str->qs_append(m_case_expr_id);
}
@ -3868,7 +3868,7 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length)
name);
break;
case MYSQL_TYPE_BIT:
field= new Field_bit_as_char(NULL, max_length, null_ptr, 0, NULL, 0,
field= new Field_bit_as_char(NULL, max_length, null_ptr, 0,
Field::NONE, name);
break;
default:
@ -431,12 +431,19 @@ my_decimal *Item_real_func::val_decimal(my_decimal *decimal_value)

void Item_func::fix_num_length_and_dec()
{
decimals= 0;
uint fl_length= 0;
decimals=0;
for (uint i=0 ; i < arg_count ; i++)
{
set_if_bigger(decimals, args[i]->decimals);
set_if_bigger(decimals,args[i]->decimals);
set_if_bigger(fl_length, args[i]->max_length);
}
max_length=float_length(decimals);
if (fl_length > max_length)
{
decimals= NOT_FIXED_DEC;
max_length= float_length(NOT_FIXED_DEC);
}
max_length= float_length(decimals);
}
@ -3342,6 +3342,10 @@ void Intvar_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
case INSERT_ID_EVENT:
msg="INSERT_ID";
break;
case INVALID_INT_EVENT:
default: // cannot happen
msg="INVALID_INT";
break;
}
fprintf(file, "%s=%s;\n", msg, llstr(val,llbuff));
fflush(file);
@ -5130,7 +5130,7 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite,
"Set to 0 (write and flush once per second), 1 (write and flush at each commit) or 2 (write at commit, flush once per second).",
(gptr*) &innobase_flush_log_at_trx_commit,
(gptr*) &innobase_flush_log_at_trx_commit,
0, GET_UINT, OPT_ARG, 1, 0, 2, 0, 0, 0},
0, GET_ULONG, OPT_ARG, 1, 0, 2, 0, 0, 0},
{"innodb_flush_method", OPT_INNODB_FLUSH_METHOD,
"With which method to flush data.", (gptr*) &innobase_unix_file_flush_method,
(gptr*) &innobase_unix_file_flush_method, 0, GET_STR, REQUIRED_ARG, 0, 0, 0,
@ -753,7 +753,6 @@ SQL_SELECT *make_select(TABLE *head, table_map const_tables,
table_map read_tables, COND *conds,
bool allow_null_cond,
int *error)

{
SQL_SELECT *select;
DBUG_ENTER("make_select");
@ -7059,10 +7058,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
if (!quick)
return 0; /* no ranges found */
if (quick->init())
{
delete quick;
goto err;
}
quick->records= records;

if (cp_buffer_from_ref(thd,ref) && thd->is_fatal_error ||
@ -8404,7 +8400,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
ha_rows cur_records;
SEL_ARG *cur_index_tree= NULL;
ha_rows cur_quick_prefix_records= 0;
uint cur_param_idx;
uint cur_param_idx=MAX_KEY;
key_map cur_used_key_parts;
uint pk= param->table->s->primary_key;

@ -8620,6 +8616,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
*/
if (cur_read_cost < best_read_cost - (DBL_EPSILON * cur_read_cost))
{
DBUG_ASSERT(tree != 0 || cur_param_idx == MAX_KEY);
index_info= cur_index_info;
index= cur_index;
best_read_cost= cur_read_cost;
@ -706,7 +706,7 @@ class SQL_SELECT :public Sql_alloc {
class FT_SELECT: public QUICK_RANGE_SELECT {
public:
FT_SELECT(THD *thd, TABLE *table, uint key) :
QUICK_RANGE_SELECT (thd, table, key, 1) { init(); }
QUICK_RANGE_SELECT (thd, table, key, 1) { VOID(init()); }
~FT_SELECT() { file->ft_end(); }
int init() { return error=file->ft_init(); }
int reset() { return 0; }
@ -601,9 +601,23 @@ bool Protocol::send_fields(List<Item> *list, uint flags)
else
{
/* With conversion */
uint max_char_len;
int2store(pos, thd_charset->number);
uint char_len= field.length / item->collation.collation->mbmaxlen;
int4store(pos+2, char_len * thd_charset->mbmaxlen);
/*
For TEXT/BLOB columns, field_length describes the maximum data
length in bytes. There is no limit to the number of characters
that a TEXT column can store, as long as the data fits into
the designated space.
For the rest of textual columns, field_length is evaluated as
char_count * mbmaxlen, where character count is taken from the
definition of the column. In other words, the maximum number
of characters here is limited by the column definition.
*/
max_char_len= (field.type >= (int) MYSQL_TYPE_TINY_BLOB &&
field.type <= (int) MYSQL_TYPE_BLOB) ?
field.length / item->collation.collation->mbminlen :
field.length / item->collation.collation->mbmaxlen;
int4store(pos+2, max_char_len * thd_charset->mbmaxlen);
}
pos[6]= field.type;
int2store(pos+7,field.flags);
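Worked example (illustrative numbers, not from the patch): under utf8 with mbminlen=1 and mbmaxlen=3, a TEXT column has field.length = 65535 bytes, so max_char_len = 65535 / 1 = 65535 characters, whereas a VARCHAR(10) column has field.length = 10 * 3 = 30 bytes, so max_char_len = 30 / 3 = 10 characters. In both cases the metadata length sent to the client is then max_char_len * thd_charset->mbmaxlen, which is why the old formula (always dividing by mbmaxlen) under-reported the width of TEXT/BLOB columns.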
@ -930,7 +930,8 @@ bool load_master_data(THD* thd)
host was specified; there could have been a problem when replication
started, which led to relay log's IO_CACHE to not be inited.
*/
flush_master_info(active_mi, 0);
if (flush_master_info(active_mi, 0))
sql_print_error("Failed to flush master info file");
}
mysql_free_result(master_status_res);
}
@ -2496,7 +2496,6 @@ bool sys_var_slave_skip_counter::update(THD *thd, set_var *var)

bool sys_var_sync_binlog_period::update(THD *thd, set_var *var)
{
pthread_mutex_t *lock_log= mysql_bin_log.get_log_lock();
sync_binlog_period= (ulong) var->save_result.ulonglong_value;
return 0;
}
@ -5798,3 +5798,5 @@ ER_CANT_WRITE_LOCK_LOG_TABLE
eng "You can't write-lock a log table. Only read access is possible."
ER_CANT_READ_LOCK_LOG_TABLE
eng "You can't use usual read lock with log tables. Try READ LOCAL instead."
ER_SP_WRONG_NAME 42000
eng "Incorrect routine name '%-.64s'"
sql/slave.cc (38)
@ -1742,7 +1742,8 @@ static void write_ignored_events_info_to_relay_log(THD *thd, MASTER_INFO *mi)
" to the relay log, "
"SHOW SLAVE STATUS may be inaccurate");
rli->relay_log.harvest_bytes_written(&rli->log_space_total);
flush_master_info(mi, 1);
if (flush_master_info(mi, 1))
sql_print_error("Failed to flush master info file");
delete ev;
}
else
@ -2233,7 +2234,7 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)

pthread_mutex_unlock(&mi->rli.data_lock);
pthread_mutex_unlock(&mi->data_lock);

if (my_net_write(&thd->net, (char*)thd->packet.ptr(), packet->length()))
DBUG_RETURN(TRUE);
}
@ -2241,8 +2242,13 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
DBUG_RETURN(FALSE);
}

bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache)
/*
RETURN
2 - flush relay log failed
1 - flush master info failed
0 - all ok
*/
int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache)
{
IO_CACHE* file = &mi->file;
char lbuf[22];
@ -2261,8 +2267,9 @@ bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache)
When we come to this place in code, relay log may or not be initialized;
the caller is responsible for setting 'flush_relay_log_cache' accordingly.
*/
if (flush_relay_log_cache)
flush_io_cache(mi->rli.relay_log.get_log_file());
if (flush_relay_log_cache &&
flush_io_cache(mi->rli.relay_log.get_log_file()))
DBUG_RETURN(2);

/*
We flushed the relay log BEFORE the master.info file, because if we crash
@ -2274,13 +2281,13 @@ bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache)
*/

/*
In certain cases this code may create master.info files that seems
corrupted, because of extra lines filled with garbage in the end
file (this happens if new contents take less space than previous
contents of file). But because of number of lines in the first line
In certain cases this code may create master.info files that seems
corrupted, because of extra lines filled with garbage in the end
file (this happens if new contents take less space than previous
contents of file). But because of number of lines in the first line
of file we don't care about this garbage.
*/

my_b_seek(file, 0L);
my_b_printf(file, "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n",
LINES_IN_MASTER_INFO_WITH_SSL,
@ -2289,8 +2296,7 @@ bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache)
mi->password, mi->port, mi->connect_retry,
(int)(mi->ssl), mi->ssl_ca, mi->ssl_capath, mi->ssl_cert,
mi->ssl_cipher, mi->ssl_key);
flush_io_cache(file);
DBUG_RETURN(0);
DBUG_RETURN(-flush_io_cache(file));
}

@ -3355,7 +3361,11 @@ reconnect done to recover from failed read");
sql_print_error("Slave I/O thread could not queue event from master");
goto err;
}
flush_master_info(mi, 1); /* sure that we can flush the relay log */
if (flush_master_info(mi, 1))
{
sql_print_error("Failed to flush master info file");
goto err;
}
/*
See if the relay logs take too much space.
We don't lock mi->rli.log_space_lock here; this dirty read saves time
@ -231,7 +231,7 @@ int queue_event(MASTER_INFO* mi,const char* buf,ulong event_len);
|
||||
|
||||
int init_slave();
|
||||
void init_slave_skip_errors(const char* arg);
|
||||
bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache);
|
||||
int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache);
|
||||
bool flush_relay_log_info(RELAY_LOG_INFO* rli);
|
||||
int register_slave_on_master(MYSQL* mysql);
|
||||
int terminate_slave_threads(MASTER_INFO* mi, int thread_mask,
|
||||
|
@ -105,21 +105,27 @@ sp_get_item_value(Item *item, String *str)
|
||||
|
||||
case STRING_RESULT:
|
||||
{
|
||||
char buf_holder[STRING_BUFFER_USUAL_SIZE];
|
||||
String buf(buf_holder, sizeof(buf_holder), &my_charset_latin1);
|
||||
String *result= item->val_str(str);
|
||||
|
||||
if (!result)
|
||||
return NULL;
|
||||
|
||||
buf.append('_');
|
||||
buf.append(result->charset()->csname);
|
||||
buf.append('\'');
|
||||
buf.append(*result);
|
||||
buf.append('\'');
|
||||
str->copy(buf);
|
||||
{
|
||||
char buf_holder[STRING_BUFFER_USUAL_SIZE];
|
||||
String buf(buf_holder, sizeof(buf_holder), result->charset());
|
||||
|
||||
return str;
|
||||
/* We must reset length of the buffer, because of String specificity. */
|
||||
buf.length(0);
|
||||
|
||||
buf.append('_');
|
||||
buf.append(result->charset()->csname);
|
||||
buf.append('\'');
|
||||
buf.append(*result);
|
||||
buf.append('\'');
|
||||
str->copy(buf);
|
||||
|
||||
return str;
|
||||
}
|
||||
}
|
||||
|
||||
case ROW_RESULT:
|
||||
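A hedged, standalone model of the quoting the new STRING_RESULT branch performs: the value is rewritten as a character-set-introduced literal of the form _csname'value'. std::string stands in for the server's String class, and (as in the hunk above) embedded quotes are appended as-is.

#include <iostream>
#include <string>

std::string quote_with_introducer(const std::string &csname,
                                  const std::string &value)
{
  std::string buf;
  buf += '_';
  buf += csname;   /* character set introducer, e.g. _latin1 */
  buf += '\'';
  buf += value;    /* the hunk above appends the value unmodified */
  buf += '\'';
  return buf;
}

int main()
{
  std::cout << quote_with_introducer("latin1", "abc") << "\n";  /* _latin1'abc' */
  return 0;
}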
@ -389,6 +395,23 @@ sp_name_current_db_new(THD *thd, LEX_STRING name)
|
||||
return qname;
|
||||
}
|
||||
|
||||
/*
|
||||
Check that the name 'ident' is ok. It's assumed to be an 'ident'
|
||||
from the parser, so we only have to check length and trailing spaces.
|
||||
The former is a standard requirement (and 'show status' assumes a
|
||||
non-empty name), the latter is a mysql:ism as trailing spaces are
|
||||
removed by get_field().
|
||||
|
||||
RETURN
|
||||
TRUE - bad name
|
||||
FALSE - name is ok
|
||||
*/
|
||||
|
||||
bool
|
||||
check_routine_name(LEX_STRING ident)
|
||||
{
|
||||
return (!ident.str || !ident.str[0] || ident.str[ident.length-1] == ' ');
|
||||
}
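A minimal standalone rendition of the rule documented above, with LEX_STRING reduced to a plain struct for illustration:

#include <cassert>
#include <cstddef>

struct LexString { const char *str; size_t length; };

/* Bad if empty or ending in a space: get_field() strips trailing spaces,
   so such a routine name could never be read back. */
bool check_routine_name_sketch(LexString ident)
{
  return (!ident.str || !ident.str[0] || ident.str[ident.length - 1] == ' ');
}

int main()
{
  assert(!check_routine_name_sketch({"p1", 2}));   /* ok */
  assert(check_routine_name_sketch({"", 0}));      /* empty name rejected */
  assert(check_routine_name_sketch({"p1 ", 3}));   /* trailing space rejected */
  return 0;
}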
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
@ -3083,9 +3106,16 @@ sp_instr_set_case_expr::exec_core(THD *thd, uint *nextp)
|
||||
void
|
||||
sp_instr_set_case_expr::print(String *str)
|
||||
{
|
||||
str->append(STRING_WITH_LEN("set_case_expr "));
|
||||
const char CASE_EXPR_TAG[]= "set_case_expr ";
|
||||
const int CASE_EXPR_TAG_LEN= sizeof(CASE_EXPR_TAG) - 1;
|
||||
const int INT_STRING_MAX_LEN= 10;
|
||||
|
||||
/* We must call reserve(), because qs_append() doesn't care about memory. */
|
||||
str->reserve(CASE_EXPR_TAG_LEN + INT_STRING_MAX_LEN + 2);
|
||||
|
||||
str->qs_append(CASE_EXPR_TAG, CASE_EXPR_TAG_LEN);
|
||||
str->qs_append(m_case_expr_id);
|
||||
str->append(' ');
|
||||
str->qs_append(' ');
|
||||
m_case_expr->print(str);
|
||||
}
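The reserve-then-append pattern used above, sketched with standard-library types (std::string in place of String/qs_append); the sizes are the ones named in the hunk: the tag length, at most ten decimal digits for a 32-bit id, plus two slack bytes.

#include <cstdio>
#include <iostream>
#include <string>

int main()
{
  const char tag[] = "set_case_expr ";
  const int tag_len = sizeof(tag) - 1;     /* 14 */
  const int int_string_max_len = 10;       /* strlen("4294967295") */
  unsigned case_expr_id = 3;

  std::string out;
  out.reserve(tag_len + int_string_max_len + 2);  /* single up-front reservation */
  out.append(tag, tag_len);
  char num[12];
  std::snprintf(num, sizeof(num), "%u", case_expr_id);
  out += num;                                     /* the id, at most 10 digits */
  out += ' ';
  std::cout << out << "<case expr>\n";            /* "set_case_expr 3 <case expr>" */
  return 0;
}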
|
||||
|
||||
|
@ -103,6 +103,8 @@ public:
|
||||
sp_name *
|
||||
sp_name_current_db_new(THD *thd, LEX_STRING name);
|
||||
|
||||
bool
|
||||
check_routine_name(LEX_STRING name);
|
||||
|
||||
class sp_head :private Query_arena
|
||||
{
|
||||
|
@ -142,7 +142,7 @@ struct MBR
|
||||
bool inner_point(double x, double y) const
|
||||
{
|
||||
/* The following should be safe, even if we compare doubles */
|
||||
return (xmin<x) && (xmax>x) && (ymin<y) && (ymax>x);
|
||||
return (xmin<x) && (xmax>x) && (ymin<y) && (ymax>y);
|
||||
}
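A self-contained check of the one-character fix above; Box is a stand-in for MBR. With the old expression the last comparison tested ymax against x, so a point with a large x coordinate was reported as outside the rectangle even when it lies strictly inside.

#include <cassert>

struct Box { double xmin, xmax, ymin, ymax; };

bool inner_point_old(const Box &b, double x, double y)
{ return (b.xmin < x) && (b.xmax > x) && (b.ymin < y) && (b.ymax > x); }

bool inner_point_fixed(const Box &b, double x, double y)
{ return (b.xmin < x) && (b.xmax > x) && (b.ymin < y) && (b.ymax > y); }

int main()
{
  Box b = {0.0, 10.0, 0.0, 1.0};
  /* (5, 0.5) lies strictly inside, but x = 5 > ymax = 1 trips the old test */
  assert(!inner_point_old(b, 5.0, 0.5));
  assert(inner_point_fixed(b, 5.0, 0.5));
  return 0;
}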
|
||||
|
||||
int overlaps(const MBR *mbr)
|
||||
|
@ -951,7 +951,7 @@ bool acl_getroot_no_password(Security_context *sctx, char *user, char *host,
|
||||
|
||||
DBUG_PRINT("enter", ("Host: '%s', Ip: '%s', User: '%s', db: '%s'",
|
||||
(host ? host : "(NULL)"), (ip ? ip : "(NULL)"),
|
||||
(user ? user : "(NULL)"), (db ? db : "(NULL)")));
|
||||
user, (db ? db : "(NULL)")));
|
||||
sctx->user= user;
|
||||
sctx->host= host;
|
||||
sctx->ip= ip;
|
||||
@ -980,7 +980,7 @@ bool acl_getroot_no_password(Security_context *sctx, char *user, char *host,
|
||||
for (i=0 ; i < acl_users.elements ; i++)
|
||||
{
|
||||
acl_user= dynamic_element(&acl_users,i,ACL_USER*);
|
||||
if ((!acl_user->user && (!user || !user[0])) ||
|
||||
if ((!acl_user->user && !user[0]) ||
|
||||
(acl_user->user && strcmp(user, acl_user->user) == 0))
|
||||
{
|
||||
if (compare_hostname(&acl_user->host, host, ip))
|
||||
@ -1001,7 +1001,7 @@ bool acl_getroot_no_password(Security_context *sctx, char *user, char *host,
|
||||
{
|
||||
if (compare_hostname(&acl_db->host, host, ip))
|
||||
{
|
||||
if (!acl_db->db || (db && !strcmp(acl_db->db, db)))
|
||||
if (!acl_db->db || (db && !wild_compare(db, acl_db->db, 0)))
|
||||
{
|
||||
sctx->db_access= acl_db->access;
|
||||
break;
|
||||
@ -4980,8 +4980,6 @@ static int handle_grant_struct(uint struct_no, bool drop,
|
||||
}
|
||||
if (! user)
|
||||
user= "";
|
||||
if (! host)
|
||||
host= "";
|
||||
#ifdef EXTRA_DEBUG
|
||||
DBUG_PRINT("loop",("scan struct: %u index: %u user: '%s' host: '%s'",
|
||||
struct_no, idx, user, host));
|
||||
|
sql/sql_db.cc

@ -287,7 +287,7 @@ static bool write_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create)
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
/*
|
||||
Load database options file
|
||||
|
||||
load_db_opt()
|
||||
@ -313,68 +313,72 @@ bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create)
|
||||
|
||||
bzero((char*) create,sizeof(*create));
|
||||
create->default_table_charset= thd->variables.collation_server;
|
||||
|
||||
|
||||
/* Check if options for this database are already in the hash */
|
||||
if (!get_dbopt(path, create))
|
||||
DBUG_RETURN(0);
|
||||
|
||||
/* Otherwise, load options from the .opt file */
|
||||
if ((file=my_open(path, O_RDONLY | O_SHARE, MYF(0))) >= 0)
|
||||
{
|
||||
IO_CACHE cache;
|
||||
init_io_cache(&cache, file, IO_SIZE, READ_CACHE, 0, 0, MYF(0));
|
||||
DBUG_RETURN(0);
|
||||
|
||||
while ((int) (nbytes= my_b_gets(&cache, (char*) buf, sizeof(buf))) > 0)
|
||||
/* Otherwise, load options from the .opt file */
|
||||
if ((file=my_open(path, O_RDONLY | O_SHARE, MYF(0))) < 0)
|
||||
goto err1;
|
||||
|
||||
IO_CACHE cache;
|
||||
if (init_io_cache(&cache, file, IO_SIZE, READ_CACHE, 0, 0, MYF(0)))
|
||||
goto err2;
|
||||
|
||||
while ((int) (nbytes= my_b_gets(&cache, (char*) buf, sizeof(buf))) > 0)
|
||||
{
|
||||
char *pos= buf+nbytes-1;
|
||||
/* Remove end space and control characters */
|
||||
while (pos > buf && !my_isgraph(&my_charset_latin1, pos[-1]))
|
||||
pos--;
|
||||
*pos=0;
|
||||
if ((pos= strchr(buf, '=')))
|
||||
{
|
||||
char *pos= buf+nbytes-1;
|
||||
/* Remove end space and control characters */
|
||||
while (pos > buf && !my_isgraph(&my_charset_latin1, pos[-1]))
|
||||
pos--;
|
||||
*pos=0;
|
||||
if ((pos= strchr(buf, '=')))
|
||||
if (!strncmp(buf,"default-character-set", (pos-buf)))
|
||||
{
|
||||
if (!strncmp(buf,"default-character-set", (pos-buf)))
|
||||
{
|
||||
/*
|
||||
Try character set name, and if it fails
|
||||
try collation name, probably it's an old
|
||||
4.1.0 db.opt file, which didn't have
|
||||
separate default-character-set and
|
||||
default-collation commands.
|
||||
*/
|
||||
if (!(create->default_table_charset=
|
||||
get_charset_by_csname(pos+1, MY_CS_PRIMARY, MYF(0))) &&
|
||||
!(create->default_table_charset=
|
||||
get_charset_by_name(pos+1, MYF(0))))
|
||||
{
|
||||
sql_print_error("Error while loading database options: '%s':",path);
|
||||
sql_print_error(ER(ER_UNKNOWN_CHARACTER_SET),pos+1);
|
||||
create->default_table_charset= default_charset_info;
|
||||
}
|
||||
}
|
||||
else if (!strncmp(buf,"default-collation", (pos-buf)))
|
||||
{
|
||||
if (!(create->default_table_charset= get_charset_by_name(pos+1,
|
||||
MYF(0))))
|
||||
{
|
||||
sql_print_error("Error while loading database options: '%s':",path);
|
||||
sql_print_error(ER(ER_UNKNOWN_COLLATION),pos+1);
|
||||
create->default_table_charset= default_charset_info;
|
||||
}
|
||||
}
|
||||
/*
|
||||
Try character set name, and if it fails
|
||||
try collation name, probably it's an old
|
||||
4.1.0 db.opt file, which didn't have
|
||||
separate default-character-set and
|
||||
default-collation commands.
|
||||
*/
|
||||
if (!(create->default_table_charset=
|
||||
get_charset_by_csname(pos+1, MY_CS_PRIMARY, MYF(0))) &&
|
||||
!(create->default_table_charset=
|
||||
get_charset_by_name(pos+1, MYF(0))))
|
||||
{
|
||||
sql_print_error("Error while loading database options: '%s':",path);
|
||||
sql_print_error(ER(ER_UNKNOWN_CHARACTER_SET),pos+1);
|
||||
create->default_table_charset= default_charset_info;
|
||||
}
|
||||
}
|
||||
else if (!strncmp(buf,"default-collation", (pos-buf)))
|
||||
{
|
||||
if (!(create->default_table_charset= get_charset_by_name(pos+1,
|
||||
MYF(0))))
|
||||
{
|
||||
sql_print_error("Error while loading database options: '%s':",path);
|
||||
sql_print_error(ER(ER_UNKNOWN_COLLATION),pos+1);
|
||||
create->default_table_charset= default_charset_info;
|
||||
}
|
||||
}
|
||||
}
|
||||
end_io_cache(&cache);
|
||||
my_close(file,MYF(0));
|
||||
/*
|
||||
Put the loaded value into the hash.
|
||||
Note that another thread could've added the same
|
||||
entry to the hash after we called get_dbopt(),
|
||||
but it's not an error, as put_dbopt() takes this
|
||||
possibility into account.
|
||||
*/
|
||||
error= put_dbopt(path, create);
|
||||
}
|
||||
/*
|
||||
Put the loaded value into the hash.
|
||||
Note that another thread could've added the same
|
||||
entry to the hash after we called get_dbopt(),
|
||||
but it's not an error, as put_dbopt() takes this
|
||||
possibility into account.
|
||||
*/
|
||||
error= put_dbopt(path, create);
|
||||
|
||||
end_io_cache(&cache);
|
||||
err2:
|
||||
my_close(file,MYF(0));
|
||||
err1:
|
||||
DBUG_RETURN(error);
|
||||
}
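The character-set fallback described in the comment above, reduced to a standalone sketch; the lookup helpers are stubs, not the server's get_charset_by_csname()/get_charset_by_name() API.

#include <iostream>
#include <string>

static const char *charset_by_name(const std::string &name)
{ return name == "latin1" ? "latin1" : nullptr; }             /* stub lookup */

static const char *charset_of_collation(const std::string &name)
{ return name == "latin1_swedish_ci" ? "latin1" : nullptr; }   /* stub lookup */

/* Try the value as a character set name first, then as a collation name
   (an old 4.1.0-style db.opt stores a collation there), and fall back to
   the server default if both fail. */
const char *resolve_default_charset(const std::string &value)
{
  if (const char *cs = charset_by_name(value))
    return cs;
  if (const char *cs = charset_of_collation(value))
    return cs;
  return "default_charset_info";
}

int main()
{
  std::cout << resolve_default_charset("latin1") << "\n";
  std::cout << resolve_default_charset("latin1_swedish_ci") << "\n";
  std::cout << resolve_default_charset("unknown") << "\n";
  return 0;
}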
|
||||
|
||||
|
@ -567,7 +567,7 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond,
|
||||
|
||||
SQL_SELECT *res= make_select(table, 0, 0, cond, 0, error);
|
||||
if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR)) ||
|
||||
(res->quick && res->quick->reset()))
|
||||
(res && res->quick && res->quick->reset()))
|
||||
{
|
||||
delete res;
|
||||
res=0;
|
||||
|
@ -686,7 +686,7 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
|
||||
|
||||
DBUG_ASSERT(view->table != 0 && view->field_translation != 0);
|
||||
|
||||
bitmap_init(&used_fields, used_fields_buff, table->s->fields, 0);
|
||||
VOID(bitmap_init(&used_fields, used_fields_buff, table->s->fields, 0));
|
||||
bitmap_clear_all(&used_fields);
|
||||
|
||||
view->contain_auto_increment= 0;
|
||||
|
@ -1615,6 +1615,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
|
||||
statistic_increment(thd->status_var.com_other, &LOCK_status);
|
||||
thd->enable_slow_log= opt_log_slow_admin_statements;
|
||||
db= thd->alloc(db_len + tbl_len + 2);
|
||||
if (!db)
|
||||
{
|
||||
my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
|
||||
break;
|
||||
}
|
||||
tbl_name= strmake(db, packet + 1, db_len)+1;
|
||||
strmake(tbl_name, packet + db_len + 2, tbl_len);
|
||||
mysql_table_dump(thd, db, tbl_name, -1);
|
||||
@ -1628,14 +1633,14 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
|
||||
statistic_increment(thd->status_var.com_other, &LOCK_status);
|
||||
char *user= (char*) packet;
|
||||
char *passwd= strend(user)+1;
|
||||
/*
|
||||
/*
|
||||
Old clients send null-terminated string ('\0' for empty string) for
|
||||
password. New clients send the size (1 byte) + string (not null
|
||||
terminated, so also '\0' for empty string).
|
||||
*/
|
||||
char db_buff[NAME_LEN+1]; // buffer to store db in utf8
|
||||
char db_buff[NAME_LEN+1]; // buffer to store db in utf8
|
||||
char *db= passwd;
|
||||
uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ?
|
||||
uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ?
|
||||
*passwd++ : strlen(passwd);
|
||||
db+= passwd_len + 1;
|
||||
#ifndef EMBEDDED_LIBRARY
|
||||
@ -2414,23 +2419,26 @@ mysql_execute_command(THD *thd)
|
||||
}
|
||||
}
|
||||
else
|
||||
#endif /* HAVE_REPLICATION */
|
||||
|
||||
/*
|
||||
When option readonly is set deny operations which change non-temporary
|
||||
tables. Except for the replication thread and the 'super' users.
|
||||
*/
|
||||
if (opt_readonly &&
|
||||
!(thd->security_ctx->master_access & SUPER_ACL) &&
|
||||
uc_update_queries[lex->sql_command] &&
|
||||
!((lex->sql_command == SQLCOM_CREATE_TABLE) &&
|
||||
(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) &&
|
||||
((lex->sql_command != SQLCOM_UPDATE_MULTI) &&
|
||||
some_non_temp_table_to_be_updated(thd, all_tables)))
|
||||
{
|
||||
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only");
|
||||
DBUG_RETURN(-1);
|
||||
}
|
||||
#endif /* HAVE_REPLICATION */
|
||||
/*
|
||||
When option readonly is set deny operations which change non-temporary
|
||||
tables. Except for the replication thread and the 'super' users.
|
||||
*/
|
||||
if (opt_readonly &&
|
||||
!(thd->security_ctx->master_access & SUPER_ACL) &&
|
||||
uc_update_queries[lex->sql_command] &&
|
||||
!((lex->sql_command == SQLCOM_CREATE_TABLE) &&
|
||||
(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) &&
|
||||
((lex->sql_command != SQLCOM_UPDATE_MULTI) &&
|
||||
some_non_temp_table_to_be_updated(thd, all_tables)))
|
||||
{
|
||||
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only");
|
||||
DBUG_RETURN(-1);
|
||||
}
|
||||
#ifdef HAVE_REPLICATION
|
||||
} /* endif unlikely slave */
|
||||
#endif
|
||||
if(lex->orig_sql_command == SQLCOM_END)
|
||||
statistic_increment(thd->status_var.com_stat[lex->sql_command],
|
||||
&LOCK_status);
|
||||
@ -3226,8 +3234,7 @@ end_with_restore_list:
|
||||
else
|
||||
res= 0;
|
||||
|
||||
if ((res= mysql_multi_update_prepare(thd)))
|
||||
break;
|
||||
res= mysql_multi_update_prepare(thd);
|
||||
|
||||
#ifdef HAVE_REPLICATION
|
||||
/* Check slave filtering rules */
|
||||
@ -3235,20 +3242,33 @@ end_with_restore_list:
|
||||
{
|
||||
if (all_tables_not_ok(thd, all_tables))
|
||||
{
|
||||
if (res!= 0)
|
||||
{
|
||||
res= 0; /* don't care of prev failure */
|
||||
thd->clear_error(); /* filters are of highest prior */
|
||||
}
|
||||
/* we warn the slave SQL thread */
|
||||
my_error(ER_SLAVE_IGNORED_TABLE, MYF(0));
|
||||
break;
|
||||
}
|
||||
if (res)
|
||||
break;
|
||||
}
|
||||
else
|
||||
#endif /* HAVE_REPLICATION */
|
||||
if (opt_readonly &&
|
||||
!(thd->security_ctx->master_access & SUPER_ACL) &&
|
||||
some_non_temp_table_to_be_updated(thd, all_tables))
|
||||
{
|
||||
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only");
|
||||
break;
|
||||
}
|
||||
#endif /* HAVE_REPLICATION */
|
||||
if (res)
|
||||
break;
|
||||
if (opt_readonly &&
|
||||
!(thd->security_ctx->master_access & SUPER_ACL) &&
|
||||
some_non_temp_table_to_be_updated(thd, all_tables))
|
||||
{
|
||||
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only");
|
||||
break;
|
||||
}
|
||||
#ifdef HAVE_REPLICATION
|
||||
} /* unlikely */
|
||||
#endif
|
||||
|
||||
res= mysql_multi_update(thd, all_tables,
|
||||
&select_lex->item_list,
|
||||
@ -6631,6 +6651,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
|
||||
#ifdef HAVE_REPLICATION
|
||||
if (options & REFRESH_MASTER)
|
||||
{
|
||||
DBUG_ASSERT(thd);
|
||||
tmp_write_to_binlog= 0;
|
||||
if (reset_master(thd))
|
||||
{
|
||||
|
@ -2120,6 +2120,8 @@ void reinit_stmt_before_use(THD *thd, LEX *lex)
|
||||
were closed in the end of previous prepare or execute call.
|
||||
*/
|
||||
tables->table= 0;
|
||||
/* Reset is_schema_table_processed value(needed for I_S tables */
|
||||
tables->is_schema_table_processed= FALSE;
|
||||
|
||||
if (tables->prep_on_expr)
|
||||
{
|
||||
|
@ -1201,7 +1201,12 @@ bool change_master(THD* thd, MASTER_INFO* mi)
|
||||
Relay log's IO_CACHE may not be inited, if rli->inited==0 (server was never
|
||||
a slave before).
|
||||
*/
|
||||
flush_master_info(mi, 0);
|
||||
if (flush_master_info(mi, 0))
|
||||
{
|
||||
my_error(ER_RELAY_LOG_INIT, MYF(0), "Failed to flush master info file");
|
||||
unlock_slave_threads(mi);
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
if (need_relay_log_purge)
|
||||
{
|
||||
relay_log_purge= 1;
|
||||
@ -1311,14 +1316,15 @@ bool mysql_show_binlog_events(THD* thd)
|
||||
bool ret = TRUE;
|
||||
IO_CACHE log;
|
||||
File file = -1;
|
||||
Format_description_log_event *description_event= new
|
||||
Format_description_log_event(3); /* MySQL 4.0 by default */
|
||||
|
||||
Log_event::init_show_field_list(&field_list);
|
||||
if (protocol->send_fields(&field_list,
|
||||
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
Format_description_log_event *description_event= new
|
||||
Format_description_log_event(3); /* MySQL 4.0 by default */
|
||||
|
||||
/*
|
||||
Wait for handlers to insert any pending information
|
||||
into the binlog. For e.g. ndb which updates the binlog asynchronously
|
||||
|
@ -9117,7 +9117,7 @@ TABLE *create_virtual_tmp_table(THD *thd, List<create_field> &field_list)
|
||||
|
||||
field++;
|
||||
}
|
||||
*field= NULL; /* mark the end of the list */
|
||||
*field= NULL; /* mark the end of the list */
|
||||
share->blob_field[blob_count]= 0; /* mark the end of the list */
|
||||
share->blob_fields= blob_count;
|
||||
|
||||
@ -11659,6 +11659,12 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
||||
/* Fill schema tables with data before filesort if it's necessary */
|
||||
if ((join->select_lex->options & OPTION_SCHEMA_TABLE) &&
|
||||
get_schema_tables_result(join))
|
||||
goto err;
|
||||
|
||||
if (table->s->tmp_table)
|
||||
table->file->info(HA_STATUS_VARIABLE); // Get record count
|
||||
table->sort.found_records=filesort(thd, table,sortorder, length,
|
||||
|
@ -781,7 +781,7 @@ append_identifier(THD *thd, String *packet, const char *name, uint length)
|
||||
it's a keyword
|
||||
*/
|
||||
|
||||
packet->reserve(length*2 + 2);
|
||||
VOID(packet->reserve(length*2 + 2));
|
||||
quote_char= (char) q;
|
||||
packet->append(&quote_char, 1, system_charset_info);
|
||||
|
||||
@ -1097,13 +1097,13 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
|
||||
if (key_part->field)
|
||||
append_identifier(thd,packet,key_part->field->field_name,
|
||||
strlen(key_part->field->field_name));
|
||||
if (!key_part->field ||
|
||||
if (key_part->field &&
|
||||
(key_part->length !=
|
||||
table->field[key_part->fieldnr-1]->key_length() &&
|
||||
!(key_info->flags & HA_FULLTEXT)))
|
||||
{
|
||||
buff[0] = '(';
|
||||
char* end=int10_to_str((long) key_part->length /
|
||||
char* end=int10_to_str((long) key_part->length /
|
||||
key_part->field->charset()->mbmaxlen,
|
||||
buff + 1,10);
|
||||
*end++ = ')';
|
||||
@ -1856,7 +1856,8 @@ LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str,
|
||||
{
|
||||
MEM_ROOT *mem= thd->mem_root;
|
||||
if (allocate_lex_string)
|
||||
lex_str= (LEX_STRING *)thd->alloc(sizeof(LEX_STRING));
|
||||
if (!(lex_str= (LEX_STRING *)thd->alloc(sizeof(LEX_STRING))))
|
||||
return 0;
|
||||
lex_str->str= strmake_root(mem, str, length);
|
||||
lex_str->length= length;
|
||||
return lex_str;
|
||||
@ -3115,7 +3116,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
|
||||
/*
|
||||
I.e. we are in SELECT FROM INFORMATION_SCHEMA.STATISTICS
|
||||
rather than in SHOW KEYS
|
||||
*/
|
||||
*/
|
||||
if (!tables->view)
|
||||
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
|
||||
thd->net.last_errno, thd->net.last_error);
|
||||
@ -3128,7 +3129,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
|
||||
{
|
||||
TABLE *show_table= tables->table;
|
||||
KEY *key_info=show_table->key_info;
|
||||
show_table->file->info(HA_STATUS_VARIABLE |
|
||||
show_table->file->info(HA_STATUS_VARIABLE |
|
||||
HA_STATUS_NO_LOCK |
|
||||
HA_STATUS_TIME);
|
||||
for (uint i=0 ; i < show_table->s->keys ; i++,key_info++)
|
||||
@ -3140,7 +3141,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
|
||||
restore_record(table, s->default_values);
|
||||
table->field[1]->store(base_name, strlen(base_name), cs);
|
||||
table->field[2]->store(file_name, strlen(file_name), cs);
|
||||
table->field[3]->store((longlong) ((key_info->flags &
|
||||
table->field[3]->store((longlong) ((key_info->flags &
|
||||
HA_NOSAME) ? 0 : 1), TRUE);
|
||||
table->field[4]->store(base_name, strlen(base_name), cs);
|
||||
table->field[5]->store(key_info->name, strlen(key_info->name), cs);
|
||||
@ -3163,12 +3164,12 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
|
||||
table->field[9]->store((longlong) records, TRUE);
|
||||
table->field[9]->set_notnull();
|
||||
}
|
||||
if (!(key_info->flags & HA_FULLTEXT) &&
|
||||
(!key_part->field ||
|
||||
key_part->length !=
|
||||
if (!(key_info->flags & HA_FULLTEXT) &&
|
||||
(key_part->field &&
|
||||
key_part->length !=
|
||||
show_table->field[key_part->fieldnr-1]->key_length()))
|
||||
{
|
||||
table->field[10]->store((longlong) key_part->length /
|
||||
table->field[10]->store((longlong) key_part->length /
|
||||
key_part->field->charset()->mbmaxlen);
|
||||
table->field[10]->set_notnull();
|
||||
}
|
||||
@ -4549,7 +4550,16 @@ bool get_schema_tables_result(JOIN *join)
|
||||
TABLE_LIST *table_list= tab->table->pos_in_table_list;
|
||||
if (table_list->schema_table && thd->fill_derived_tables())
|
||||
{
|
||||
if (&lex->unit != lex->current_select->master_unit()) // is subselect
|
||||
bool is_subselect= (&lex->unit != lex->current_select->master_unit());
|
||||
/*
|
||||
The schema table is already processed and
|
||||
the statement is not a subselect.
|
||||
So we don't need to handle this table again.
|
||||
*/
|
||||
if (table_list->is_schema_table_processed && !is_subselect)
|
||||
continue;
|
||||
|
||||
if (is_subselect) // is subselect
|
||||
{
|
||||
table_list->table->file->extra(HA_EXTRA_RESET_STATE);
|
||||
table_list->table->file->delete_all_rows();
|
||||
@ -4562,6 +4572,7 @@ bool get_schema_tables_result(JOIN *join)
|
||||
if (table_list->schema_table->fill_table(thd, table_list,
|
||||
tab->select_cond))
|
||||
result= 1;
|
||||
table_list->is_schema_table_processed= TRUE;
|
||||
}
|
||||
}
|
||||
thd->no_warnings_for_error= 0;
|
||||
|
@ -450,7 +450,7 @@ void mysql_print_status()
|
||||
|
||||
calc_sum_of_all_status(&tmp);
|
||||
printf("\nStatus information:\n\n");
|
||||
my_getwd(current_dir, sizeof(current_dir),MYF(0));
|
||||
VOID(my_getwd(current_dir, sizeof(current_dir),MYF(0)));
|
||||
printf("Current dir: %s\n", current_dir);
|
||||
printf("Running threads: %d Stack size: %ld\n", thread_count,
|
||||
(long) thread_stack);
|
||||
|
@ -1642,11 +1642,26 @@ clear_privileges:
|
||||
sp_name:
|
||||
ident '.' ident
|
||||
{
|
||||
if (!$1.str || check_db_name($1.str))
|
||||
{
|
||||
my_error(ER_WRONG_DB_NAME, MYF(0), $1.str);
|
||||
YYABORT;
|
||||
}
|
||||
if (check_routine_name($3))
|
||||
{
|
||||
my_error(ER_SP_WRONG_NAME, MYF(0), $3.str);
|
||||
YYABORT;
|
||||
}
|
||||
$$= new sp_name($1, $3);
|
||||
$$->init_qname(YYTHD);
|
||||
}
|
||||
| ident
|
||||
{
|
||||
if (check_routine_name($1))
|
||||
{
|
||||
my_error(ER_SP_WRONG_NAME, MYF(0), $1.str);
|
||||
YYABORT;
|
||||
}
|
||||
$$= sp_name_current_db_new(YYTHD, $1);
|
||||
}
|
||||
;
|
||||
|
@ -1640,7 +1640,10 @@ ulong get_form_pos(File file, uchar *head, TYPELIB *save_names)
|
||||
ret_value=uint4korr(pos);
|
||||
}
|
||||
if (! save_names)
|
||||
my_free((gptr) buf,MYF(0));
|
||||
{
|
||||
if (names)
|
||||
my_free((gptr) buf,MYF(0));
|
||||
}
|
||||
else if (!names)
|
||||
bzero((char*) save_names,sizeof(save_names));
|
||||
else
|
||||
|
@ -560,6 +560,7 @@ typedef struct st_table_list
|
||||
st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */
|
||||
ST_SCHEMA_TABLE *schema_table; /* Information_schema table */
|
||||
st_select_lex *schema_select_lex;
|
||||
bool is_schema_table_processed;
|
||||
/*
|
||||
True when the view field translation table is used to convert
|
||||
schema table fields for backwards compatibility with SHOW command.
|
||||
|
sql/uniques.cc
@ -38,8 +38,8 @@
|
||||
int unique_write_to_file(gptr key, element_count count, Unique *unique)
|
||||
{
|
||||
/*
|
||||
Use unique->size (size of element stored in the tree) and not
|
||||
unique->tree.size_of_element. The latter is different from unique->size
|
||||
Use unique->size (size of element stored in the tree) and not
|
||||
unique->tree.size_of_element. The latter is different from unique->size
|
||||
when tree implementation chooses to store pointer to key in TREE_ELEMENT
|
||||
(instead of storing the element itself there)
|
||||
*/
|
||||
@ -63,27 +63,27 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
|
||||
comp_func_fixed_arg);
|
||||
/* If the following fail's the next add will also fail */
|
||||
my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16);
|
||||
/*
|
||||
/*
|
||||
If you change the following, change it in get_max_elements function, too.
|
||||
*/
|
||||
max_elements= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+size);
|
||||
open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE,
|
||||
MYF(MY_WME));
|
||||
VOID(open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE,
|
||||
MYF(MY_WME)));
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Calculate log2(n!)
|
||||
|
||||
|
||||
NOTES
|
||||
Stirling's approximate formula is used:
|
||||
|
||||
n! ~= sqrt(2*M_PI*n) * (n/M_E)^n
|
||||
|
||||
|
||||
n! ~= sqrt(2*M_PI*n) * (n/M_E)^n
|
||||
|
||||
Derivation of formula used for calculations is as follows:
|
||||
|
||||
log2(n!) = log(n!)/log(2) = log(sqrt(2*M_PI*n)*(n/M_E)^n) / log(2) =
|
||||
|
||||
|
||||
= (log(2*M_PI*n)/2 + n*log(n/M_E)) / log(2).
|
||||
*/
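The derivation above translates directly into code; a standalone version, checked against the exact sum of log2(k) for n = 100:

#include <cmath>
#include <cstdio>

inline double log2_n_fact(double x)
{
  return (std::log(2 * M_PI * x) / 2 + x * std::log(x / M_E)) / M_LN2;
}

int main()
{
  double exact = 0.0;
  for (int k = 2; k <= 100; k++)
    exact += std::log2((double) k);
  std::printf("Stirling: %.3f  exact: %.3f\n", log2_n_fact(100.0), exact);
  return 0;
}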
|
||||
|
||||
@ -94,7 +94,7 @@ inline double log2_n_fact(double x)
|
||||
|
||||
|
||||
/*
|
||||
Calculate cost of merge_buffers function call for given sequence of
|
||||
Calculate cost of merge_buffers function call for given sequence of
|
||||
input stream lengths and store the number of rows in result stream in *last.
|
||||
|
||||
SYNOPSIS
|
||||
@ -103,21 +103,21 @@ inline double log2_n_fact(double x)
|
||||
elem_size Size of element stored in buffer
|
||||
first Pointer to first merged element size
|
||||
last Pointer to last merged element size
|
||||
|
||||
|
||||
RETURN
|
||||
Cost of merge_buffers operation in disk seeks.
|
||||
|
||||
|
||||
NOTES
|
||||
It is assumed that no rows are eliminated during merge.
|
||||
The cost is calculated as
|
||||
|
||||
The cost is calculated as
|
||||
|
||||
cost(read_and_write) + cost(merge_comparisons).
|
||||
|
||||
All bytes in the sequences is read and written back during merge so cost
|
||||
|
||||
All bytes in the sequences is read and written back during merge so cost
|
||||
of disk io is 2*elem_size*total_buf_elems/IO_SIZE (2 is for read + write)
|
||||
|
||||
|
||||
For comparisons cost calculations we assume that all merged sequences have
|
||||
the same length, so each of total_buf_size elements will be added to a sort
|
||||
the same length, so each of total_buf_size elements will be added to a sort
|
||||
heap with (n_buffers-1) elements. This gives the comparison cost:
|
||||
|
||||
total_buf_elems* log2(n_buffers) / TIME_FOR_COMPARE_ROWID;
|
||||
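A worked instance of the cost formula quoted above. IO_SIZE and TIME_FOR_COMPARE_ROWID are given illustrative values here (4096 and 10), not values taken from the server headers.

#include <cmath>
#include <cstdio>

double merge_buffers_cost(double total_buf_elems, double elem_size,
                          double n_buffers, double io_size,
                          double time_for_compare_rowid)
{
  double io_cost  = 2 * total_buf_elems * elem_size / io_size;   /* read + write */
  double cmp_cost = total_buf_elems * std::log(n_buffers) /
                    (time_for_compare_rowid * M_LN2);            /* heap compares */
  return io_cost + cmp_cost;
}

int main()
{
  /* 7 buffers of 10000 rowids, 8 bytes each */
  std::printf("%.1f disk seeks\n", merge_buffers_cost(70000, 8, 7, 4096, 10));
  return 0;
}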
@ -125,16 +125,16 @@ inline double log2_n_fact(double x)
|
||||
|
||||
static double get_merge_buffers_cost(uint *buff_elems, uint elem_size,
|
||||
uint *first, uint *last)
|
||||
{
|
||||
{
|
||||
uint total_buf_elems= 0;
|
||||
for (uint *pbuf= first; pbuf <= last; pbuf++)
|
||||
total_buf_elems+= *pbuf;
|
||||
*last= total_buf_elems;
|
||||
|
||||
|
||||
int n_buffers= last - first + 1;
|
||||
|
||||
/* Using log2(n)=log(n)/log(2) formula */
|
||||
return 2*((double)total_buf_elems*elem_size) / IO_SIZE +
|
||||
return 2*((double)total_buf_elems*elem_size) / IO_SIZE +
|
||||
total_buf_elems*log((double) n_buffers) / (TIME_FOR_COMPARE_ROWID * M_LN2);
|
||||
}
|
||||
|
||||
@ -142,13 +142,13 @@ static double get_merge_buffers_cost(uint *buff_elems, uint elem_size,
|
||||
/*
|
||||
Calculate cost of merging buffers into one in Unique::get, i.e. calculate
|
||||
how long (in terms of disk seeks) the two calls
|
||||
merge_many_buffs(...);
|
||||
merge_buffers(...);
|
||||
merge_many_buffs(...);
|
||||
merge_buffers(...);
|
||||
will take.
|
||||
|
||||
SYNOPSIS
|
||||
get_merge_many_buffs_cost()
|
||||
buffer buffer space for temporary data, at least
|
||||
buffer buffer space for temporary data, at least
|
||||
Unique::get_cost_calc_buff_size bytes
|
||||
maxbuffer # of full buffers
|
||||
max_n_elems # of elements in first maxbuffer buffers
|
||||
@ -156,12 +156,12 @@ static double get_merge_buffers_cost(uint *buff_elems, uint elem_size,
|
||||
elem_size size of buffer element
|
||||
|
||||
NOTES
|
||||
maxbuffer+1 buffers are merged, where first maxbuffer buffers contain
|
||||
maxbuffer+1 buffers are merged, where first maxbuffer buffers contain
|
||||
max_n_elems elements each and last buffer contains last_n_elems elements.
|
||||
|
||||
The current implementation does a dumb simulation of merge_many_buffs
|
||||
function actions.
|
||||
|
||||
|
||||
RETURN
|
||||
Cost of merge in disk seeks.
|
||||
*/
|
||||
@ -173,17 +173,17 @@ static double get_merge_many_buffs_cost(uint *buffer,
|
||||
register int i;
|
||||
double total_cost= 0.0;
|
||||
uint *buff_elems= buffer; /* #s of elements in each of merged sequences */
|
||||
|
||||
/*
|
||||
|
||||
/*
|
||||
Set initial state: first maxbuffer sequences contain max_n_elems elements
|
||||
each, last sequence contains last_n_elems elements.
|
||||
*/
|
||||
for (i = 0; i < (int)maxbuffer; i++)
|
||||
buff_elems[i]= max_n_elems;
|
||||
buff_elems[i]= max_n_elems;
|
||||
buff_elems[maxbuffer]= last_n_elems;
|
||||
|
||||
/*
|
||||
Do it exactly as merge_many_buff function does, calling
|
||||
/*
|
||||
Do it exactly as merge_many_buff function does, calling
|
||||
get_merge_buffers_cost to get cost of merge_buffers.
|
||||
*/
|
||||
if (maxbuffer >= MERGEBUFF2)
|
||||
@ -194,17 +194,17 @@ static double get_merge_many_buffs_cost(uint *buffer,
|
||||
for (i = 0; i <= (int) maxbuffer - MERGEBUFF*3/2; i += MERGEBUFF)
|
||||
{
|
||||
total_cost+=get_merge_buffers_cost(buff_elems, elem_size,
|
||||
buff_elems + i,
|
||||
buff_elems + i,
|
||||
buff_elems + i + MERGEBUFF-1);
|
||||
lastbuff++;
|
||||
}
|
||||
total_cost+=get_merge_buffers_cost(buff_elems, elem_size,
|
||||
buff_elems + i,
|
||||
buff_elems + i,
|
||||
buff_elems + maxbuffer);
|
||||
maxbuffer= lastbuff;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Simulate final merge_buff call. */
|
||||
total_cost += get_merge_buffers_cost(buff_elems, elem_size,
|
||||
buff_elems, buff_elems + maxbuffer);
|
||||
@ -213,7 +213,7 @@ static double get_merge_many_buffs_cost(uint *buffer,
|
||||
|
||||
|
||||
/*
|
||||
Calculate cost of using Unique for processing nkeys elements of size
|
||||
Calculate cost of using Unique for processing nkeys elements of size
|
||||
key_size using max_in_memory_size memory.
|
||||
|
||||
SYNOPSIS
|
||||
@ -223,12 +223,12 @@ static double get_merge_many_buffs_cost(uint *buffer,
|
||||
nkeys #of elements in Unique
|
||||
key_size size of each elements in bytes
|
||||
max_in_memory_size amount of memory Unique will be allowed to use
|
||||
|
||||
|
||||
RETURN
|
||||
Cost in disk seeks.
|
||||
|
||||
|
||||
NOTES
|
||||
cost(using_unqiue) =
|
||||
cost(using_unqiue) =
|
||||
cost(create_trees) + (see #1)
|
||||
cost(merge) + (see #2)
|
||||
cost(read_result) (see #3)
|
||||
@ -237,42 +237,42 @@ static double get_merge_many_buffs_cost(uint *buffer,
|
||||
For each Unique::put operation there will be 2*log2(n+1) elements
|
||||
comparisons, where n runs from 1 tree_size (we assume that all added
|
||||
elements are different). Together this gives:
|
||||
|
||||
|
||||
n_compares = 2*(log2(2) + log2(3) + ... + log2(N+1)) = 2*log2((N+1)!)
|
||||
|
||||
|
||||
then cost(tree_creation) = n_compares*ROWID_COMPARE_COST;
|
||||
|
||||
Total cost of creating trees:
|
||||
(n_trees - 1)*max_size_tree_cost + non_max_size_tree_cost.
|
||||
|
||||
Approximate value of log2(N!) is calculated by log2_n_fact function.
|
||||
|
||||
|
||||
2. Cost of merging.
|
||||
If only one tree is created by Unique no merging will be necessary.
|
||||
Otherwise, we model execution of merge_many_buff function and count
|
||||
#of merges. (The reason behind this is that number of buffers is small,
|
||||
while size of buffers is big and we don't want to loose precision with
|
||||
#of merges. (The reason behind this is that number of buffers is small,
|
||||
while size of buffers is big and we don't want to loose precision with
|
||||
O(x)-style formula)
|
||||
|
||||
|
||||
3. If only one tree is created by Unique no disk io will happen.
|
||||
Otherwise, ceil(key_len*n_keys) disk seeks are necessary. We assume
|
||||
Otherwise, ceil(key_len*n_keys) disk seeks are necessary. We assume
|
||||
these will be random seeks.
|
||||
*/
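A rough standalone sketch of the three components enumerated above (tree creation, merge I/O, reading the result back) for one concrete input; the per-element tree overhead, the IO unit and the omission of the ROWID compare-cost factor are simplifying assumptions, so the numbers are only indicative.

#include <cmath>
#include <cstdio>

static double log2_n_fact(double x)
{ return (std::log(2 * M_PI * x) / 2 + x * std::log(x / M_E)) / M_LN2; }

int main()
{
  double nkeys = 1e6, key_size = 8, max_in_memory = 1 << 20;
  double tree_elem_overhead = 24;     /* assumed per-element TREE_ELEMENT cost */
  double io_size = 4096;              /* assumed IO unit */

  double max_elems_in_tree = max_in_memory / (tree_elem_overhead + key_size);
  double n_full_trees = std::floor(nkeys / max_elems_in_tree);
  double last_tree_elems = nkeys - n_full_trees * max_elems_in_tree;

  /* 1. tree creation: 2*log2((N+1)!) comparisons per tree
        (the ROWID compare-cost factor is left out here) */
  double create = n_full_trees * 2 * log2_n_fact(max_elems_in_tree + 1.0)
                + 2 * log2_n_fact(last_tree_elems + 1.0);

  /* 2. + 3. writing the trees out, merging and reading the result back */
  double io = 2 * std::ceil(key_size * nkeys / io_size);

  std::printf("trees=%.0f  create=%.0f  io=%.0f\n",
              n_full_trees + 1, create, io);
  return 0;
}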
|
||||
|
||||
double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size,
|
||||
double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size,
|
||||
ulong max_in_memory_size)
|
||||
{
|
||||
ulong max_elements_in_tree;
|
||||
ulong last_tree_elems;
|
||||
int n_full_trees; /* number of trees in unique - 1 */
|
||||
double result;
|
||||
|
||||
max_elements_in_tree=
|
||||
|
||||
max_elements_in_tree=
|
||||
max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size);
|
||||
|
||||
n_full_trees= nkeys / max_elements_in_tree;
|
||||
last_tree_elems= nkeys % max_elements_in_tree;
|
||||
|
||||
|
||||
/* Calculate cost of creating trees */
|
||||
result= 2*log2_n_fact(last_tree_elems + 1.0);
|
||||
if (n_full_trees)
|
||||
@ -285,13 +285,13 @@ double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size,
|
||||
|
||||
if (!n_full_trees)
|
||||
return result;
|
||||
|
||||
/*
|
||||
|
||||
/*
|
||||
There is more then one tree and merging is necessary.
|
||||
First, add cost of writing all trees to disk, assuming that all disk
|
||||
writes are sequential.
|
||||
*/
|
||||
result += DISK_SEEK_BASE_COST * n_full_trees *
|
||||
result += DISK_SEEK_BASE_COST * n_full_trees *
|
||||
ceil(((double) key_size)*max_elements_in_tree / IO_SIZE);
|
||||
result += DISK_SEEK_BASE_COST * ceil(((double) key_size)*last_tree_elems / IO_SIZE);
|
||||
|
||||
@ -303,8 +303,8 @@ double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size,
|
||||
return merge_cost;
|
||||
|
||||
result += merge_cost;
|
||||
/*
|
||||
Add cost of reading the resulting sequence, assuming there were no
|
||||
/*
|
||||
Add cost of reading the resulting sequence, assuming there were no
|
||||
duplicate elements.
|
||||
*/
|
||||
result += ceil((double)key_size*nkeys/IO_SIZE);
|
||||
@ -320,7 +320,7 @@ Unique::~Unique()
|
||||
}
|
||||
|
||||
|
||||
/* Write tree to disk; clear tree */
|
||||
/* Write tree to disk; clear tree */
|
||||
bool Unique::flush()
|
||||
{
|
||||
BUFFPEK file_ptr;
|
||||
@ -359,7 +359,7 @@ Unique::reset()
|
||||
}
|
||||
elements= 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
The comparison function, passed to queue_init() in merge_walk() must
|
||||
use comparison function of Uniques::tree, but compare members of struct
|
||||
@ -386,7 +386,7 @@ C_MODE_END
|
||||
|
||||
/*
|
||||
DESCRIPTION
|
||||
Function is very similar to merge_buffers, but instead of writing sorted
|
||||
Function is very similar to merge_buffers, but instead of writing sorted
|
||||
unique keys to the output file, it invokes walk_action for each key.
|
||||
This saves I/O if you need to pass through all unique keys only once.
|
||||
SYNOPSIS
|
||||
@ -601,7 +601,7 @@ bool Unique::get(TABLE *table)
|
||||
bool error=1;
|
||||
|
||||
/* Open cached file if it isn't open */
|
||||
outfile=table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
|
||||
outfile=table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
|
||||
MYF(MY_ZEROFILL));
|
||||
|
||||
if (!outfile || ! my_b_inited(outfile) &&
|
||||
@ -618,7 +618,7 @@ bool Unique::get(TABLE *table)
|
||||
sort_param.keys= max_in_memory_size / sort_param.sort_length;
|
||||
sort_param.not_killable=1;
|
||||
|
||||
if (!(sort_buffer=(uchar*) my_malloc((sort_param.keys+1) *
|
||||
if (!(sort_buffer=(uchar*) my_malloc((sort_param.keys+1) *
|
||||
sort_param.sort_length,
|
||||
MYF(0))))
|
||||
return 1;
|
||||
@ -633,7 +633,7 @@ bool Unique::get(TABLE *table)
|
||||
goto err;
|
||||
if (merge_buffers(&sort_param, &file, outfile, sort_buffer, file_ptr,
|
||||
file_ptr, file_ptr+maxbuffer,0))
|
||||
goto err;
|
||||
goto err;
|
||||
error=0;
|
||||
err:
|
||||
x_free((gptr) sort_buffer);
|
||||
|
@ -174,6 +174,10 @@ int _mi_ft_cmp(MI_INFO *info, uint keynr, const byte *rec1, const byte *rec2)
|
||||
FT_SEG_ITERATOR ftsi1, ftsi2;
|
||||
CHARSET_INFO *cs=info->s->keyinfo[keynr].seg->charset;
|
||||
DBUG_ENTER("_mi_ft_cmp");
|
||||
#ifndef MYSQL_HAS_TRUE_CTYPE_IMPLEMENTATION
|
||||
if (cs->mbmaxlen > 1)
|
||||
DBUG_RETURN(THOSE_TWO_DAMN_KEYS_ARE_REALLY_DIFFERENT);
|
||||
#endif
|
||||
|
||||
_mi_ft_segiterator_init(info, keynr, rec1, &ftsi1);
|
||||
_mi_ft_segiterator_init(info, keynr, rec2, &ftsi2);
|
||||
|
@ -276,7 +276,8 @@ static int d_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
|
||||
if (subkeys == -1)
|
||||
{
|
||||
/* the last entry in sub-tree */
|
||||
_mi_dispose(info, keyinfo, root,DFLT_INIT_HITS);
|
||||
if (_mi_dispose(info, keyinfo, root,DFLT_INIT_HITS))
|
||||
DBUG_RETURN(-1);
|
||||
/* fall through to normal delete */
|
||||
}
|
||||
else
|
||||
|
@ -1159,7 +1159,7 @@ static int compare_huff_elements(void *not_used __attribute__((unused)),
|
||||
static void check_counts(HUFF_COUNTS *huff_counts, uint trees,
|
||||
my_off_t records)
|
||||
{
|
||||
uint space_fields,fill_zero_fields,field_count[(int) FIELD_VARCHAR+1];
|
||||
uint space_fields,fill_zero_fields,field_count[(int) FIELD_enum_val_count];
|
||||
my_off_t old_length,new_length,length;
|
||||
DBUG_ENTER("check_counts");
|
||||
|
||||
|
@ -376,7 +376,10 @@ pthread_handler_t thr_find_all_keys(void *arg)
|
||||
{
|
||||
if (my_init_dynamic_array(&info->buffpek, sizeof(BUFFPEK),
|
||||
maxbuffer, maxbuffer/2))
|
||||
{
|
||||
my_free((gptr) sort_keys,MYF(0));
|
||||
sort_keys= (uchar **) NULL; /* for err: label */
|
||||
}
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
@ -125,6 +125,18 @@ public:
|
||||
*/
|
||||
void setErrorCode(int code);
|
||||
|
||||
/**
|
||||
* Returns the error string.
|
||||
*/
|
||||
char* getErrorStr();
|
||||
|
||||
/**
|
||||
* Sets the error string.
|
||||
*
|
||||
* @param str the error string.
|
||||
*/
|
||||
void setErrorStr(char* str);
|
||||
|
||||
/**
|
||||
* Parse logstring parameters
|
||||
*
|
||||
@ -195,6 +207,7 @@ private:
|
||||
|
||||
const char* m_pDateTimeFormat;
|
||||
int m_errorCode;
|
||||
char* m_errorStr;
|
||||
|
||||
// for handling repeated messages
|
||||
unsigned m_count_repeated_messages;
|
||||
|
@ -178,8 +178,11 @@ public:
|
||||
* Add a new handler
|
||||
*
|
||||
* @param logstring string describing the handler to add
|
||||
* @param err OS errno in event of error
|
||||
* @param len max length of errStr buffer
|
||||
* @param errStr logger error string in event of error
|
||||
*/
|
||||
bool addHandler(const BaseString &logstring);
|
||||
bool addHandler(const BaseString &logstring, int *err, int len, char* errStr);
|
||||
|
||||
/**
|
||||
* Remove a log handler.
|
||||
|
@ -992,6 +992,22 @@ extern "C" {
|
||||
int ndb_mgm_alloc_nodeid(NdbMgmHandle handle,
|
||||
unsigned version, int nodetype);
|
||||
|
||||
/**
|
||||
* End Session
|
||||
*
|
||||
* This function tells the mgm server to free all resources associated with
|
||||
* this connection. It will also close it.
|
||||
*
|
||||
* This differs from just disconnecting as we now synchronously clean up,
|
||||
* so that a quickly restarting server that needs the same node id can
|
||||
* get it when it restarts.
|
||||
*
|
||||
* @param handle NDB management handle
|
||||
* @return 0 on success
|
||||
*
|
||||
* @note you still have to destroy the NdbMgmHandle.
|
||||
*/
|
||||
int ndb_mgm_end_session(NdbMgmHandle handle);
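A hedged usage sketch for the new call, following the note above that the handle still has to be destroyed; the header name and connect string are placeholders.

#include <mgmapi.h>   /* NDB MGM API; header name/path may differ per install */

int end_mgm_session_example(const char *connect_string)
{
  NdbMgmHandle h = ndb_mgm_create_handle();
  if (!h)
    return -1;
  ndb_mgm_set_connectstring(h, connect_string);
  if (ndb_mgm_connect(h, 0, 0, 0) < 0)
  {
    ndb_mgm_destroy_handle(&h);
    return -1;
  }
  /* ... use the session, e.g. allocate a node id ... */
  ndb_mgm_end_session(h);      /* synchronously free server-side resources */
  ndb_mgm_disconnect(h);
  ndb_mgm_destroy_handle(&h);  /* still required, per the note above */
  return 0;
}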
|
||||
|
||||
/**
|
||||
* Get the node id of the mgm server we're connected to
|
||||
|
@ -78,6 +78,7 @@ public:
|
||||
const char *get_connectstring(char *buf, int buf_sz) const;
|
||||
NdbMgmHandle get_mgmHandle() { return m_handle; };
|
||||
NdbMgmHandle* get_mgmHandlePtr() { return &m_handle; };
|
||||
void end_session(bool end) { m_end_session= end; };
|
||||
|
||||
Uint32 get_configuration_nodeid() const;
|
||||
private:
|
||||
@ -92,6 +93,8 @@ private:
|
||||
void setError(ErrorType, const char * errorMsg);
|
||||
|
||||
Uint32 _ownNodeId;
|
||||
bool m_end_session;
|
||||
|
||||
/*
|
||||
Uint32 m_mgmd_port;
|
||||
const char *m_mgmd_host;
|
||||
|
@ -187,6 +187,7 @@ FileLogHandler::setParam(const BaseString ¶m, const BaseString &value){
|
||||
return setMaxSize(value);
|
||||
if(param == "maxfiles")
|
||||
return setMaxFiles(value);
|
||||
setErrorStr("Invalid parameter");
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -196,16 +197,18 @@ FileLogHandler::setFilename(const BaseString &filename) {
|
||||
if(m_pLogFile)
|
||||
delete m_pLogFile;
|
||||
m_pLogFile = new File_class(filename.c_str(), "a+");
|
||||
open();
|
||||
return true;
|
||||
return open();
|
||||
}
|
||||
|
||||
bool
|
||||
FileLogHandler::setMaxSize(const BaseString &size) {
|
||||
char *end;
|
||||
long val = strtol(size.c_str(), &end, 0); /* XXX */
|
||||
if(size.c_str() == end)
|
||||
if(size.c_str() == end || val < 0)
|
||||
{
|
||||
setErrorStr("Invalid file size");
|
||||
return false;
|
||||
}
|
||||
if(end[0] == 'M')
|
||||
val *= 1024*1024;
|
||||
if(end[0] == 'k')
|
||||
@ -220,8 +223,11 @@ bool
|
||||
FileLogHandler::setMaxFiles(const BaseString &files) {
|
||||
char *end;
|
||||
long val = strtol(files.c_str(), &end, 0);
|
||||
if(files.c_str() == end)
|
||||
if(files.c_str() == end || val < 1)
|
||||
{
|
||||
setErrorStr("Invalid maximum number of files");
|
||||
return false;
|
||||
}
|
||||
m_maxNoFiles = val;
|
||||
|
||||
return true;
|
||||
@ -230,6 +236,9 @@ FileLogHandler::setMaxFiles(const BaseString &files) {
|
||||
bool
|
||||
FileLogHandler::checkParams() {
|
||||
if(m_pLogFile == NULL)
|
||||
{
|
||||
setErrorStr("Log file cannot be null.");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
@ -23,7 +23,8 @@
|
||||
//
|
||||
LogHandler::LogHandler() :
|
||||
m_pDateTimeFormat("%d-%.2d-%.2d %.2d:%.2d:%.2d"),
|
||||
m_errorCode(0)
|
||||
m_errorCode(0),
|
||||
m_errorStr(NULL)
|
||||
{
|
||||
m_max_repeat_frequency= 3; // repeat messages maximum every 3 seconds
|
||||
m_count_repeated_messages= 0;
|
||||
@ -155,6 +156,19 @@ LogHandler::setErrorCode(int code)
|
||||
m_errorCode = code;
|
||||
}
|
||||
|
||||
|
||||
char*
|
||||
LogHandler::getErrorStr()
|
||||
{
|
||||
return m_errorStr;
|
||||
}
|
||||
|
||||
void
|
||||
LogHandler::setErrorStr(char* str)
|
||||
{
|
||||
m_errorStr= str;
|
||||
}
|
||||
|
||||
bool
|
||||
LogHandler::parseParams(const BaseString &_params) {
|
||||
Vector<BaseString> v_args;
|
||||
@ -165,9 +179,18 @@ LogHandler::parseParams(const BaseString &_params) {
|
||||
for(size_t i=0; i < v_args.size(); i++) {
|
||||
Vector<BaseString> v_param_value;
|
||||
if(v_args[i].split(v_param_value, "=", 2) != 2)
|
||||
{
|
||||
ret = false;
|
||||
else if (!setParam(v_param_value[0], v_param_value[1]))
|
||||
ret = false;
|
||||
setErrorStr("Can't find key=value pair.");
|
||||
}
|
||||
else
|
||||
{
|
||||
v_param_value[0].trim(" \t");
|
||||
if (!setParam(v_param_value[0], v_param_value[1]))
|
||||
{
|
||||
ret = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if(!checkParams())
|
||||
|
@ -167,7 +167,7 @@ Logger::addHandler(LogHandler* pHandler)
|
||||
}
|
||||
|
||||
bool
|
||||
Logger::addHandler(const BaseString &logstring) {
|
||||
Logger::addHandler(const BaseString &logstring, int *err, int len, char* errStr) {
|
||||
size_t i;
|
||||
Vector<BaseString> logdest;
|
||||
Vector<LogHandler *>loghandlers;
|
||||
@ -200,9 +200,18 @@ Logger::addHandler(const BaseString &logstring) {
|
||||
handler = new ConsoleLogHandler();
|
||||
|
||||
if(handler == NULL)
|
||||
{
|
||||
snprintf(errStr,len,"Could not create log destination: %s",
|
||||
logdest[i].c_str());
|
||||
DBUG_RETURN(false);
|
||||
}
|
||||
if(!handler->parseParams(params))
|
||||
{
|
||||
*err= handler->getErrorCode();
|
||||
if(handler->getErrorStr())
|
||||
strncpy(errStr, handler->getErrorStr(), len);
|
||||
DBUG_RETURN(false);
|
||||
}
|
||||
loghandlers.push_back(handler);
|
||||
}
|
||||
|
||||
|
@ -154,5 +154,6 @@ SysLogHandler::setFacility(const BaseString &facility) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
setErrorStr("Invalid syslog facility name");
|
||||
return false;
|
||||
}
|
||||
|
@ -52,6 +52,7 @@ ConfigRetriever::ConfigRetriever(const char * _connect_string,
|
||||
m_version = version;
|
||||
m_node_type = node_type;
|
||||
_ownNodeId= 0;
|
||||
m_end_session= true;
|
||||
|
||||
m_handle= ndb_mgm_create_handle();
|
||||
|
||||
@ -76,6 +77,8 @@ ConfigRetriever::~ConfigRetriever()
|
||||
{
|
||||
DBUG_ENTER("ConfigRetriever::~ConfigRetriever");
|
||||
if (m_handle) {
|
||||
if(m_end_session)
|
||||
ndb_mgm_end_session(m_handle);
|
||||
ndb_mgm_disconnect(m_handle);
|
||||
ndb_mgm_destroy_handle(&m_handle);
|
||||
}
|
||||
|
@ -42,6 +42,8 @@ SocketServer::~SocketServer() {
|
||||
delete m_sessions[i].m_session;
|
||||
}
|
||||
for(i = 0; i<m_services.size(); i++){
|
||||
if(m_services[i].m_socket)
|
||||
NDB_CLOSE_SOCKET(m_services[i].m_socket);
|
||||
delete m_services[i].m_service;
|
||||
}
|
||||
}
|
||||
|
@ -307,8 +307,11 @@ int main(int argc, char** argv)
|
||||
/**
|
||||
* We no longer need the mgm connection in this process
|
||||
* (as we are the angel, not ndb)
|
||||
*
|
||||
* We don't want to purge any allocated resources (nodeid), so
|
||||
* we set that option to false
|
||||
*/
|
||||
theConfig->closeConfiguration();
|
||||
theConfig->closeConfiguration(false);
|
||||
|
||||
int status = 0, error_exit = 0, signum = 0;
|
||||
while(waitpid(child, &status, 0) != child);
|
||||
|
@ -172,7 +172,8 @@ Configuration::~Configuration(){
|
||||
}
|
||||
|
||||
void
|
||||
Configuration::closeConfiguration(){
|
||||
Configuration::closeConfiguration(bool end_session){
|
||||
m_config_retriever->end_session(end_session);
|
||||
if (m_config_retriever) {
|
||||
delete m_config_retriever;
|
||||
}
|
||||
|
@ -35,7 +35,7 @@ public:
|
||||
|
||||
void fetch_configuration();
|
||||
void setupConfiguration();
|
||||
void closeConfiguration();
|
||||
void closeConfiguration(bool end_session= true);
|
||||
|
||||
bool lockPagesInMainMemory() const;
|
||||
|
||||
|
@ -2321,4 +2321,23 @@ int ndb_mgm_report_event(NdbMgmHandle handle, Uint32 *data, Uint32 length)
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
extern "C"
|
||||
int ndb_mgm_end_session(NdbMgmHandle handle)
|
||||
{
|
||||
DBUG_ENTER("ndb_mgm_end_session");
|
||||
CHECK_HANDLE(handle, 0);
|
||||
CHECK_CONNECTED(handle, 0);
|
||||
|
||||
SocketOutputStream s_output(handle->socket);
|
||||
s_output.println("end session");
|
||||
s_output.println("");
|
||||
|
||||
SocketInputStream in(handle->socket, handle->read_timeout);
|
||||
char buf[32];
|
||||
|
||||
in.gets(buf, sizeof(buf));
|
||||
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
template class Vector<const ParserRow<ParserDummy>*>;
|
||||
|
@ -928,26 +928,21 @@ CommandInterpreter::executeShutdown(char* parameters)
|
||||
int result = 0;
|
||||
result = ndb_mgm_stop(m_mgmsrv, 0, 0);
|
||||
if (result < 0) {
|
||||
ndbout << "Shutdown off NDB Cluster storage node(s) failed." << endl;
|
||||
ndbout << "Shutdown of NDB Cluster node(s) failed." << endl;
|
||||
printError();
|
||||
return result;
|
||||
}
|
||||
|
||||
ndbout << result << " NDB Cluster storage node(s) have shutdown." << endl;
|
||||
ndbout << result << " NDB Cluster node(s) have shutdown." << endl;
|
||||
|
||||
int mgm_id= 0;
|
||||
for(int i=0; i < state->no_of_nodes; i++) {
|
||||
if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_MGM &&
|
||||
state->node_states[i].version != 0){
|
||||
if (mgm_id == 0)
|
||||
mgm_id= state->node_states[i].node_id;
|
||||
else {
|
||||
ndbout << "Unable to locate management server, "
|
||||
<< "shutdown manually with <id> STOP"
|
||||
<< endl;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
mgm_id= ndb_mgm_get_mgmd_nodeid(m_mgmsrv);
|
||||
if (mgm_id == 0)
|
||||
{
|
||||
ndbout << "Unable to locate management server, "
|
||||
<< "shutdown manually with <id> STOP"
|
||||
<< endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
result = ndb_mgm_stop(m_mgmsrv, 1, &mgm_id);
|
||||
|
@ -60,6 +60,9 @@
|
||||
|
||||
#include <SignalSender.hpp>
|
||||
|
||||
extern bool g_StopServer;
|
||||
extern bool g_RestartServer;
|
||||
|
||||
//#define MGM_SRV_DEBUG
|
||||
#ifdef MGM_SRV_DEBUG
|
||||
#define DEBUG(x) do ndbout << x << endl; while(0)
|
||||
@ -179,6 +182,8 @@ MgmtSrvr::startEventLog()
|
||||
}
|
||||
|
||||
const char * tmp;
|
||||
char errStr[100];
|
||||
int err= 0;
|
||||
BaseString logdest;
|
||||
char *clusterLog= NdbConfig_ClusterLogFileName(_ownNodeId);
|
||||
NdbAutoPtr<char> tmp_aptr(clusterLog);
|
||||
@ -192,9 +197,17 @@ MgmtSrvr::startEventLog()
|
||||
logdest.assfmt("FILE:filename=%s,maxsize=1000000,maxfiles=6",
|
||||
clusterLog);
|
||||
}
|
||||
if(!g_eventLogger.addHandler(logdest)) {
|
||||
errStr[0]='\0';
|
||||
if(!g_eventLogger.addHandler(logdest, &err, sizeof(errStr), errStr)) {
|
||||
ndbout << "Warning: could not add log destination \""
|
||||
<< logdest.c_str() << "\"" << endl;
|
||||
<< logdest.c_str() << "\". Reason: ";
|
||||
if(err)
|
||||
ndbout << strerror(err);
|
||||
if(err && errStr[0]!='\0')
|
||||
ndbout << ", ";
|
||||
if(errStr[0]!='\0')
|
||||
ndbout << errStr;
|
||||
ndbout << endl;
|
||||
}
|
||||
}
|
||||
|
||||
@ -373,7 +386,8 @@ MgmtSrvr::MgmtSrvr(SocketServer *socket_server,
|
||||
_ownReference(0),
|
||||
theSignalIdleList(NULL),
|
||||
theWaitState(WAIT_SUBSCRIBE_CONF),
|
||||
m_event_listner(this)
|
||||
m_event_listner(this),
|
||||
m_local_mgm_handle(0)
|
||||
{
|
||||
|
||||
DBUG_ENTER("MgmtSrvr::MgmtSrvr");
|
||||
@ -537,6 +551,8 @@ MgmtSrvr::check_start()
|
||||
bool
|
||||
MgmtSrvr::start(BaseString &error_string)
|
||||
{
|
||||
int mgm_connect_result;
|
||||
|
||||
DBUG_ENTER("MgmtSrvr::start");
|
||||
if (_props == NULL) {
|
||||
if (!check_start()) {
|
||||
@ -574,6 +590,13 @@ MgmtSrvr::start(BaseString &error_string)
|
||||
DBUG_RETURN(false);
|
||||
}
|
||||
|
||||
if((mgm_connect_result= connect_to_self()) < 0)
|
||||
{
|
||||
ndbout_c("Unable to connect to our own ndb_mgmd (Error %d)",
|
||||
mgm_connect_result);
|
||||
ndbout_c("This is probably a bug.");
|
||||
}
|
||||
|
||||
TransporterRegistry *reg = theFacade->get_registry();
|
||||
for(unsigned int i=0;i<reg->m_transporter_interface.size();i++) {
|
||||
BaseString msg;
|
||||
@ -831,9 +854,81 @@ MgmtSrvr::sendVersionReq(int v_nodeId, Uint32 &version, const char **address)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int MgmtSrvr::sendStopMgmd(NodeId nodeId,
|
||||
bool abort,
|
||||
bool stop,
|
||||
bool restart,
|
||||
bool nostart,
|
||||
bool initialStart)
|
||||
{
|
||||
const char* hostname;
|
||||
Uint32 port;
|
||||
BaseString connect_string;
|
||||
|
||||
{
|
||||
Guard g(m_configMutex);
|
||||
{
|
||||
ndb_mgm_configuration_iterator
|
||||
iter(* _config->m_configValues, CFG_SECTION_NODE);
|
||||
|
||||
if(iter.first()) return SEND_OR_RECEIVE_FAILED;
|
||||
if(iter.find(CFG_NODE_ID, nodeId)) return SEND_OR_RECEIVE_FAILED;
|
||||
if(iter.get(CFG_NODE_HOST, &hostname)) return SEND_OR_RECEIVE_FAILED;
|
||||
}
|
||||
{
|
||||
ndb_mgm_configuration_iterator
|
||||
iter(* _config->m_configValues, CFG_SECTION_NODE);
|
||||
|
||||
if(iter.first()) return SEND_OR_RECEIVE_FAILED;
|
||||
if(iter.find(CFG_NODE_ID, nodeId)) return SEND_OR_RECEIVE_FAILED;
|
||||
if(iter.get(CFG_MGM_PORT, &port)) return SEND_OR_RECEIVE_FAILED;
|
||||
}
|
||||
if( strlen(hostname) == 0 )
|
||||
return SEND_OR_RECEIVE_FAILED;
|
||||
}
|
||||
connect_string.assfmt("%s:%u",hostname,port);
|
||||
|
||||
DBUG_PRINT("info",("connect string: %s",connect_string.c_str()));
|
||||
|
||||
NdbMgmHandle h= ndb_mgm_create_handle();
|
||||
if ( h && connect_string.length() > 0 )
|
||||
{
|
||||
ndb_mgm_set_connectstring(h,connect_string.c_str());
|
||||
if(ndb_mgm_connect(h,1,0,0))
|
||||
{
|
||||
DBUG_PRINT("info",("failed ndb_mgm_connect"));
|
||||
return SEND_OR_RECEIVE_FAILED;
|
||||
}
|
||||
if(!restart)
|
||||
{
|
||||
if(ndb_mgm_stop(h, 1, (const int*)&nodeId) < 0)
|
||||
{
|
||||
return SEND_OR_RECEIVE_FAILED;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
int nodes[1];
|
||||
nodes[0]= (int)nodeId;
|
||||
if(ndb_mgm_restart2(h, 1, nodes, initialStart, nostart, abort) < 0)
|
||||
{
|
||||
return SEND_OR_RECEIVE_FAILED;
|
||||
}
|
||||
}
|
||||
}
|
||||
ndb_mgm_destroy_handle(&h);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Common method for handeling all STOP_REQ signalling that
|
||||
* is used by Stopping, Restarting and Single user commands
|
||||
*
|
||||
* In the event that we need to stop a mgmd, we create a mgm
|
||||
* client connection to that mgmd and stop it that way.
|
||||
* This allows us to stop mgm servers when there isn't any real
|
||||
* distributed communication up.
|
||||
*/
|
||||
|
||||
int MgmtSrvr::sendSTOP_REQ(NodeId nodeId,
@ -845,6 +940,8 @@ int MgmtSrvr::sendSTOP_REQ(NodeId nodeId,
                           bool nostart,
                           bool initialStart)
{
  int error = 0;

  stoppedNodes.clear();

  SignalSender ss(theFacade);
@ -883,18 +980,34 @@ int MgmtSrvr::sendSTOP_REQ(NodeId nodeId,
  NodeBitmask nodes;
  if (nodeId)
  {
    if(nodeId==getOwnNodeId())
    {
      if(restart)
        g_RestartServer= true;
      g_StopServer= true;
      return 0;
    }
    if(getNodeType(nodeId) == NDB_MGM_NODE_TYPE_NDB)
    {
      int r;
      if((r = okToSendTo(nodeId, true)) != 0)
        return r;
    }
    {
      if((r= okToSendTo(nodeId, true)) != 0)
        return r;
      if (ss.sendSignal(nodeId, &ssig) != SEND_OK)
        return SEND_OR_RECEIVE_FAILED;
    }
    else if(getNodeType(nodeId) == NDB_MGM_NODE_TYPE_MGM)
    {
      error= sendStopMgmd(nodeId, abort, stop, restart, nostart, initialStart);
      if(error==0)
        stoppedNodes.set(nodeId);
      return error;
    }
    else
      return WRONG_PROCESS_TYPE;
    nodes.set(nodeId);
  }
  else
  {
    while(getNextNodeId(&nodeId, NDB_MGM_NODE_TYPE_NDB))
    {
      if(okToSendTo(nodeId, true) == 0)
@ -904,9 +1017,17 @@ int MgmtSrvr::sendSTOP_REQ(NodeId nodeId,
        nodes.set(nodeId);
      }
    }
    nodeId= 0;
    while(getNextNodeId(&nodeId, NDB_MGM_NODE_TYPE_MGM))
    {
      if(nodeId==getOwnNodeId())
        continue;
      if(sendStopMgmd(nodeId, abort, stop, restart, nostart, initialStart)==0)
        stoppedNodes.set(nodeId);
    }
  }

  // now wait for the replies
  int error = 0;
  while (!nodes.isclear())
  {
    SimpleSignal *signal = ss.waitFor();
@ -2552,9 +2673,23 @@ void MgmtSrvr::transporter_connect(NDB_SOCKET_TYPE sockfd)
  }
}

int MgmtSrvr::set_connect_string(const char *str)
int MgmtSrvr::connect_to_self(void)
{
  return ndb_mgm_set_connectstring(m_config_retriever->get_mgmHandle(),str);
  int r= 0;
  m_local_mgm_handle= ndb_mgm_create_handle();
  snprintf(m_local_mgm_connect_string,sizeof(m_local_mgm_connect_string),
           "localhost:%u",getPort());
  ndb_mgm_set_connectstring(m_local_mgm_handle, m_local_mgm_connect_string);

  if((r= ndb_mgm_connect(m_local_mgm_handle, 0, 0, 0)) < 0)
  {
    ndb_mgm_destroy_handle(&m_local_mgm_handle);
    return r;
  }
  // TransporterRegistry now owns this NdbMgmHandle and will destroy it.
  theFacade->get_registry()->set_mgm_handle(m_local_mgm_handle);

  return 0;
}

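A quick size check, not stated in the change itself and assuming the configured port fits in 16 bits as a TCP port does: the largest string the snprintf() above can produce is "localhost:65535", i.e. 15 characters plus the terminating NUL, 16 bytes in total, so the 20-byte m_local_mgm_connect_string buffer declared in MgmtSrvr.hpp below is large enough and the connect string is never truncated.
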
@ -466,7 +466,7 @@ public:
  int getConnectionDbParameter(int node1, int node2, int param,
                               int *value, BaseString& msg);

  int set_connect_string(const char *str);
  int connect_to_self(void);

  void transporter_connect(NDB_SOCKET_TYPE sockfd);

@ -482,6 +482,13 @@ private:

  int send(SignalSender &ss, SimpleSignal &ssig, Uint32 node, Uint32 node_type);

  int sendStopMgmd(NodeId nodeId,
                   bool abort,
                   bool stop,
                   bool restart,
                   bool nostart,
                   bool initialStart);

  int sendSTOP_REQ(NodeId nodeId,
                   NodeBitmask &stoppedNodes,
                   Uint32 singleUserNodeId,
@ -625,6 +632,8 @@ private:
  // signal arrives.
  // We wait in receiveOptimisedResponse and signal in handleReceivedSignal.

  NdbMgmHandle m_local_mgm_handle;
  char m_local_mgm_connect_string[20];
  class TransporterFacade * theFacade;

  int sendVersionReq( int processId, Uint32 &version, const char **address);
@ -203,6 +203,8 @@ ParserRow<MgmApiSession> commands[] = {

  MGM_CMD("bye", &MgmApiSession::bye, ""),

  MGM_CMD("end session", &MgmApiSession::endSession, ""),

  MGM_CMD("set loglevel", &MgmApiSession::setLogLevel, ""),
    MGM_ARG("node", Int, Mandatory, "Node"),
    MGM_ARG("category", Int, Mandatory, "Event category"),
@ -719,10 +721,21 @@ MgmApiSession::dumpState(Parser<MgmApiSession>::Context &,

void
MgmApiSession::bye(Parser<MgmApiSession>::Context &,
                   Properties const &) {
                   Properties const &) {
  m_stop = true;
}

void
MgmApiSession::endSession(Parser<MgmApiSession>::Context &,
                          Properties const &) {
  if(m_allocated_resources)
    delete m_allocated_resources;

  m_allocated_resources= new MgmtSrvr::Allocated_resources(m_mgmsrv);

  m_output->println("end session reply");
}
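
Illustration only, based on the command table entry and the reply string added above, with the framing assumed to follow the existing MGM text protocol rather than anything stated in this diff: a client that wants to drop its allocated resources without closing the TCP connection would send the line "end session" followed by an empty line, and this new handler answers with the single line "end session reply".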

void
MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
                                  Properties const &args) {

@ -79,6 +79,7 @@ public:
  void start(Parser_t::Context &ctx, const class Properties &args);
  void startAll(Parser_t::Context &ctx, const class Properties &args);
  void bye(Parser_t::Context &ctx, const class Properties &args);
  void endSession(Parser_t::Context &ctx, const class Properties &args);
  void setLogLevel(Parser_t::Context &ctx, const class Properties &args);
  void setClusterLogLevel(Parser_t::Context &ctx,
                          const class Properties &args);
Some files were not shown because too many files have changed in this diff