Merge mysql.com:/usr/local/home/marty/MySQL/mysql-5.1-new
into mysql.com:/usr/local/home/marty/MySQL/mysql-5.1-new-ndb

commit a2b3fa5ebf
@@ -116,6 +116,7 @@ valgrind_flags="$valgrind_flags -DMYSQL_SERVER_SUFFIX=-valgrind-max"
# Used in -debug builds
debug_cflags="-DUNIV_MUST_NOT_INLINE -DEXTRA_DEBUG -DFORCE_INIT_OF_VARS "
debug_cflags="$debug_cflags -DSAFEMALLOC -DPEDANTIC_SAFEMALLOC -DSAFE_MUTEX"
error_inject="--with-error-inject "
#
# Base C++ flags for all builds
base_cxxflags="-felide-constructors -fno-exceptions -fno-rtti"
@@ -4,6 +4,6 @@ path=`dirname $0`
. "$path/SETUP.sh" "$@" --with-debug=full

extra_flags="$pentium_cflags $debug_cflags"
extra_configs="$pentium_configs $debug_configs $max_configs"
extra_configs="$pentium_configs $debug_configs $max_configs $error_inject"

. "$path/FINISH.sh"
@@ -2241,8 +2241,10 @@ print_table_data(MYSQL_RES *result)
MYSQL_ROW cur;
MYSQL_FIELD *field;
bool *num_flag;
bool *not_null_flag;

num_flag=(bool*) my_alloca(sizeof(bool)*mysql_num_fields(result));
not_null_flag=(bool*) my_alloca(sizeof(bool)*mysql_num_fields(result));
if (info_flag)
{
print_field_types(result);
@@ -2260,7 +2262,7 @@ print_table_data(MYSQL_RES *result)
length=max(length,field->max_length);
if (length < 4 && !IS_NOT_NULL(field->flags))
length=4; // Room for "NULL"
field->max_length=length+1;
field->max_length=length;
separator.fill(separator.length()+length+2,'-');
separator.append('+');
}
@@ -2272,10 +2274,11 @@ print_table_data(MYSQL_RES *result)
(void) tee_fputs("|", PAGER);
for (uint off=0; (field = mysql_fetch_field(result)) ; off++)
{
tee_fprintf(PAGER, " %-*s|",(int) min(field->max_length,
tee_fprintf(PAGER, " %-*s |",(int) min(field->max_length,
MAX_COLUMN_LENGTH),
field->name);
num_flag[off]= IS_NUM(field->type);
not_null_flag[off]= IS_NOT_NULL(field->flags);
}
(void) tee_fputs("\n", PAGER);
tee_puts((char*) separator.ptr(), PAGER);
@@ -2295,7 +2298,8 @@ print_table_data(MYSQL_RES *result)
uint visible_length;
uint extra_padding;

if (lengths[off] == 0)
/* If this column may have a null value, use "NULL" for empty. */
if (! not_null_flag[off] && (lengths[off] == 0))
{
buffer= "NULL";
data_length= 4;
@@ -2335,6 +2339,7 @@ print_table_data(MYSQL_RES *result)
}
tee_puts((char*) separator.ptr(), PAGER);
my_afree((gptr) num_flag);
my_afree((gptr) not_null_flag);
}

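The print_table_data() hunks above add a per-column not_null_flag beside num_flag, so that an empty cell is rendered as "NULL" only when the column is actually nullable; NOT NULL columns keep their literal value, and max_length no longer reserves a stray extra character. A minimal sketch of the new decision, as a hypothetical standalone helper rather than the client's real code:

    /* Sketch: mirrors `! not_null_flag[off] && lengths[off] == 0` from the hunk. */
    static const char *cell_text(const char *buffer, unsigned long length,
                                 bool column_is_not_null)
    {
      if (!column_is_not_null && length == 0)
        return "NULL";   /* nullable column, no bytes returned: show placeholder */
      return buffer;     /* NOT NULL columns never show "NULL" */
    }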
@@ -2349,11 +2354,8 @@ tee_print_sized_data(const char *data, unsigned int data_length, unsigned int to
unsigned int i;
const char *p;

total_bytes_to_send -= 1;
/* Off by one, perhaps mistakenly accounting for a terminating NUL. */

if (right_justified)
for (i= 0; i < (total_bytes_to_send - data_length); i++)
for (i= data_length; i < total_bytes_to_send; i++)
tee_putc((int)' ', PAGER);

for (i= 0, p= data; i < data_length; i+= 1, p+= 1)
@@ -2365,7 +2367,7 @@ tee_print_sized_data(const char *data, unsigned int data_length, unsigned int to
}

if (! right_justified)
for (i= 0; i < (total_bytes_to_send - data_length); i++)
for (i= data_length; i < total_bytes_to_send; i++)
tee_putc((int)' ', PAGER);
}

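The tee_print_sized_data() hunks drop the `total_bytes_to_send -= 1` off-by-one and rewrite both padding loops. The old bound `i < (total_bytes_to_send - data_length)` subtracts unsigned ints, so a value longer than its column wraps the difference around to a huge iteration count; counting i from data_length up to total_bytes_to_send emits the same number of spaces in the normal case and none in the overlong case. A self-contained illustration (a hypothetical pad_to(), not the client function):

    #include <cstdio>

    /* Emit (total - len) spaces when len < total, and nothing otherwise;
       no unsigned subtraction that could wrap around. */
    static void pad_to(unsigned int len, unsigned int total)
    {
      for (unsigned int i = len; i < total; i++)
        putchar(' ');
    }

    int main()
    {
      pad_to(3, 8);   /* prints five spaces */
      pad_to(9, 8);   /* prints nothing; the old (total - len) form would wrap */
      return 0;
    }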
@@ -50,6 +50,7 @@
#include "mysql.h"
#include "mysql_version.h"
#include "mysqld_error.h"
#include "sql/ha_ndbcluster_tables.h"

/* Exit codes */

@@ -134,7 +135,6 @@ static CHARSET_INFO *charset_info= &my_charset_latin1;
const char *default_dbug_option="d:t:o,/tmp/mysqldump.trace";
/* do we met VIEWs during tables scaning */
my_bool was_views= 0;

const char *compatible_mode_names[]=
{
"MYSQL323", "MYSQL40", "POSTGRESQL", "ORACLE", "MSSQL", "DB2",
@@ -2937,6 +2937,8 @@ static int dump_all_tables_in_db(char *database)
afterdot= strmov(hash_key, database);
*afterdot++= '.';

if (!strcmp(database, NDB_REP_DB)) /* Skip cluster internal database */
return 0;
if (init_dumping(database))
return 1;
if (opt_xml)
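The dump_all_tables_in_db() hunk makes mysqldump return early for NDB's cluster-internal database, which is why sql/ha_ndbcluster_tables.h (the home of NDB_REP_DB) is now included. The guard reduces to a plain string compare; sketched here with the constant passed in rather than hard-coding its value:

    #include <string.h>

    /* Sketch of the early-exit guard; ndb_rep_db stands in for NDB_REP_DB. */
    static int should_skip_database(const char *database, const char *ndb_rep_db)
    {
      return strcmp(database, ndb_rep_db) == 0; /* cluster-internal schema */
    }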
@@ -361,7 +361,8 @@ AC_CACHE_VAL(mysql_cv_termcap_lib,
[AC_CHECK_LIB(ncurses, tgetent, mysql_cv_termcap_lib=libncurses,
[AC_CHECK_LIB(curses, tgetent, mysql_cv_termcap_lib=libcurses,
[AC_CHECK_LIB(termcap, tgetent, mysql_cv_termcap_lib=libtermcap,
mysql_cv_termcap_lib=NOT_FOUND)])])])
[AC_CHECK_LIB(tinfo, tgetent, mysql_cv_termcap_lib=libtinfo,
mysql_cv_termcap_lib=NOT_FOUND)])])])])
AC_MSG_CHECKING(for termcap functions library)
if test "$mysql_cv_termcap_lib" = "NOT_FOUND"; then
AC_MSG_ERROR([No curses/termcap library found])
@@ -369,6 +370,8 @@ elif test "$mysql_cv_termcap_lib" = "libtermcap"; then
TERMCAP_LIB=-ltermcap
elif test "$mysql_cv_termcap_lib" = "libncurses"; then
TERMCAP_LIB=-lncurses
elif test "$mysql_cv_termcap_lib" = "libtinfo"; then
TERMCAP_LIB=-ltinfo
else
TERMCAP_LIB=-lcurses
fi
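These configure hunks add libtinfo as a final fallback for tgetent() and map it to TERMCAP_LIB=-ltinfo, covering distributions that split terminfo out of ncurses. AC_CHECK_LIB works by linking a tiny probe against each candidate library in turn; a hand-rolled equivalent might look like this (autoconf generates its own conftest, so this is only illustrative):

    extern "C" char tgetent();             /* declared blind, as conftest does */
    int main() { return (int) tgetent(); } /* only has to link, not to run */

Linking this probe against -lncurses, then -lcurses, -ltermcap, and finally -ltinfo, and keeping the first library for which the link succeeds, is essentially what the macro chain above does.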
configure.in (21 changed lines)
@@ -666,6 +666,7 @@ else
AC_MSG_RESULT([no])
fi


MYSQL_SYS_LARGEFILE

# Types that must be checked AFTER large file support is checked
@@ -774,6 +775,9 @@ AC_SUBST(WRAPLIBS)
if test "$TARGET_LINUX" = "true"; then
AC_MSG_CHECKING([for atomic operations])

AC_LANG_SAVE
AC_LANG_CPLUSPLUS

atom_ops=
AC_TRY_RUN([
#include <asm/atomic.h>
@@ -809,6 +813,8 @@ int main()
if test -z "$atom_ops"; then atom_ops="no"; fi
AC_MSG_RESULT($atom_ops)

AC_LANG_RESTORE

AC_ARG_WITH(pstack,
[ --with-pstack Use the pstack backtrace library],
[ USE_PSTACK=$withval ],
@@ -1608,6 +1614,21 @@ else
CXXFLAGS="$OPTIMIZE_CXXFLAGS -DDBUG_OFF $CXXFLAGS"
fi

# If we should allow error injection tests
AC_ARG_WITH(error-inject,
[ --with-error-inject Enable error injection in MySQL Server],
[ with_error_inject=$withval ],
[ with_error_inject=no ])

if test $with_debug != "no"
then
if test "$with_error_inject" = "yes"
then
AC_DEFINE([ERROR_INJECT_SUPPORT], [1],
[Enable error injection in MySQL Server])
fi
fi

AC_ARG_WITH([fast-mutexes],
AC_HELP_STRING([--with-fast-mutexes],
[Compile with fast mutexes (default is disabled)]),
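The new --with-error-inject option only takes effect together with --with-debug; in that case configure defines ERROR_INJECT_SUPPORT, which server code can use to compile failure hooks in or out. A sketch of the usual compile-time guard (the macro and hook names below are invented for illustration, not taken from the server):

    #ifdef ERROR_INJECT_SUPPORT
    /* Debug builds configured with --with-error-inject compile the hook in. */
    int error_inject_fire(const char *tag);  /* returns nonzero to force an error */
    #define ERROR_INJECT(tag) error_inject_fire(tag)
    #else
    /* Production builds: the hook disappears entirely. */
    #define ERROR_INJECT(tag) 0
    #endif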
@@ -559,7 +559,7 @@ extern File my_register_filename(File fd, const char *FileName,
enum file_type type_of_file,
uint error_message_number, myf MyFlags);
extern File my_create(const char *FileName,int CreateFlags,
int AccsesFlags, myf MyFlags);
int AccessFlags, myf MyFlags);
extern int my_close(File Filedes,myf MyFlags);
extern File my_dup(File file, myf MyFlags);
extern int my_mkdir(const char *dir, int Flags, myf MyFlags);
@@ -43,6 +43,8 @@ dist-hook:
$(distdir)/r \
$(distdir)/include \
$(distdir)/std_data \
$(distdir)/std_data/ndb_backup50 \
$(distdir)/std_data/ndb_backup51 \
$(distdir)/lib
-$(INSTALL_DATA) $(srcdir)/t/*.def $(distdir)/t
$(INSTALL_DATA) $(srcdir)/t/*.test $(distdir)/t
@@ -63,6 +65,8 @@ dist-hook:
$(INSTALL_DATA) $(srcdir)/std_data/*.pem $(distdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/*.frm $(distdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/*.cnf $(distdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/ndb_backup50/BACKUP* $(distdir)/std_data/ndb_backup50
$(INSTALL_DATA) $(srcdir)/std_data/ndb_backup51/BACKUP* $(distdir)/std_data/ndb_backup51
$(INSTALL_DATA) $(srcdir)/lib/init_db.sql $(distdir)/lib
$(INSTALL_DATA) $(srcdir)/lib/*.pl $(distdir)/lib

@@ -74,6 +78,8 @@ install-data-local:
$(DESTDIR)$(testdir)/r \
$(DESTDIR)$(testdir)/include \
$(DESTDIR)$(testdir)/std_data \
$(DESTDIR)$(testdir)/std_data/ndb_backup50 \
$(DESTDIR)$(testdir)/std_data/ndb_backup51 \
$(DESTDIR)$(testdir)/lib
$(INSTALL_DATA) $(srcdir)/README $(DESTDIR)$(testdir)
-$(INSTALL_DATA) $(srcdir)/t/*.def $(DESTDIR)$(testdir)/t
@@ -98,6 +104,8 @@ install-data-local:
$(INSTALL_DATA) $(srcdir)/std_data/*.pem $(DESTDIR)$(testdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/*.frm $(DESTDIR)$(testdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/*.cnf $(DESTDIR)$(testdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/ndb_backup50/BACKUP* $(DESTDIR)$(testdir)/std_data/ndb_backup50
$(INSTALL_DATA) $(srcdir)/std_data/ndb_backup51/BACKUP* $(DESTDIR)$(testdir)/std_data/ndb_backup51
$(INSTALL_DATA) $(srcdir)/lib/init_db.sql $(DESTDIR)$(testdir)/lib
$(INSTALL_DATA) $(srcdir)/lib/*.pl $(DESTDIR)$(testdir)/lib

@@ -1524,6 +1524,7 @@ sub ndbcluster_start ($) {
if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
["--port=$opt_ndbcluster_port",
"--data-dir=$opt_vardir",
"--character-sets-dir=$path_charsetsdir",
"--verbose=2",
"--core"],
"", "/dev/null", "", "") )
@@ -1230,7 +1230,7 @@ start_ndbcluster()
then
NDBCLUSTER_EXTRA_OPTS="--small"
fi
OPTS="$NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --verbose=$NDB_VERBOSE --initial --relative-config-data-dir --core"
OPTS="$NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --character-sets-dir=$CHARSETSDIR --verbose=$NDB_VERBOSE --initial --relative-config-data-dir --core"
if [ "x$NDB_VERBOSE" != "x0" ] ; then
echo "Starting master ndbcluster " $OPTS
fi
@@ -66,6 +66,7 @@ VERBOSE=100
NDB_MGM_EXTRA_OPTS=
NDB_MGMD_EXTRA_OPTS=
NDBD_EXTRA_OPTS=
CHARSETSDIR=

while test $# -gt 0; do
case "$1" in
@@ -119,6 +120,9 @@ while test $# -gt 0; do
--ndbd-extra-opts=*)
NDBD_EXTRA_OPTS=`echo "$1" | sed -e "s;--ndbd-extra-opts=;;"`
;;
--character-sets-dir=*)
CHARSETSDIR=`echo "$1" | sed -e "s;--character-sets-dir=;;"`
;;
--core)
opt_core="--core"
;;
@@ -159,7 +163,7 @@ fi

exec_mgmtclient="$exec_mgmtclient --no-defaults $opt_core $NDB_MGM_EXTRA_OPTS"
exec_mgmtsrvr="$exec_mgmtsrvr --no-defaults $opt_core $NDB_MGMD_EXTRA_OPTS"
exec_ndb="$exec_ndb --no-defaults $opt_core $NDBD_EXTRA_OPTS"
exec_ndb="$exec_ndb --no-defaults $opt_core $NDBD_EXTRA_OPTS --character-sets-dir=$CHARSETSDIR"
exec_waiter="$exec_waiter --no-defaults $opt_core"

ndb_host="localhost"
@@ -674,6 +674,18 @@ Warnings:
Warning 1264 Out of range value for column 'Field1' at row 1
DROP TABLE t1;
SET NAMES latin1;
SELECT CONVERT(103, CHAR(50) UNICODE);
CONVERT(103, CHAR(50) UNICODE)
103
SELECT CONVERT(103.0, CHAR(50) UNICODE);
CONVERT(103.0, CHAR(50) UNICODE)
103.0
SELECT CONVERT(-103, CHAR(50) UNICODE);
CONVERT(-103, CHAR(50) UNICODE)
-103
SELECT CONVERT(-103.0, CHAR(50) UNICODE);
CONVERT(-103.0, CHAR(50) UNICODE)
-103.0
CREATE TABLE t1 (
a varchar(255) NOT NULL default '',
KEY a (a)
@@ -35,3 +35,14 @@ select -1 >> 0, -1 << 0;
select -1 >> 1, -1 << 1;
-1 >> 1 -1 << 1
9223372036854775807 18446744073709551614
drop table if exists t1,t2;
create table t1(a int);
create table t2(a int, b int);
insert into t1 values (1), (2), (3);
insert into t2 values (1, 7), (3, 7);
select t1.a, t2.a, t2.b, bit_count(t2.b) from t1 left join t2 on t1.a=t2.a;
a a b bit_count(t2.b)
1 1 7 3
2 NULL NULL NULL
3 3 7 3
drop table t1, t2;
@@ -1030,3 +1030,13 @@ c res
y,abc abc
y,abc abc
drop table t1;
select cast(rtrim(' 20.06 ') as decimal(19,2));
cast(rtrim(' 20.06 ') as decimal(19,2))
20.06
select cast(ltrim(' 20.06 ') as decimal(19,2));
cast(ltrim(' 20.06 ') as decimal(19,2))
20.06
select cast(rtrim(ltrim(' 20.06 ')) as decimal(19,2));
cast(rtrim(ltrim(' 20.06 ')) as decimal(19,2))
20.06
End of 5.0 tests
@@ -1306,13 +1306,13 @@ insert into t1 (a) select b from t2;
select count(*) from t1;
count(*)
29267
explain select * from t1 where c between 1 and 10000;
explain select * from t1 where c between 1 and 2500;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 5 NULL # Using where
update t1 set c=a;
explain select * from t1 where c between 1 and 10000;
explain select * from t1 where c between 1 and 2500;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL c NULL NULL NULL # Using where
1 SIMPLE t1 range c c 5 NULL # Using where
drop table t1,t2;
create table t1 (id int primary key auto_increment, fk int, index index_fk (fk)) engine=innodb;
insert into t1 (id) values (null),(null),(null),(null),(null);
@@ -3233,15 +3233,6 @@ drop trigger t2t;
drop trigger t3t;
drop trigger t4t;
drop table t1, t2, t3, t4, t5;
create table t1(a date) engine=innodb;
create table t2(a date, key(a)) engine=innodb;
insert into t1 values('2005-10-01');
insert into t2 values('2005-10-01');
select * from t1, t2
where t2.a between t1.a - interval 2 day and t1.a + interval 2 day;
a a
2005-10-01 2005-10-01
drop table t1, t2;
CREATE TABLE t1 (
field1 varchar(8) NOT NULL DEFAULT '',
field2 varchar(8) NOT NULL DEFAULT '',
@@ -3291,3 +3282,176 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `c2` (`c2`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
drop table t1, t2;
create table t1(a date) engine=innodb;
create table t2(a date, key(a)) engine=innodb;
insert into t1 values('2005-10-01');
insert into t2 values('2005-10-01');
select * from t1, t2
where t2.a between t1.a - interval 2 day and t1.a + interval 2 day;
a a
2005-10-01 2005-10-01
drop table t1, t2;
create table t1 (id int not null, f_id int not null, f int not null,
primary key(f_id, id)) engine=innodb;
create table t2 (id int not null,s_id int not null,s varchar(200),
primary key(id)) engine=innodb;
INSERT INTO t1 VALUES (8, 1, 3);
INSERT INTO t1 VALUES (1, 2, 1);
INSERT INTO t2 VALUES (1, 0, '');
INSERT INTO t2 VALUES (8, 1, '');
commit;
DELETE ml.* FROM t1 AS ml LEFT JOIN t2 AS mm ON (mm.id=ml.id)
WHERE mm.id IS NULL;
select ml.* from t1 as ml left join t2 as mm on (mm.id=ml.id)
where mm.id is null lock in share mode;
id f_id f
drop table t1,t2;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2),(7,3);
commit;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
update t1 set b = 5 where b = 1;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
select * from t1 where a = 7 and b = 3 for update;
a b
7 3
commit;
commit;
drop table t1;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2);
commit;
set autocommit = 0;
select * from t1 lock in share mode;
a b
1 1
2 2
3 1
4 2
5 1
6 2
update t1 set b = 5 where b = 1;
set autocommit = 0;
select * from t1 where a = 2 and b = 2 for update;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
commit;
commit;
drop table t1;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values (1,2),(5,3),(4,2);
create table t2(d int not null, e int, primary key(d)) engine=innodb;
insert into t2 values (8,6),(12,1),(3,1);
commit;
set autocommit = 0;
select * from t2 for update;
d e
3 1
8 6
12 1
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
insert into t1 select * from t2;
update t1 set b = (select e from t2 where a = d);
create table t3(d int not null, e int, primary key(d)) engine=innodb
select * from t2;
commit;
commit;
drop table t1, t2, t3;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values (1,2),(5,3),(4,2);
create table t2(a int not null, b int, primary key(a)) engine=innodb;
insert into t2 values (8,6),(12,1),(3,1);
create table t3(d int not null, b int, primary key(d)) engine=innodb;
insert into t3 values (8,6),(12,1),(3,1);
create table t5(a int not null, b int, primary key(a)) engine=innodb;
insert into t5 values (1,2),(5,3),(4,2);
create table t6(d int not null, e int, primary key(d)) engine=innodb;
insert into t6 values (8,6),(12,1),(3,1);
create table t8(a int not null, b int, primary key(a)) engine=innodb;
insert into t8 values (1,2),(5,3),(4,2);
create table t9(d int not null, e int, primary key(d)) engine=innodb;
insert into t9 values (8,6),(12,1),(3,1);
commit;
set autocommit = 0;
select * from t2 for update;
a b
3 1
8 6
12 1
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
insert into t1 select * from t2;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
update t3 set b = (select b from t2 where a = d);
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
create table t4(a int not null, b int, primary key(a)) engine=innodb select * from t2;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
insert into t5 (select * from t2 lock in share mode);
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
update t6 set e = (select b from t2 where a = d lock in share mode);
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
create table t7(a int not null, b int, primary key(a)) engine=innodb select * from t2 lock in share mode;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
insert into t8 (select * from t2 for update);
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
update t9 set e = (select b from t2 where a = d for update);
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
create table t10(a int not null, b int, primary key(a)) engine=innodb select * from t2 for update;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
commit;
drop table t1, t2, t3, t5, t6, t8, t9;
CREATE TABLE t1 (DB_ROW_ID int) engine=innodb;
ERROR HY000: Can't create table 'test.t1' (errno: -1)
CREATE TABLE t1 (
a BIGINT(20) NOT NULL,
PRIMARY KEY (a)
) ENGINE=INNODB DEFAULT CHARSET=UTF8;
CREATE TABLE t2 (
a BIGINT(20) NOT NULL,
b VARCHAR(128) NOT NULL,
c TEXT NOT NULL,
PRIMARY KEY (a,b),
KEY idx_t2_b_c (b,c(200)),
CONSTRAINT t_fk FOREIGN KEY (a) REFERENCES t1 (a)
ON DELETE CASCADE
) ENGINE=INNODB DEFAULT CHARSET=UTF8;
INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1, 'bar', 'vbar');
INSERT INTO t2 VALUES (1, 'BAR2', 'VBAR');
INSERT INTO t2 VALUES (1, 'bar_bar', 'bibi');
INSERT INTO t2 VALUES (1, 'customer_over', '1');
SELECT * FROM t2 WHERE b = 'customer_over';
a b c
1 customer_over 1
SELECT * FROM t2 WHERE BINARY b = 'customer_over';
a b c
1 customer_over 1
SELECT DISTINCT p0.a FROM t2 p0 WHERE p0.b = 'customer_over';
a
1
/* Bang: Empty result set, above was expected: */
SELECT DISTINCT p0.a FROM t2 p0 WHERE BINARY p0.b = 'customer_over';
a
1
SELECT p0.a FROM t2 p0 WHERE BINARY p0.b = 'customer_over';
a
1
drop table t2, t1;
@@ -15,7 +15,7 @@ where mm.id is null lock in share mode;
id f_id f
drop table t1,t2;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2);
insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2),(7,3);
commit;
set autocommit = 0;
select * from t1 lock in share mode;
@@ -26,6 +26,7 @@ a b
4 2
5 1
6 2
7 3
update t1 set b = 5 where b = 1;
set autocommit = 0;
select * from t1 where a = 2 and b = 2 for update;
@@ -33,3 +34,87 @@ ERROR HY000: Lock wait timeout exceeded; try restarting transaction
commit;
commit;
drop table t1;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2),(7,3);
commit;
set autocommit = 0;
update t1 set b = 5 where b = 1;
set autocommit = 0;
select * from t1 where a = 7 and b = 3 for update;
a b
7 3
commit;
commit;
drop table t1;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values (1,2),(5,3),(4,2);
create table t2(d int not null, e int, primary key(d)) engine=innodb;
insert into t2 values (8,6),(12,1),(3,1);
commit;
set autocommit = 0;
select * from t2 for update;
d e
3 1
8 6
12 1
set autocommit = 0;
insert into t1 select * from t2;
update t1 set b = (select e from t2 where a = d);
create table t3(d int not null, e int, primary key(d)) engine=innodb
select * from t2;
commit;
commit;
drop table t1, t2, t3;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values (1,2),(5,3),(4,2);
create table t2(a int not null, b int, primary key(a)) engine=innodb;
insert into t2 values (8,6),(12,1),(3,1);
create table t3(d int not null, b int, primary key(d)) engine=innodb;
insert into t3 values (8,6),(12,1),(3,1);
create table t5(a int not null, b int, primary key(a)) engine=innodb;
insert into t5 values (1,2),(5,3),(4,2);
create table t6(d int not null, e int, primary key(d)) engine=innodb;
insert into t6 values (8,6),(12,1),(3,1);
create table t8(a int not null, b int, primary key(a)) engine=innodb;
insert into t8 values (1,2),(5,3),(4,2);
create table t9(d int not null, e int, primary key(d)) engine=innodb;
insert into t9 values (8,6),(12,1),(3,1);
commit;
set autocommit = 0;
select * from t2 for update;
a b
3 1
8 6
12 1
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
insert into t1 select * from t2;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
update t3 set b = (select b from t2 where a = d);
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
create table t4(a int not null, b int, primary key(a)) engine=innodb select * from t2;
set autocommit = 0;
insert into t5 (select * from t2 lock in share mode);
set autocommit = 0;
update t6 set e = (select b from t2 where a = d lock in share mode);
set autocommit = 0;
create table t7(a int not null, b int, primary key(a)) engine=innodb select * from t2 lock in share mode;
set autocommit = 0;
insert into t8 (select * from t2 for update);
set autocommit = 0;
update t9 set e = (select b from t2 where a = d for update);
set autocommit = 0;
create table t10(a int not null, b int, primary key(a)) engine=innodb select * from t2 for update;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
commit;
drop table t1, t2, t3, t5, t6, t8, t9;
@@ -85,3 +85,15 @@ c_cp932
| NULL | NULL | Τη γλώσσα |
| NULL | NULL | ᛖᚴ ᚷᛖᛏ |
+------+------+---------------------------+
+------+---+------+
| i | j | k |
+------+---+------+
| NULL | 1 | NULL |
+------+---+------+
+-------+---------+------+-----+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+-------+---------+------+-----+---------+-------+
| i | int(11) | YES | | NULL | |
| j | int(11) | NO | | NULL | |
| k | int(11) | YES | | NULL | |
+-------+---------+------+-----+---------+-------+
@@ -165,6 +165,20 @@ ENGINE=NDB
PARTITION BY KEY(c3) PARTITIONS 5;
ALTER TABLE t1 COALESCE PARTITION 4;
DROP TABLE t1;
CREATE TABLE t1 (a int primary key)
ENGINE=NDB
PARTITION BY KEY(a);
ALTER TABLE t1 OPTIMIZE PARTITION p0;
ERROR HY000: Table storage engine for 't1' doesn't have this option
ALTER TABLE t1 CHECK PARTITION p0;
ERROR HY000: Table storage engine for 't1' doesn't have this option
ALTER TABLE t1 REPAIR PARTITION p0;
ERROR HY000: Table storage engine for 't1' doesn't have this option
ALTER TABLE t1 ANALYZE PARTITION p0;
ERROR HY000: Table storage engine for 't1' doesn't have this option
ALTER TABLE t1 REBUILD PARTITION p0;
ERROR HY000: Table storage engine for 't1' doesn't have this option
DROP TABLE t1;
CREATE TABLE t1 (
c1 MEDIUMINT NOT NULL AUTO_INCREMENT,
c2 TEXT NOT NULL,
@@ -23,3 +23,23 @@ select 1;
show status like 'last_query_cost';
Variable_name Value
Last_query_cost 0.000000
FLUSH STATUS;
SHOW STATUS LIKE 'max_used_connections';
Variable_name Value
Max_used_connections 1
SET @save_thread_cache_size=@@thread_cache_size;
SET GLOBAL thread_cache_size=3;
SHOW STATUS LIKE 'max_used_connections';
Variable_name Value
Max_used_connections 3
FLUSH STATUS;
SHOW STATUS LIKE 'max_used_connections';
Variable_name Value
Max_used_connections 2
SHOW STATUS LIKE 'max_used_connections';
Variable_name Value
Max_used_connections 3
SHOW STATUS LIKE 'max_used_connections';
Variable_name Value
Max_used_connections 4
SET GLOBAL thread_cache_size=@save_thread_cache_size;
@@ -952,3 +952,16 @@ load data infile '../std_data_ln/words.dat' into table t1 (a) set b:= f1();
drop table t1;
drop function f1;
drop function f2;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (
conn_id INT,
trigger_conn_id INT
);
CREATE TRIGGER t1_bi BEFORE INSERT ON t1 FOR EACH ROW
SET NEW.trigger_conn_id = CONNECTION_ID();
INSERT INTO t1 (conn_id, trigger_conn_id) VALUES (CONNECTION_ID(), -1);
INSERT INTO t1 (conn_id, trigger_conn_id) VALUES (CONNECTION_ID(), -1);
SELECT * FROM t1 WHERE conn_id != trigger_conn_id;
conn_id trigger_conn_id
DROP TRIGGER t1_bi;
DROP TABLE t1;
@@ -2600,3 +2600,26 @@ id td
5 2005-01-04
DROP VIEW v1;
DROP TABLE t1;
create table t1 (a int);
create view v1 as select * from t1;
create view v2 as select * from v1;
drop table t1;
rename table v2 to t1;
select * from v1;
ERROR HY000: `test`.`v1` contain view recursion
drop view t1, v1;
create table t1 (a int);
create function f1() returns int
begin
declare mx int;
select max(a) from t1 into mx;
return mx;
end//
create view v1 as select f1() as a;
create view v2 as select * from v1;
drop table t1;
rename table v2 to t1;
select * from v1;
ERROR HY000: Recursive stored functions and triggers are not allowed.
drop function f1;
drop view t1, v1;
@@ -409,6 +409,14 @@ INSERT INTO t1 VALUES ('-1');
DROP TABLE t1;
SET NAMES latin1;

#
# Bug#18691 Converting number to UNICODE string returns invalid result
#
SELECT CONVERT(103, CHAR(50) UNICODE);
SELECT CONVERT(103.0, CHAR(50) UNICODE);
SELECT CONVERT(-103, CHAR(50) UNICODE);
SELECT CONVERT(-103.0, CHAR(50) UNICODE);

#
# Bug#9557 MyISAM utf8 table crash
#
@@ -24,13 +24,12 @@ rpl_deadlock_innodb : BUG#16920 2006-04-12 kent fails in show slave stat
rpl_ndb_2innodb : BUG#19227 2006-04-20 pekka pk delete apparently not replicated
rpl_ndb_2myisam : BUG#19227 2006-04-20 pekka pk delete apparently not replicated
rpl_ndb_auto_inc : BUG#17086 2006-02-16 jmiller CR: auto_increment_increment and auto_increment_offset produce duplicate key er
rpl_ndb_ddl : result file needs update + test needs to checked
rpl_ndb_dd_partitions : BUG#19259 2006-04-21 rpl_ndb_dd_partitions fails on solaris
rpl_ndb_ddl : BUG#18946 result file needs update + test needs to checked
rpl_ndb_innodb2ndb : BUG#17400 2006-04-19 tomas Cluster Replication: delete & update of rows in table without pk fails on slave.
rpl_ndb_log : BUG#18947 2006-03-21 tomas CRBR: order in binlog of create table and insert (on different table) not determ
rpl_ndb_myisam2ndb : BUG#17400 2006-04-19 tomas Cluster Replication: delete & update of rows in table without pk fails on slave.
#rpl_ndb_relay_space : BUG#16993 2006-02-16 jmiller RBR: ALTER TABLE ZEROFILL AUTO_INCREMENT is not replicated correctly
rpl_switch_stm_row_mixed : BUG#18590 2006-03-28 brian
#rpl_row_basic_7ndb : BUG#17400 2006-04-09 brian Cluster Replication: delete & update of rows in table without pk fails on slave.
rpl_row_blob_innodb : BUG#18980 2006-04-10 kent Test fails randomly
rpl_row_func003 : BUG#19074 2006-13-04 andrei test failed
rpl_row_inexist_tbl : BUG#18948 2006-03-09 mats Disabled since patch makes this test wait forever
@@ -42,15 +41,3 @@ udf : BUG#18564 2006-03-27 ian (Permission by Brian)

# the below testcase have been reworked to avoid the bug, test contains comment, keep bug open
#ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events

# the below ndb failures have not been objerved for > 5 push builds, close bugs
#ndb_gis : BUG#18600 2006-03-28 brian ndb_gis test failure
#ndb_load : BUG#17233 2006-02-16 jmiller failed load data from infile causes mysqld dbug_assert, binlog not flushed
#rpl_ndb_basic : BUG#18592 2006-03-28 brian rpl_ndb_basic failure
#rpl_ndb_dd_advance : BUG#18924 2006-04-09 brian rpl_ndb_dd_advance failure
rpl_ndb_dd_partitions : fails on solaris
#rpl_ndb_dd_basic : BUG#18569 2006-03-28 brian rpl_ndb_dd_basic failure
#rpl_ndb_insert_ignore : BUG#18567 2006-03-28 brian rpl_ndb_insert_ignore failure
#rpl_ndb_multi_update2 : BUG#18928 2006-04-09 brian rpl_ndb_multi_update2 failed
#rpl_ndb_multi_update3 : BUG#18627 2006-03-29 monty Cluster Replication: rpl_ndb_multi_update3 fails on Intel 64 bit
#rpl_ndb_trig004 : BUG#18977 2006-04-10 kent Test fails randomly
@@ -17,4 +17,18 @@ select 0 | -1, 0 ^ -1, 0 & -1;
select -1 >> 0, -1 << 0;
select -1 >> 1, -1 << 1;

#
# Bug 13044: wrong bit_count() results
#

--disable_warnings
drop table if exists t1,t2;
--enable_warnings
create table t1(a int);
create table t2(a int, b int);
insert into t1 values (1), (2), (3);
insert into t2 values (1, 7), (3, 7);
select t1.a, t2.a, t2.b, bit_count(t2.b) from t1 left join t2 on t1.a=t2.a;
drop table t1, t2;

# End of 4.1 tests
@@ -684,4 +684,11 @@ insert into t1 values ('y,abc'),('y,abc');
select c, substring_index(lcase(c), @q:=',', -1) as res from t1;
drop table t1;

# End of 5.0 tests
#
# Bug #17043: Casting trimmed string to decimal loses precision
#
select cast(rtrim(' 20.06 ') as decimal(19,2));
select cast(ltrim(' 20.06 ') as decimal(19,2));
select cast(rtrim(ltrim(' 20.06 ')) as decimal(19,2));

--echo End of 5.0 tests
@@ -1 +1 @@
--binlog_cache_size=32768
--binlog_cache_size=32768 --innodb_lock_wait_timeout=1
@@ -901,10 +901,10 @@ insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
select count(*) from t1;
--replace_column 9 #
explain select * from t1 where c between 1 and 10000;
explain select * from t1 where c between 1 and 2500;
update t1 set c=a;
--replace_column 9 #
explain select * from t1 where c between 1 and 10000;
explain select * from t1 where c between 1 and 2500;
drop table t1,t2;

#
@@ -2129,18 +2129,6 @@ connection default;
disconnect a;
disconnect b;

#
# Bug #14360: problem with intervals
#

create table t1(a date) engine=innodb;
create table t2(a date, key(a)) engine=innodb;
insert into t1 values('2005-10-01');
insert into t2 values('2005-10-01');
select * from t1, t2
where t2.a between t1.a - interval 2 day and t1.a + interval 2 day;
drop table t1, t2;

#
# Test that cascading updates leading to duplicate keys give the correct
# error message (bug #9680)
@@ -2193,3 +2181,303 @@ alter table t1 drop foreign key c2_fk;
show create table t1;
#
drop table t1, t2;

#
# Bug #14360: problem with intervals
#

create table t1(a date) engine=innodb;
create table t2(a date, key(a)) engine=innodb;
insert into t1 values('2005-10-01');
insert into t2 values('2005-10-01');
select * from t1, t2
where t2.a between t1.a - interval 2 day and t1.a + interval 2 day;
drop table t1, t2;

create table t1 (id int not null, f_id int not null, f int not null,
primary key(f_id, id)) engine=innodb;
create table t2 (id int not null,s_id int not null,s varchar(200),
primary key(id)) engine=innodb;
INSERT INTO t1 VALUES (8, 1, 3);
INSERT INTO t1 VALUES (1, 2, 1);
INSERT INTO t2 VALUES (1, 0, '');
INSERT INTO t2 VALUES (8, 1, '');
commit;
DELETE ml.* FROM t1 AS ml LEFT JOIN t2 AS mm ON (mm.id=ml.id)
WHERE mm.id IS NULL;
select ml.* from t1 as ml left join t2 as mm on (mm.id=ml.id)
where mm.id is null lock in share mode;
drop table t1,t2;

#
# Test case where X-locks on unused rows should be released in a
# update (because READ COMMITTED isolation level)
#

connect (a,localhost,root,,);
connect (b,localhost,root,,);
connection a;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2),(7,3);
commit;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
update t1 set b = 5 where b = 1;
connection b;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
#
# X-lock to record (7,3) should be released in a update
#
select * from t1 where a = 7 and b = 3 for update;
connection a;
commit;
connection b;
commit;
drop table t1;
connection default;
disconnect a;
disconnect b;

#
# Test case where no locks should be released (because we are not
# using READ COMMITTED isolation level)
#

connect (a,localhost,root,,);
connect (b,localhost,root,,);
connection a;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2);
commit;
set autocommit = 0;
select * from t1 lock in share mode;
update t1 set b = 5 where b = 1;
connection b;
set autocommit = 0;
#
# S-lock to records (2,2),(4,2), and (6,2) should not be released in a update
#
--error 1205
select * from t1 where a = 2 and b = 2 for update;
#
# X-lock to record (1,1),(3,1),(5,1) should not be released in a update
#
--error 1205
connection a;
commit;
connection b;
commit;
connection default;
disconnect a;
disconnect b;
drop table t1;

#
# Consistent read should be used in following selects
#
# 1) INSERT INTO ... SELECT
# 2) UPDATE ... = ( SELECT ...)
# 3) CREATE ... SELECT

connect (a,localhost,root,,);
connect (b,localhost,root,,);
connection a;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values (1,2),(5,3),(4,2);
create table t2(d int not null, e int, primary key(d)) engine=innodb;
insert into t2 values (8,6),(12,1),(3,1);
commit;
set autocommit = 0;
select * from t2 for update;
connection b;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
insert into t1 select * from t2;
update t1 set b = (select e from t2 where a = d);
create table t3(d int not null, e int, primary key(d)) engine=innodb
select * from t2;
commit;
connection a;
commit;
connection default;
disconnect a;
disconnect b;
drop table t1, t2, t3;

#
# Consistent read should not be used if
#
# (a) isolation level is serializable OR
# (b) select ... lock in share mode OR
# (c) select ... for update
#
# in following queries:
#
# 1) INSERT INTO ... SELECT
# 2) UPDATE ... = ( SELECT ...)
# 3) CREATE ... SELECT

connect (a,localhost,root,,);
connect (b,localhost,root,,);
connect (c,localhost,root,,);
connect (d,localhost,root,,);
connect (e,localhost,root,,);
connect (f,localhost,root,,);
connect (g,localhost,root,,);
connect (h,localhost,root,,);
connect (i,localhost,root,,);
connect (j,localhost,root,,);
connection a;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values (1,2),(5,3),(4,2);
create table t2(a int not null, b int, primary key(a)) engine=innodb;
insert into t2 values (8,6),(12,1),(3,1);
create table t3(d int not null, b int, primary key(d)) engine=innodb;
insert into t3 values (8,6),(12,1),(3,1);
create table t5(a int not null, b int, primary key(a)) engine=innodb;
insert into t5 values (1,2),(5,3),(4,2);
create table t6(d int not null, e int, primary key(d)) engine=innodb;
insert into t6 values (8,6),(12,1),(3,1);
create table t8(a int not null, b int, primary key(a)) engine=innodb;
insert into t8 values (1,2),(5,3),(4,2);
create table t9(d int not null, e int, primary key(d)) engine=innodb;
insert into t9 values (8,6),(12,1),(3,1);
commit;
set autocommit = 0;
select * from t2 for update;
connection b;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
--send
insert into t1 select * from t2;
connection c;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
--send
update t3 set b = (select b from t2 where a = d);
connection d;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
--send
create table t4(a int not null, b int, primary key(a)) engine=innodb select * from t2;
connection e;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
--send
insert into t5 (select * from t2 lock in share mode);
connection f;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
--send
update t6 set e = (select b from t2 where a = d lock in share mode);
connection g;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
--send
create table t7(a int not null, b int, primary key(a)) engine=innodb select * from t2 lock in share mode;
connection h;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
--send
insert into t8 (select * from t2 for update);
connection i;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
--send
update t9 set e = (select b from t2 where a = d for update);
connection j;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
--send
create table t10(a int not null, b int, primary key(a)) engine=innodb select * from t2 for update;

connection b;
--error 1205
reap;

connection c;
--error 1205
reap;

connection d;
--error 1205
reap;

connection e;
--error 1205
reap;

connection f;
--error 1205
reap;

connection g;
--error 1205
reap;

connection h;
--error 1205
reap;

connection i;
--error 1205
reap;

connection j;
--error 1205
reap;

connection a;
commit;

connection default;
disconnect a;
disconnect b;
disconnect c;
disconnect d;
disconnect e;
disconnect f;
disconnect g;
disconnect h;
disconnect i;
disconnect j;
drop table t1, t2, t3, t5, t6, t8, t9;

# bug 18934, "InnoDB crashes when table uses column names like DB_ROW_ID"
--error 1005
CREATE TABLE t1 (DB_ROW_ID int) engine=innodb;

#
# Bug #17152: Wrong result with BINARY comparison on aliased column
#

CREATE TABLE t1 (
a BIGINT(20) NOT NULL,
PRIMARY KEY (a)
) ENGINE=INNODB DEFAULT CHARSET=UTF8;

CREATE TABLE t2 (
a BIGINT(20) NOT NULL,
b VARCHAR(128) NOT NULL,
c TEXT NOT NULL,
PRIMARY KEY (a,b),
KEY idx_t2_b_c (b,c(200)),
CONSTRAINT t_fk FOREIGN KEY (a) REFERENCES t1 (a)
ON DELETE CASCADE
) ENGINE=INNODB DEFAULT CHARSET=UTF8;

INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1, 'bar', 'vbar');
INSERT INTO t2 VALUES (1, 'BAR2', 'VBAR');
INSERT INTO t2 VALUES (1, 'bar_bar', 'bibi');
INSERT INTO t2 VALUES (1, 'customer_over', '1');

SELECT * FROM t2 WHERE b = 'customer_over';
SELECT * FROM t2 WHERE BINARY b = 'customer_over';
SELECT DISTINCT p0.a FROM t2 p0 WHERE p0.b = 'customer_over';
/* Bang: Empty result set, above was expected: */
SELECT DISTINCT p0.a FROM t2 p0 WHERE BINARY p0.b = 'customer_over';
SELECT p0.a FROM t2 p0 WHERE BINARY p0.b = 'customer_over';

drop table t2, t1;
@@ -1 +1 @@
--innodb_locks_unsafe_for_binlog=true
--innodb_locks_unsafe_for_binlog=true --innodb_lock_wait_timeout=1
@@ -1,7 +1,9 @@
-- source include/have_innodb.inc
#
# Note that these tests uses a innodb_locks_unsafe_for_binlog option.
#
# Note that these tests uses options
# innodb_locks_unsafe_for_binlog = true
# innodb_lock_timeout = 5

#
# Test cases for a bug #15650
#
@@ -33,7 +35,7 @@ connect (a,localhost,root,,);
connect (b,localhost,root,,);
connection a;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2);
insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2),(7,3);
commit;
set autocommit = 0;
select * from t1 lock in share mode;
@@ -50,6 +52,197 @@ commit;
connection b;
commit;
drop table t1;
connection default;
disconnect a;
disconnect b;

#
# unlock row test
#

connect (a,localhost,root,,);
connect (b,localhost,root,,);
connection a;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2),(7,3);
commit;
set autocommit = 0;
update t1 set b = 5 where b = 1;
connection b;
set autocommit = 0;
#
# X-lock to record (7,3) should be released in a update
#
select * from t1 where a = 7 and b = 3 for update;
commit;
connection a;
commit;
drop table t1;
connection default;
disconnect a;
disconnect b;


#
# Consistent read should be used in following selects
#
# 1) INSERT INTO ... SELECT
# 2) UPDATE ... = ( SELECT ...)
# 3) CREATE ... SELECT

connect (a,localhost,root,,);
connect (b,localhost,root,,);
connection a;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values (1,2),(5,3),(4,2);
create table t2(d int not null, e int, primary key(d)) engine=innodb;
insert into t2 values (8,6),(12,1),(3,1);
commit;
set autocommit = 0;
select * from t2 for update;
connection b;
set autocommit = 0;
insert into t1 select * from t2;
update t1 set b = (select e from t2 where a = d);
create table t3(d int not null, e int, primary key(d)) engine=innodb
select * from t2;
commit;
connection a;
commit;
connection default;
disconnect a;
disconnect b;
drop table t1, t2, t3;

#
# Consistent read should not be used if
#
# (a) isolation level is serializable OR
# (b) select ... lock in share mode OR
# (c) select ... for update
#
# in following queries:
#
# 1) INSERT INTO ... SELECT
# 2) UPDATE ... = ( SELECT ...)
# 3) CREATE ... SELECT

connect (a,localhost,root,,);
connect (b,localhost,root,,);
connect (c,localhost,root,,);
connect (d,localhost,root,,);
connect (e,localhost,root,,);
connect (f,localhost,root,,);
connect (g,localhost,root,,);
connect (h,localhost,root,,);
connect (i,localhost,root,,);
connect (j,localhost,root,,);
connection a;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values (1,2),(5,3),(4,2);
create table t2(a int not null, b int, primary key(a)) engine=innodb;
insert into t2 values (8,6),(12,1),(3,1);
create table t3(d int not null, b int, primary key(d)) engine=innodb;
insert into t3 values (8,6),(12,1),(3,1);
create table t5(a int not null, b int, primary key(a)) engine=innodb;
insert into t5 values (1,2),(5,3),(4,2);
create table t6(d int not null, e int, primary key(d)) engine=innodb;
insert into t6 values (8,6),(12,1),(3,1);
create table t8(a int not null, b int, primary key(a)) engine=innodb;
insert into t8 values (1,2),(5,3),(4,2);
create table t9(d int not null, e int, primary key(d)) engine=innodb;
insert into t9 values (8,6),(12,1),(3,1);
commit;
set autocommit = 0;
select * from t2 for update;
connection b;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
--send
insert into t1 select * from t2;
connection c;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
--send
update t3 set b = (select b from t2 where a = d);
connection d;
set autocommit = 0;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
--send
create table t4(a int not null, b int, primary key(a)) engine=innodb select * from t2;
connection e;
set autocommit = 0;
--send
insert into t5 (select * from t2 lock in share mode);
connection f;
set autocommit = 0;
--send
update t6 set e = (select b from t2 where a = d lock in share mode);
connection g;
set autocommit = 0;
--send
create table t7(a int not null, b int, primary key(a)) engine=innodb select * from t2 lock in share mode;
connection h;
set autocommit = 0;
--send
insert into t8 (select * from t2 for update);
connection i;
set autocommit = 0;
--send
update t9 set e = (select b from t2 where a = d for update);
connection j;
set autocommit = 0;
--send
create table t10(a int not null, b int, primary key(a)) engine=innodb select * from t2 for update;

connection b;
--error 1205
reap;

connection c;
--error 1205
reap;

connection d;
--error 1205
reap;

connection e;
--error 1205
reap;

connection f;
--error 1205
reap;

connection g;
--error 1205
reap;

connection h;
--error 1205
reap;

connection i;
--error 1205
reap;

connection j;
--error 1205
reap;

connection a;
commit;

connection default;
disconnect a;
disconnect b;
disconnect c;
disconnect d;
disconnect e;
disconnect f;
disconnect g;
disconnect h;
disconnect i;
disconnect j;
drop table t1, t2, t3, t5, t6, t8, t9;
@@ -67,3 +67,8 @@ drop table t1;
#
--exec $MYSQL -t --default-character-set utf8 test -e "create table t1 (i int, j int, k char(25) charset utf8); insert into t1 (i) values (1); insert into t1 (k) values ('<----------------------->'); insert into t1 (k) values ('<-----'); insert into t1 (k) values ('Τη γλώσσα'); insert into t1 (k) values ('ᛖᚴ ᚷᛖᛏ'); select * from t1; DROP TABLE t1;"

#
# "DESCRIBE" commands may return strange NULLness flags.
#
--exec $MYSQL -t --default-character-set utf8 test -e "create table t1 (i int, j int not null, k int); insert into t1 values (null, 1, null); select * from t1; describe t1; drop table t1;"

@@ -154,6 +154,24 @@ ALTER TABLE t1 COALESCE PARTITION 4;

DROP TABLE t1;

#
# Bug 16822: OPTIMIZE TABLE hangs test
#
CREATE TABLE t1 (a int primary key)
ENGINE=NDB
PARTITION BY KEY(a);
--error 1031
ALTER TABLE t1 OPTIMIZE PARTITION p0;
--error 1031
ALTER TABLE t1 CHECK PARTITION p0;
--error 1031
ALTER TABLE t1 REPAIR PARTITION p0;
--error 1031
ALTER TABLE t1 ANALYZE PARTITION p0;
--error 1031
ALTER TABLE t1 REBUILD PARTITION p0;
DROP TABLE t1;

#
# BUG 16806: ALTER TABLE fails
#
mysql-test/t/rpl_view-slave.opt (new file, 1 line)
@@ -0,0 +1 @@
--replicate-ignore-table=test.foo
@@ -36,11 +36,111 @@ reap;
show status like 'Table_lock%';
drop table t1;

disconnect con2;
disconnect con1;
connection default;

# End of 4.1 tests

#
# lost_query_cost
# last_query_cost
#

select 1;
show status like 'last_query_cost';

#
# Test for Bug #15933 max_used_connections is wrong after FLUSH STATUS
# if connections are cached
#
#
# The first suggested fix from the bug report was chosen
# (see http://bugs.mysql.com/bug.php?id=15933):
#
# a) On flushing the status, set max_used_connections to
# threads_connected, not to 0.
#
# b) Check if it is necessary to increment max_used_connections when
# taking a thread from the cache as well as when creating new threads
#

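The comment block above states the fix as prose; restated as a sketch it is two small changes to the server's counters (names as in SHOW STATUS, the functions hypothetical, locking omitted):

    /* Sketch of the chosen fix for Bug #15933. */
    static unsigned int threads_connected= 0;
    static unsigned int max_used_connections= 0;

    void flush_status()
    {
      /* (a) reset to the current connection count, not to 0 */
      max_used_connections= threads_connected;
    }

    void on_connection_started()  /* new thread OR thread taken from the cache */
    {
      /* (b) cached threads must bump the high-water mark too */
      if (++threads_connected > max_used_connections)
        max_used_connections= threads_connected;
    }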
# Wait for at most $disconnect_timeout seconds for disconnects to finish.
let $disconnect_timeout = 10;

# Wait for any previous disconnects to finish.
FLUSH STATUS;
--disable_query_log
--disable_result_log
eval SET @wait_left = $disconnect_timeout;
let $max_used_connections = `SHOW STATUS LIKE 'max_used_connections'`;
eval SET @max_used_connections = SUBSTRING('$max_used_connections', 21)+0;
let $wait_more = `SELECT @max_used_connections != 1 && @wait_left > 0`;
while ($wait_more)
{
sleep 1;
FLUSH STATUS;
SET @wait_left = @wait_left - 1;
let $max_used_connections = `SHOW STATUS LIKE 'max_used_connections'`;
eval SET @max_used_connections = SUBSTRING('$max_used_connections', 21)+0;
let $wait_more = `SELECT @max_used_connections != 1 && @wait_left > 0`;
}
--enable_query_log
--enable_result_log

# Prerequisite.
SHOW STATUS LIKE 'max_used_connections';

# Save original setting.
SET @save_thread_cache_size=@@thread_cache_size;
SET GLOBAL thread_cache_size=3;

connect (con1,localhost,root,,);
connect (con2,localhost,root,,);

connection con1;
disconnect con2;

# Check that max_used_connections still reflects maximum value.
SHOW STATUS LIKE 'max_used_connections';

# Check that after flush max_used_connections equals to current number
# of connections. First wait for previous disconnect to finish.
FLUSH STATUS;
--disable_query_log
--disable_result_log
eval SET @wait_left = $disconnect_timeout;
let $max_used_connections = `SHOW STATUS LIKE 'max_used_connections'`;
eval SET @max_used_connections = SUBSTRING('$max_used_connections', 21)+0;
let $wait_more = `SELECT @max_used_connections != 2 && @wait_left > 0`;
while ($wait_more)
{
sleep 1;
FLUSH STATUS;
SET @wait_left = @wait_left - 1;
let $max_used_connections = `SHOW STATUS LIKE 'max_used_connections'`;
eval SET @max_used_connections = SUBSTRING('$max_used_connections', 21)+0;
let $wait_more = `SELECT @max_used_connections != 2 && @wait_left > 0`;
}
--enable_query_log
--enable_result_log
# Check that we don't count disconnected thread any longer.
SHOW STATUS LIKE 'max_used_connections';

# Check that max_used_connections is updated when cached thread is
# reused...
connect (con2,localhost,root,,);
SHOW STATUS LIKE 'max_used_connections';

# ...and when new thread is created.
connect (con3,localhost,root,,);
SHOW STATUS LIKE 'max_used_connections';

# Restore original setting.
connection default;
SET GLOBAL thread_cache_size=@save_thread_cache_size;

disconnect con3;
disconnect con2;
disconnect con1;

# End of 5.0 tests
@ -1114,3 +1114,31 @@ load data infile '../std_data_ln/words.dat' into table t1 (a) set b:= f1();
drop table t1;
drop function f1;
drop function f2;

#
# Test for Bug #16461 connection_id() does not work properly inside trigger
#
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

CREATE TABLE t1 (
  conn_id INT,
  trigger_conn_id INT
);
CREATE TRIGGER t1_bi BEFORE INSERT ON t1 FOR EACH ROW
SET NEW.trigger_conn_id = CONNECTION_ID();

INSERT INTO t1 (conn_id, trigger_conn_id) VALUES (CONNECTION_ID(), -1);

connect (con1,localhost,root,,);
INSERT INTO t1 (conn_id, trigger_conn_id) VALUES (CONNECTION_ID(), -1);
connection default;
disconnect con1;

SELECT * FROM t1 WHERE conn_id != trigger_conn_id;

DROP TRIGGER t1_bi;
DROP TABLE t1;

# End of 5.0 tests
@ -2459,3 +2459,34 @@ SELECT * FROM v1 WHERE td BETWEEN '2005.01.02' AND '2005.01.04';

DROP VIEW v1;
DROP TABLE t1;

#
# BUG#14308: Recursive view definitions
#
# using view only
create table t1 (a int);
create view v1 as select * from t1;
create view v2 as select * from v1;
drop table t1;
rename table v2 to t1;
-- error ER_VIEW_RECURSIVE
select * from v1;
drop view t1, v1;
# using SP function
create table t1 (a int);
delimiter //;
create function f1() returns int
begin
  declare mx int;
  select max(a) from t1 into mx;
  return mx;
end//
delimiter ;//
create view v1 as select f1() as a;
create view v2 as select * from v1;
drop table t1;
rename table v2 to t1;
-- error ER_SP_NO_RECURSION
select * from v1;
drop function f1;
drop view t1, v1;
251
sql/ha_innodb.cc
@ -132,6 +132,7 @@ extern "C" {
#include "../storage/innobase/include/sync0sync.h"
#include "../storage/innobase/include/fil0fil.h"
#include "../storage/innobase/include/trx0xa.h"
#include "../storage/innobase/include/thr0loc.h"
}

#define HA_INNOBASE_ROWS_IN_TABLE 10000 /* to get optimization right */
@ -237,7 +238,7 @@ handlerton innobase_hton = {
NULL, /* Fill FILES table */
HTON_NO_FLAGS,
NULL, /* binlog_func */
NULL, /* binlog_log_query */
NULL, /* binlog_log_query */
innobase_release_temporary_latches
};

@ -535,18 +536,18 @@ convert_error_code_to_mysql(

} else if (error == (int) DB_CORRUPTION) {

return(HA_ERR_CRASHED);
} else if (error == (int) DB_NO_SAVEPOINT) {
return(HA_ERR_CRASHED);
} else if (error == (int) DB_NO_SAVEPOINT) {

return(HA_ERR_NO_SAVEPOINT);
} else if (error == (int) DB_LOCK_TABLE_FULL) {
/* Since we rolled back the whole transaction, we must
tell it also to MySQL so that MySQL knows to empty the
cached binlog for this transaction */
return(HA_ERR_NO_SAVEPOINT);
} else if (error == (int) DB_LOCK_TABLE_FULL) {
/* Since we rolled back the whole transaction, we must
tell it also to MySQL so that MySQL knows to empty the
cached binlog for this transaction */

if (thd) {
ha_rollback(thd);
}
if (thd) {
ha_rollback(thd);
}

return(HA_ERR_LOCK_TABLE_FULL);
} else {
@ -1014,7 +1015,6 @@ innobase_query_caching_of_table_permitted(
mutex_enter_noninline(&kernel_mutex);
trx_print(stderr, trx, 1024);
mutex_exit_noninline(&kernel_mutex);
ut_error;
}

innobase_release_stat_resources(trx);
@ -1769,25 +1769,6 @@ innobase_report_binlog_offset_and_commit(
trx->mysql_log_file_name = log_file_name;
trx->mysql_log_offset = (ib_longlong)end_offset;

#ifdef HAVE_REPLICATION
if (thd->variables.sync_replication) {
/* Let us store the binlog file name and the position, so that
we know how long to wait for the binlog to be replicated to
the slave in synchronous replication. */

if (trx->repl_wait_binlog_name == NULL) {

trx->repl_wait_binlog_name =
(char*)mem_alloc_noninline(FN_REFLEN + 100);
}

ut_a(strlen(log_file_name) < FN_REFLEN + 100);

strcpy(trx->repl_wait_binlog_name, log_file_name);

trx->repl_wait_binlog_pos = (ib_longlong)end_offset;
}
#endif /* HAVE_REPLICATION */
trx->flush_log_later = TRUE;

innobase_commit(thd, TRUE);
@ -1856,121 +1837,9 @@ innobase_commit_complete(
trx_commit_complete_for_mysql(trx);
}

#ifdef HAVE_REPLICATION
if (thd->variables.sync_replication
&& trx->repl_wait_binlog_name
&& innobase_repl_state != 0) {

struct timespec abstime;
int cmp;
int ret;

/* In synchronous replication, let us wait until the MySQL
replication has sent the relevant binlog segment to the
replication slave. */

pthread_mutex_lock(&innobase_repl_cond_mutex);
try_again:
if (innobase_repl_state == 0) {

pthread_mutex_unlock(&innobase_repl_cond_mutex);

return(0);
}

cmp = strcmp(innobase_repl_file_name,
trx->repl_wait_binlog_name);
if (cmp > 0
|| (cmp == 0 && innobase_repl_pos
>= (my_off_t)trx->repl_wait_binlog_pos)) {
/* We have already sent the relevant binlog to the
slave: no need to wait here */

pthread_mutex_unlock(&innobase_repl_cond_mutex);

/* printf("Binlog now sent\n"); */

return(0);
}

/* Let us update the info about the minimum binlog position
of waiting threads in the innobase_repl_... variables */

if (innobase_repl_wait_file_name_inited != 0) {
cmp = strcmp(trx->repl_wait_binlog_name,
innobase_repl_wait_file_name);
if (cmp < 0
|| (cmp == 0
&& (my_off_t)trx->repl_wait_binlog_pos
<= innobase_repl_wait_pos)) {
/* This thd has an even lower position, let
us update the minimum info */

strcpy(innobase_repl_wait_file_name,
trx->repl_wait_binlog_name);

innobase_repl_wait_pos =
trx->repl_wait_binlog_pos;
}
} else {
strcpy(innobase_repl_wait_file_name,
trx->repl_wait_binlog_name);

innobase_repl_wait_pos = trx->repl_wait_binlog_pos;

innobase_repl_wait_file_name_inited = 1;
}
set_timespec(abstime, thd->variables.sync_replication_timeout);

/* Let us suspend this thread to wait on the condition;
when replication has progressed far enough, we will release
these waiting threads. The following call
pthread_cond_timedwait also atomically unlocks
innobase_repl_cond_mutex. */

innobase_repl_n_wait_threads++;

/* printf("Waiting for binlog to be sent\n"); */

ret = pthread_cond_timedwait(&innobase_repl_cond,
&innobase_repl_cond_mutex, &abstime);
innobase_repl_n_wait_threads--;

if (ret != 0) {
ut_print_timestamp(stderr);

sql_print_error("MySQL synchronous replication was "
"not able to send the binlog to the "
"slave within the timeout %lu. We "
"assume that the slave has become "
"inaccessible, and switch off "
"synchronous replication until the "
"communication to the slave works "
"again. MySQL synchronous replication "
"has sent binlog to the slave up to "
"file %s, position %lu. This "
"transaction needs it to be sent up "
"to file %s, position %lu.",
thd->variables.sync_replication_timeout,
innobase_repl_file_name,
(ulong) innobase_repl_pos,
trx->repl_wait_binlog_name,
(ulong) trx->repl_wait_binlog_pos);

innobase_repl_state = 0;

pthread_mutex_unlock(&innobase_repl_cond_mutex);

return(0);
}

goto try_again;
}
#endif // HAVE_REPLICATION
return(0);
}


/*********************************************************************
Rolls back a transaction or the latest SQL statement. */

@ -2196,6 +2065,7 @@ innobase_close_connection(

innobase_rollback_trx(trx);

thr_local_free(trx->mysql_thread_id);
trx_free_for_mysql(trx);

return(0);
@ -2216,7 +2086,7 @@ ha_innobase::get_row_type() const
row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;

if (prebuilt && prebuilt->table) {
if (innodb_dict_table_is_comp(prebuilt->table)) {
if (dict_table_is_comp_noninline(prebuilt->table)) {
return(ROW_TYPE_COMPACT);
} else {
return(ROW_TYPE_REDUNDANT);
@ -3609,7 +3479,8 @@ calc_row_difference(
TRUE,
new_mysql_row_col,
col_pack_len,
innodb_dict_table_is_comp(prebuilt->table));
dict_table_is_comp_noninline(
prebuilt->table));
ufield->new_val.data = dfield.data;
ufield->new_val.len = dfield.len;
} else {
@ -3769,9 +3640,17 @@ ha_innobase::unlock_row(void)
ut_error;
}

/* Consistent read does not take any locks, thus there is
nothing to unlock. */

if (prebuilt->select_lock_type == LOCK_NONE) {
DBUG_VOID_RETURN;
}

switch (prebuilt->row_read_type) {
case ROW_READ_WITH_LOCKS:
if (!srv_locks_unsafe_for_binlog) {
if (!srv_locks_unsafe_for_binlog
|| prebuilt->trx->isolation_level == TRX_ISO_READ_COMMITTED) {
break;
}
/* fall through */
@ -3803,7 +3682,13 @@ ha_innobase::try_semi_consistent_read(bool yes)
{
row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;

if (yes && srv_locks_unsafe_for_binlog) {
/* Row read type is set to semi consistent read if this was
requested by MySQL and either the innodb_locks_unsafe_for_binlog
option is used or this session is using READ COMMITTED isolation
level. */

if (yes && (srv_locks_unsafe_for_binlog
|| prebuilt->trx->isolation_level == TRX_ISO_READ_COMMITTED)) {
prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT;
} else {
prebuilt->row_read_type = ROW_READ_WITH_LOCKS;
@ -6286,12 +6171,6 @@ ha_innobase::external_lock(
trx->n_mysql_tables_in_use++;
prebuilt->mysql_has_locked = TRUE;

if (trx->n_mysql_tables_in_use == 1) {
trx->isolation_level = innobase_map_isolation_level(
(enum_tx_isolation)
thd->variables.tx_isolation);
}

if (trx->isolation_level == TRX_ISO_SERIALIZABLE
&& prebuilt->select_lock_type == LOCK_NONE
&& (thd->options
@ -6765,11 +6644,22 @@ ha_innobase::store_lock(
TL_IGNORE */
{
row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
trx_t* trx = prebuilt->trx;

/* NOTE: MySQL can call this function with lock 'type' TL_IGNORE!
Be careful to ignore TL_IGNORE if we are going to do something with
only 'real' locks! */

/* If no MySQL tables are in use, we need to set the isolation level
of the transaction. */

if (lock_type != TL_IGNORE
&& trx->n_mysql_tables_in_use == 0) {
trx->isolation_level = innobase_map_isolation_level(
(enum_tx_isolation)
thd->variables.tx_isolation);
}

if ((lock_type == TL_READ && thd->in_lock_tables) ||
(lock_type == TL_READ_HIGH_PRIORITY && thd->in_lock_tables) ||
lock_type == TL_READ_WITH_SHARED_LOCKS ||
@ -6794,18 +6684,26 @@ ha_innobase::store_lock(
unexpected if an obsolete consistent read view would be
used. */

if (srv_locks_unsafe_for_binlog &&
prebuilt->trx->isolation_level != TRX_ISO_SERIALIZABLE &&
(lock_type == TL_READ || lock_type == TL_READ_NO_INSERT) &&
(thd->lex->sql_command == SQLCOM_INSERT_SELECT ||
thd->lex->sql_command == SQLCOM_UPDATE)) {
ulint isolation_level;

/* In case we have innobase_locks_unsafe_for_binlog
option set and isolation level of the transaction
isolation_level = trx->isolation_level;

if ((srv_locks_unsafe_for_binlog
|| isolation_level == TRX_ISO_READ_COMMITTED)
&& isolation_level != TRX_ISO_SERIALIZABLE
&& (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT)
&& (thd->lex->sql_command == SQLCOM_INSERT_SELECT
|| thd->lex->sql_command == SQLCOM_UPDATE
|| thd->lex->sql_command == SQLCOM_CREATE_TABLE)) {

/* If we either have innobase_locks_unsafe_for_binlog
option set or this session is using READ COMMITTED
isolation level and isolation level of the transaction
is not set to serializable and MySQL is doing
INSERT INTO...SELECT or UPDATE ... = (SELECT ...)
without FOR UPDATE or IN SHARE MODE in select, then
we use consistent read for select. */
INSERT INTO...SELECT or UPDATE ... = (SELECT ...) or
CREATE ... SELECT... without FOR UPDATE or
IN SHARE MODE in select, then we use consistent
read for select. */

prebuilt->select_lock_type = LOCK_NONE;
prebuilt->stored_select_lock_type = LOCK_NONE;
@ -6854,25 +6752,26 @@ ha_innobase::store_lock(
}

/* If we are not doing a LOCK TABLE, DISCARD/IMPORT
TABLESPACE or TRUNCATE TABLE then allow multiple
TABLESPACE or TRUNCATE TABLE then allow multiple
writers. Note that ALTER TABLE uses a TL_WRITE_ALLOW_READ
< TL_WRITE_CONCURRENT_INSERT.

We especially allow multiple writers if MySQL is at the
start of a stored procedure call (SQLCOM_CALL)
(MySQL does have thd->in_lock_tables TRUE there). */
We especially allow multiple writers if MySQL is at the
start of a stored procedure call (SQLCOM_CALL) or a
stored function call (MySQL does have thd->in_lock_tables
TRUE there). */

if ((lock_type >= TL_WRITE_CONCURRENT_INSERT
&& lock_type <= TL_WRITE)
&& !(thd->in_lock_tables
&& thd->lex->sql_command == SQLCOM_LOCK_TABLES)
&& !thd->tablespace_op
&& thd->lex->sql_command != SQLCOM_TRUNCATE
&& thd->lex->sql_command != SQLCOM_OPTIMIZE
&& thd->lex->sql_command != SQLCOM_CREATE_TABLE) {
if ((lock_type >= TL_WRITE_CONCURRENT_INSERT
&& lock_type <= TL_WRITE)
&& !(thd->in_lock_tables
&& thd->lex->sql_command == SQLCOM_LOCK_TABLES)
&& !thd->tablespace_op
&& thd->lex->sql_command != SQLCOM_TRUNCATE
&& thd->lex->sql_command != SQLCOM_OPTIMIZE
&& thd->lex->sql_command != SQLCOM_CREATE_TABLE) {

lock_type = TL_WRITE_ALLOW_WRITE;
}
}

/* In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
MySQL would use the lock TL_READ_NO_INSERT on t2, and that
@ -316,9 +316,6 @@ int innobase_rollback_by_xid(
XID *xid); /* in : X/Open XA Transaction Identification */


int innobase_repl_report_sent_binlog(THD *thd, char *log_file_name,
my_off_t end_offset);

/***********************************************************************
Create a consistent view for a cursor based on current transaction
which is created if the corresponding MySQL thread still lacks one.
@ -384,14 +384,14 @@ Thd_ndb::get_open_table(THD *thd, const void *key)
thd_ndb_share->key= key;
thd_ndb_share->stat.last_count= count;
thd_ndb_share->stat.no_uncommitted_rows_count= 0;
thd_ndb_share->stat.records == ~(ha_rows)0;
thd_ndb_share->stat.records= ~(ha_rows)0;
my_hash_insert(&open_tables, (byte *)thd_ndb_share);
}
else if (thd_ndb_share->stat.last_count != count)
{
thd_ndb_share->stat.last_count= count;
thd_ndb_share->stat.no_uncommitted_rows_count= 0;
thd_ndb_share->stat.records == ~(ha_rows)0;
thd_ndb_share->stat.records= ~(ha_rows)0;
}
DBUG_PRINT("exit", ("thd_ndb_share: 0x%x key: 0x%x", thd_ndb_share, key));
DBUG_RETURN(thd_ndb_share);
@ -4764,7 +4764,10 @@ int ha_ndbcluster::create(const char *name,
DBUG_RETURN(my_errno);
}

int ha_ndbcluster::create_handler_files(const char *file, HA_CREATE_INFO *info)
int ha_ndbcluster::create_handler_files(const char *file,
const char *old_name,
int action_flag,
HA_CREATE_INFO *info)
{
char path[FN_REFLEN];
const char *name;
@ -4776,6 +4779,10 @@ int ha_ndbcluster::create_handler_files(const char *file, HA_CREATE_INFO *info)

DBUG_ENTER("create_handler_files");

if (action_flag != CHF_INDEX_FLAG)
{
DBUG_RETURN(FALSE);
}
DBUG_PRINT("enter", ("file: %s", file));
if (!(ndb= get_ndb()))
DBUG_RETURN(HA_ERR_NO_CONNECTION);
@ -626,7 +626,8 @@ class ha_ndbcluster: public handler
int rename_table(const char *from, const char *to);
int delete_table(const char *name);
int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
int create_handler_files(const char *file, HA_CREATE_INFO *info);
int create_handler_files(const char *file, const char *old_name,
int action_flag, HA_CREATE_INFO *info);
int get_default_no_partitions(ulonglong max_rows);
bool get_no_parts(const char *name, uint *no_parts);
void set_auto_partitions(partition_info *part_info);
@ -3095,6 +3095,9 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
Thd_ndb *thd_ndb=0;
int ndb_update_binlog_index= 1;
injector *inj= injector::instance();
#ifdef RUN_NDB_BINLOG_TIMER
Timer main_timer;
#endif

pthread_mutex_lock(&injector_mutex);
/*
@ -3233,9 +3236,6 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
thd->db= db;
}

#ifdef RUN_NDB_BINLOG_TIMER
Timer main_timer;
#endif
for ( ; !((abort_loop || do_ndbcluster_binlog_close_connection) &&
ndb_latest_handled_binlog_epoch >= g_latest_trans_gci); )
{
@ -3316,15 +3316,16 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
if (res > 0)
{
DBUG_PRINT("info", ("pollEvents res: %d", res));
#ifdef RUN_NDB_BINLOG_TIMER
Timer gci_timer, write_timer;
int event_count= 0;
#endif
thd->proc_info= "Processing events";
NdbEventOperation *pOp= i_ndb->nextEvent();
Binlog_index_row row;
while (pOp != NULL)
{
#ifdef RUN_NDB_BINLOG_TIMER
Timer gci_timer, write_timer;
int event_count= 0;
gci_timer.start();
#endif
gci= pOp->getGCI();
DBUG_PRINT("info", ("Handling gci: %d", (unsigned)gci));
// sometimes get TE_ALTER with invalid table
@ -3503,6 +3504,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
DBUG_PRINT("info", ("COMMIT gci: %lld", gci));
if (ndb_update_binlog_index)
ndb_add_binlog_index(thd, &row);
ndb_latest_applied_binlog_epoch= gci;
}
ndb_latest_handled_binlog_epoch= gci;
#ifdef RUN_NDB_BINLOG_TIMER
@ -403,88 +403,6 @@ int ha_partition::ha_initialise()
/****************************************************************************
MODULE meta data changes
****************************************************************************/
/*
Create partition names

SYNOPSIS
create_partition_name()
out:out Created partition name string
in1 First part
in2 Second part
name_variant Normal, temporary or renamed partition name

RETURN VALUE
NONE

DESCRIPTION
This method is used to calculate the partition name, service routine to
the del_ren_cre_table method.
*/

#define NORMAL_PART_NAME 0
#define TEMP_PART_NAME 1
#define RENAMED_PART_NAME 2
static void create_partition_name(char *out, const char *in1,
const char *in2, uint name_variant,
bool translate)
{
char transl_part_name[FN_REFLEN];
const char *transl_part;

if (translate)
{
tablename_to_filename(in2, transl_part_name, FN_REFLEN);
transl_part= transl_part_name;
}
else
transl_part= in2;
if (name_variant == NORMAL_PART_NAME)
strxmov(out, in1, "#P#", transl_part, NullS);
else if (name_variant == TEMP_PART_NAME)
strxmov(out, in1, "#P#", transl_part, "#TMP#", NullS);
else if (name_variant == RENAMED_PART_NAME)
strxmov(out, in1, "#P#", transl_part, "#REN#", NullS);
}

/*
Create subpartition name

SYNOPSIS
create_subpartition_name()
out:out Created partition name string
in1 First part
in2 Second part
in3 Third part
name_variant Normal, temporary or renamed partition name

RETURN VALUE
NONE

DESCRIPTION
This method is used to calculate the subpartition name, service routine to
the del_ren_cre_table method.
*/

static void create_subpartition_name(char *out, const char *in1,
const char *in2, const char *in3,
uint name_variant)
{
char transl_part_name[FN_REFLEN], transl_subpart_name[FN_REFLEN];

tablename_to_filename(in2, transl_part_name, FN_REFLEN);
tablename_to_filename(in3, transl_subpart_name, FN_REFLEN);
if (name_variant == NORMAL_PART_NAME)
strxmov(out, in1, "#P#", transl_part_name,
"#SP#", transl_subpart_name, NullS);
else if (name_variant == TEMP_PART_NAME)
strxmov(out, in1, "#P#", transl_part_name,
"#SP#", transl_subpart_name, "#TMP#", NullS);
else if (name_variant == RENAMED_PART_NAME)
strxmov(out, in1, "#P#", transl_part_name,
"#SP#", transl_subpart_name, "#REN#", NullS);
}
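For orientation, the naming scheme these two helpers implement composes the table path with #P#/#SP# markers for the (sub)partition name and an optional #TMP#/#REN# suffix. A minimal sketch of the expected results, assuming an illustrative table path "./test/t1" and partition names that are not taken from the patch:

/* Illustrative only: expected outputs of the helpers above. */
char buff[FN_REFLEN];
create_partition_name(buff, "./test/t1", "p0", NORMAL_PART_NAME, TRUE);
/* buff = "./test/t1#P#p0" */
create_partition_name(buff, "./test/t1", "p0", TEMP_PART_NAME, TRUE);
/* buff = "./test/t1#P#p0#TMP#" */
create_subpartition_name(buff, "./test/t1", "p0", "sp0", RENAMED_PART_NAME);
/* buff = "./test/t1#P#p0#SP#sp0#REN#" */
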
/*
Delete a table

@ -576,7 +494,9 @@ int ha_partition::rename_table(const char *from, const char *to)
and types of engines in the partitions.
*/

int ha_partition::create_handler_files(const char *name,
int ha_partition::create_handler_files(const char *path,
const char *old_path,
int action_flag,
HA_CREATE_INFO *create_info)
{
DBUG_ENTER("ha_partition::create_handler_files()");
@ -585,10 +505,29 @@ int ha_partition::create_handler_files(const char *name,
We need to update total number of parts since we might write the handler
file as part of a partition management command
*/
if (create_handler_file(name))
if (action_flag == CHF_DELETE_FLAG ||
action_flag == CHF_RENAME_FLAG)
{
my_error(ER_CANT_CREATE_HANDLER_FILE, MYF(0));
DBUG_RETURN(1);
char name[FN_REFLEN];
char old_name[FN_REFLEN];

strxmov(name, path, ha_par_ext, NullS);
strxmov(old_name, old_path, ha_par_ext, NullS);
if ((action_flag == CHF_DELETE_FLAG &&
my_delete(name, MYF(MY_WME))) ||
(action_flag == CHF_RENAME_FLAG &&
my_rename(old_name, name, MYF(MY_WME))))
{
DBUG_RETURN(TRUE);
}
}
else if (action_flag == CHF_CREATE_FLAG)
{
if (create_handler_file(path))
{
my_error(ER_CANT_CREATE_HANDLER_FILE, MYF(0));
DBUG_RETURN(1);
}
}
DBUG_RETURN(0);
}
@ -654,45 +593,26 @@ int ha_partition::create(const char *name, TABLE *table_arg,
int ha_partition::drop_partitions(const char *path)
{
List_iterator<partition_element> part_it(m_part_info->partitions);
List_iterator<partition_element> temp_it(m_part_info->temp_partitions);
char part_name_buff[FN_REFLEN];
uint no_parts= m_part_info->partitions.elements;
uint part_count= 0;
uint no_subparts= m_part_info->no_subparts;
uint i= 0;
uint name_variant;
int error= 1;
bool reorged_parts= (m_reorged_parts > 0);
bool temp_partitions= (m_part_info->temp_partitions.elements > 0);
int ret_error;
int error= 0;
DBUG_ENTER("ha_partition::drop_partitions");

if (temp_partitions)
no_parts= m_part_info->temp_partitions.elements;
do
{
partition_element *part_elem;
if (temp_partitions)
{
/*
We need to remove the reorganised partitions that were put in the
temp_partitions-list.
*/
part_elem= temp_it++;
DBUG_ASSERT(part_elem->part_state == PART_TO_BE_DROPPED);
}
else
part_elem= part_it++;
if (part_elem->part_state == PART_TO_BE_DROPPED ||
part_elem->part_state == PART_IS_CHANGED)
partition_element *part_elem= part_it++;
if (part_elem->part_state == PART_TO_BE_DROPPED)
{
handler *file;
/*
This part is to be dropped, meaning the part or all its subparts.
*/
name_variant= NORMAL_PART_NAME;
if (part_elem->part_state == PART_IS_CHANGED ||
(part_elem->part_state == PART_TO_BE_DROPPED && temp_partitions))
name_variant= RENAMED_PART_NAME;
if (m_is_sub_partitioned)
{
List_iterator<partition_element> sub_it(part_elem->subpartitions);
@ -704,12 +624,10 @@ int ha_partition::drop_partitions(const char *path)
create_subpartition_name(part_name_buff, path,
part_elem->partition_name,
sub_elem->partition_name, name_variant);
if (reorged_parts)
file= m_reorged_file[part_count++];
else
file= m_file[part];
file= m_file[part];
DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
error= file->delete_table((const char *) part_name_buff);
if ((ret_error= file->delete_table((const char *) part_name_buff)))
error= ret_error;
} while (++j < no_subparts);
}
else
@ -717,12 +635,10 @@ int ha_partition::drop_partitions(const char *path)
create_partition_name(part_name_buff, path,
part_elem->partition_name, name_variant,
TRUE);
if (reorged_parts)
file= m_reorged_file[part_count++];
else
file= m_file[i];
file= m_file[i];
DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
error= file->delete_table((const char *) part_name_buff);
if ((ret_error= file->delete_table((const char *) part_name_buff)))
error= ret_error;
}
if (part_elem->part_state == PART_IS_CHANGED)
part_elem->part_state= PART_NORMAL;
@ -764,7 +680,8 @@ int ha_partition::rename_partitions(const char *path)
uint no_subparts= m_part_info->no_subparts;
uint i= 0;
uint j= 0;
int error= 1;
int error= 0;
int ret_error;
uint temp_partitions= m_part_info->temp_partitions.elements;
handler *file;
partition_element *part_elem, *sub_elem;
@ -772,6 +689,14 @@ int ha_partition::rename_partitions(const char *path)

if (temp_partitions)
{
/*
These are the reorganised partitions that have already been copied.
We delete the partitions and log the delete by inactivating the
delete log entry in the table log. We only need to synchronise
these writes before moving to the next loop since there is no
interaction among reorganised partitions, they cannot have the
same name.
*/
do
{
part_elem= temp_it++;
@ -782,39 +707,59 @@ int ha_partition::rename_partitions(const char *path)
{
sub_elem= sub_it++;
file= m_reorged_file[part_count++];
create_subpartition_name(part_name_buff, path,
part_elem->partition_name,
sub_elem->partition_name,
RENAMED_PART_NAME);
create_subpartition_name(norm_name_buff, path,
part_elem->partition_name,
sub_elem->partition_name,
NORMAL_PART_NAME);
DBUG_PRINT("info", ("Rename subpartition from %s to %s",
norm_name_buff, part_name_buff));
error= file->rename_table((const char *) norm_name_buff,
(const char *) part_name_buff);
DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff));
if ((ret_error= file->delete_table((const char *) norm_name_buff)))
error= ret_error;
else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
error= 1;
else
sub_elem->log_entry= NULL; /* Indicate success */
} while (++j < no_subparts);
}
else
{
file= m_reorged_file[part_count++];
create_partition_name(part_name_buff, path,
part_elem->partition_name, RENAMED_PART_NAME,
TRUE);
create_partition_name(norm_name_buff, path,
part_elem->partition_name, NORMAL_PART_NAME,
TRUE);
DBUG_PRINT("info", ("Rename partition from %s to %s",
norm_name_buff, part_name_buff));
error= file->rename_table((const char *) norm_name_buff,
(const char *) part_name_buff);
DBUG_PRINT("info", ("Delete partition %s", norm_name_buff));
if ((ret_error= file->delete_table((const char *) norm_name_buff)))
error= ret_error;
else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
error= 1;
else
part_elem->log_entry= NULL; /* Indicate success */
}
} while (++i < temp_partitions);
VOID(sync_ddl_log());
}
i= 0;
do
{
/*
When state is PART_IS_CHANGED it means that we have created a new
TEMP partition that is to be renamed to normal partition name and
we are to delete the old partition with currently the normal name.

We perform this operation by
1) Delete old partition with normal partition name
2) Signal this in table log entry
3) Synch table log to ensure we have consistency in crashes
4) Rename temporary partition name to normal partition name
5) Signal this to table log entry
It is not necessary to synch the last state since a new rename
should not corrupt things if there was no temporary partition.

The only other parts we need to cater for are new parts that
replace reorganised parts. The reorganised parts were deleted
by the code above that goes through the temp_partitions list.
Thus the synch above makes it safe to simply perform step 4 and 5
for those entries.
*/
part_elem= part_it++;
if (part_elem->part_state == PART_IS_CHANGED ||
(part_elem->part_state == PART_IS_ADDED && temp_partitions))
@ -836,14 +781,12 @@ int ha_partition::rename_partitions(const char *path)
if (part_elem->part_state == PART_IS_CHANGED)
{
file= m_reorged_file[part_count++];
create_subpartition_name(part_name_buff, path,
part_elem->partition_name,
sub_elem->partition_name,
RENAMED_PART_NAME);
DBUG_PRINT("info", ("Rename subpartition from %s to %s",
norm_name_buff, part_name_buff));
error= file->rename_table((const char *) norm_name_buff,
(const char *) part_name_buff);
DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff));
if ((ret_error= file->delete_table((const char *) norm_name_buff)))
error= ret_error;
else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
error= 1;
VOID(sync_ddl_log());
}
file= m_new_file[part];
create_subpartition_name(part_name_buff, path,
@ -852,8 +795,13 @@ int ha_partition::rename_partitions(const char *path)
TEMP_PART_NAME);
DBUG_PRINT("info", ("Rename subpartition from %s to %s",
part_name_buff, norm_name_buff));
error= file->rename_table((const char *) part_name_buff,
(const char *) norm_name_buff);
if ((ret_error= file->rename_table((const char *) part_name_buff,
(const char *) norm_name_buff)))
error= ret_error;
else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
error= 1;
else
sub_elem->log_entry= NULL;
} while (++j < no_subparts);
}
else
@ -864,13 +812,12 @@ int ha_partition::rename_partitions(const char *path)
if (part_elem->part_state == PART_IS_CHANGED)
{
file= m_reorged_file[part_count++];
create_partition_name(part_name_buff, path,
part_elem->partition_name, RENAMED_PART_NAME,
TRUE);
DBUG_PRINT("info", ("Rename partition from %s to %s",
norm_name_buff, part_name_buff));
error= file->rename_table((const char *) norm_name_buff,
(const char *) part_name_buff);
DBUG_PRINT("info", ("Delete partition %s", norm_name_buff));
if ((ret_error= file->delete_table((const char *) norm_name_buff)))
error= ret_error;
else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
error= 1;
VOID(sync_ddl_log());
}
file= m_new_file[i];
create_partition_name(part_name_buff, path,
@ -878,11 +825,17 @@ int ha_partition::rename_partitions(const char *path)
TRUE);
DBUG_PRINT("info", ("Rename partition from %s to %s",
part_name_buff, norm_name_buff));
error= file->rename_table((const char *) part_name_buff,
(const char *) norm_name_buff);
if ((ret_error= file->rename_table((const char *) part_name_buff,
(const char *) norm_name_buff)))
error= ret_error;
else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
error= 1;
else
part_elem->log_entry= NULL;
}
}
} while (++i < no_parts);
VOID(sync_ddl_log());
DBUG_RETURN(error);
}

@ -1204,7 +1157,6 @@ int ha_partition::prepare_new_partition(TABLE *table,
error:
if (create_flag)
VOID(file->delete_table(part_name));
print_error(error, MYF(0));
DBUG_RETURN(error);
}

@ -1331,7 +1283,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
(m_reorged_parts + 1))))
{
mem_alloc_error(sizeof(partition_element*)*(m_reorged_parts+1));
DBUG_RETURN(TRUE);
DBUG_RETURN(ER_OUTOFMEMORY);
}

/*
@ -1363,7 +1315,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
(2*(no_remain_partitions + 1)))))
{
mem_alloc_error(sizeof(handler*)*2*(no_remain_partitions+1));
DBUG_RETURN(TRUE);
DBUG_RETURN(ER_OUTOFMEMORY);
}
m_added_file= &new_file_array[no_remain_partitions + 1];

@ -1435,7 +1387,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
part_elem->engine_type)))
{
mem_alloc_error(sizeof(handler));
DBUG_RETURN(TRUE);
DBUG_RETURN(ER_OUTOFMEMORY);
}
} while (++j < no_subparts);
}
@ -1483,7 +1435,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
(const char *)part_name_buff)))
{
cleanup_new_partition(part_count);
DBUG_RETURN(TRUE);
DBUG_RETURN(error);
}
m_added_file[part_count++]= new_file_array[part];
} while (++j < no_subparts);
@ -1499,7 +1451,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
(const char *)part_name_buff)))
{
cleanup_new_partition(part_count);
DBUG_RETURN(TRUE);
DBUG_RETURN(error);
}
m_added_file[part_count++]= new_file_array[i];
}
@ -1605,8 +1557,7 @@ int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
}
DBUG_RETURN(FALSE);
error:
print_error(result, MYF(0));
DBUG_RETURN(TRUE);
DBUG_RETURN(result);
}


@ -1873,8 +1824,8 @@ bool ha_partition::create_handler_file(const char *name)
{
part_elem= part_it++;
if (part_elem->part_state != PART_NORMAL &&
part_elem->part_state != PART_IS_ADDED &&
part_elem->part_state != PART_IS_CHANGED)
part_elem->part_state != PART_TO_BE_ADDED &&
part_elem->part_state != PART_CHANGED)
continue;
tablename_to_filename(part_elem->partition_name, part_name,
FN_REFLEN);
@ -1925,8 +1876,8 @@ bool ha_partition::create_handler_file(const char *name)
{
part_elem= part_it++;
if (part_elem->part_state != PART_NORMAL &&
part_elem->part_state != PART_IS_ADDED &&
part_elem->part_state != PART_IS_CHANGED)
part_elem->part_state != PART_TO_BE_ADDED &&
part_elem->part_state != PART_CHANGED)
continue;
if (!m_is_sub_partitioned)
{
|
@ -179,7 +179,8 @@ public:
|
||||
virtual int rename_table(const char *from, const char *to);
|
||||
virtual int create(const char *name, TABLE *form,
|
||||
HA_CREATE_INFO *create_info);
|
||||
virtual int create_handler_files(const char *name,
|
||||
virtual int create_handler_files(const char *name,
|
||||
const char *old_name, int action_flag,
|
||||
HA_CREATE_INFO *create_info);
|
||||
virtual void update_create_info(HA_CREATE_INFO *create_info);
|
||||
virtual char *update_table_comment(const char *comment);
|
||||
|
@ -632,6 +632,7 @@ typedef struct {

#define UNDEF_NODEGROUP 65535
class Item;
struct st_table_log_memory_entry;

class partition_info;

@ -639,7 +640,6 @@ struct st_partition_iter;
#define NOT_A_PARTITION_ID ((uint32)-1)


typedef struct st_ha_create_information
{
CHARSET_INFO *table_charset, *default_table_charset;
@ -1379,8 +1379,15 @@ public:
virtual void drop_table(const char *name);

virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
virtual int create_handler_files(const char *name, HA_CREATE_INFO *info)
{ return FALSE;}

#define CHF_CREATE_FLAG 0
#define CHF_DELETE_FLAG 1
#define CHF_RENAME_FLAG 2
#define CHF_INDEX_FLAG 3

virtual int create_handler_files(const char *name, const char *old_name,
int action_flag, HA_CREATE_INFO *info)
{ return FALSE; }
virtual int change_partitions(HA_CREATE_INFO *create_info,
const char *path,
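A minimal sketch of how an engine override is expected to dispatch on the new action_flag, modelled on the ha_partition and ha_ndbcluster implementations in this patch; the ha_example class and the helper functions are hypothetical:

/* Illustrative only: dispatching on action_flag in an override. */
int ha_example::create_handler_files(const char *name, const char *old_name,
                                     int action_flag, HA_CREATE_INFO *info)
{
  switch (action_flag)
  {
  case CHF_CREATE_FLAG:                   /* write the auxiliary file */
    return write_example_file(name);      /* hypothetical helper */
  case CHF_DELETE_FLAG:                   /* remove it */
    return delete_example_file(name);     /* hypothetical helper */
  case CHF_RENAME_FLAG:                   /* move it with the table */
    return rename_example_file(old_name, name);
  case CHF_INDEX_FLAG:                    /* index phase: nothing to do */
  default:
    return 0;
  }
}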

16
sql/item.cc
@ -644,22 +644,6 @@ Item *Item_num::safe_charset_converter(CHARSET_INFO *tocs)
}


Item *Item_static_int_func::safe_charset_converter(CHARSET_INFO *tocs)
{
Item_string *conv;
char buf[64];
String *s, tmp(buf, sizeof(buf), &my_charset_bin);
s= val_str(&tmp);
if ((conv= new Item_static_string_func(func_name, s->ptr(), s->length(),
s->charset())))
{
conv->str_value.copy();
conv->str_value.mark_as_const();
}
return conv;
}


Item *Item_static_float_func::safe_charset_converter(CHARSET_INFO *tocs)
{
Item_string *conv;

12
sql/item.h
@ -1431,18 +1431,6 @@ public:
};


class Item_static_int_func :public Item_int
{
const char *func_name;
public:
Item_static_int_func(const char *str_arg, longlong i, uint length)
:Item_int(NullS, i, length), func_name(str_arg)
{}
Item *safe_charset_converter(CHARSET_INFO *tocs);
void print(String *str) { str->append(func_name); }
};


class Item_uint :public Item_int
{
public:
@ -71,14 +71,8 @@ Item *create_func_ceiling(Item* a)

Item *create_func_connection_id(void)
{
THD *thd=current_thd;
thd->lex->safe_to_cache_query= 0;
return new Item_static_int_func("connection_id()",
(longlong)
((thd->slave_thread) ?
thd->variables.pseudo_thread_id :
thd->thread_id),
10);
current_thd->lex->safe_to_cache_query= 0;
return new Item_func_connection_id();
}

Item *create_func_conv(Item* a, Item *b, Item *c)
@ -561,6 +561,31 @@ String *Item_int_func::val_str(String *str)
}


void Item_func_connection_id::fix_length_and_dec()
{
Item_int_func::fix_length_and_dec();
max_length= 10;
}


bool Item_func_connection_id::fix_fields(THD *thd, Item **ref)
{
if (Item_int_func::fix_fields(thd, ref))
return TRUE;

/*
To replicate CONNECTION_ID() properly we should use
pseudo_thread_id on slave, which contains the value of thread_id
on master.
*/
value= ((thd->slave_thread) ?
thd->variables.pseudo_thread_id :
thd->thread_id);

return FALSE;
}


/*
Check arguments here to determine result's type for a numeric
function of two arguments.
@ -2464,11 +2489,8 @@ longlong Item_func_bit_count::val_int()
{
DBUG_ASSERT(fixed == 1);
ulonglong value= (ulonglong) args[0]->val_int();
if (args[0]->null_value)
{
null_value=1; /* purecov: inspected */
if ((null_value= args[0]->null_value))
return 0; /* purecov: inspected */
}
return (longlong) my_count_bits(value);
}

@ -279,6 +279,18 @@ public:
};


class Item_func_connection_id :public Item_int_func
{
longlong value;

public:
const char *func_name() const { return "connection_id"; }
void fix_length_and_dec();
bool fix_fields(THD *thd, Item **ref);
longlong val_int() { DBUG_ASSERT(fixed == 1); return value; }
};
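The point of this class is that the connection id is captured once in fix_fields() and afterwards served from the cached member, so on a slave thread the replicated pseudo_thread_id is used instead of the slave thread's own id. A minimal lifecycle sketch, assuming a THD *thd in scope (not taken from the patch):

/* Illustrative only: the value is cached at fix time, not per row. */
Item *item= new Item_func_connection_id();
if (!item->fix_fields(thd, &item))   /* caches thread_id or pseudo_thread_id */
{
  longlong id= item->val_int();      /* returns the cached value */
}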


class Item_func_signed :public Item_int_func
{
public:

@ -80,6 +80,20 @@ String *Item_str_func::check_well_formed_result(String *str)
}


my_decimal *Item_str_func::val_decimal(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
char buff[64];
String *res, tmp(buff,sizeof(buff), &my_charset_bin);
res= val_str(&tmp);
if (!res)
return 0;
(void)str2my_decimal(E_DEC_FATAL_ERROR, (char*) res->ptr(),
res->length(), res->charset(), decimal_value);
return decimal_value;
}


double Item_str_func::val_real()
{
DBUG_ASSERT(fixed == 1);
@ -33,6 +33,7 @@ public:
Item_str_func(List<Item> &list) :Item_func(list) {decimals=NOT_FIXED_DEC; }
longlong val_int();
double val_real();
my_decimal *val_decimal(my_decimal *);
enum Item_result result_type () const { return STRING_RESULT; }
void left_right_max_length();
String *check_well_formed_result(String *str);
@ -2255,8 +2255,8 @@ String *Item_char_typecast::val_str(String *str)
// Convert character set if differ
uint dummy_errors;
if (!(res= args[0]->val_str(&tmp_value)) ||
str->copy(res->ptr(), res->length(), res->charset(),
cast_cs, &dummy_errors))
str->copy(res->ptr(), res->length(), from_cs,
cast_cs, &dummy_errors))
{
null_value= 1;
return 0;
@ -2311,21 +2311,40 @@ String *Item_char_typecast::val_str(String *str)
void Item_char_typecast::fix_length_and_dec()
{
uint32 char_length;
/*
We always force character set conversion if cast_cs is a
multi-byte character set. It guarantees that the result of CAST is
a well-formed string. For single-byte character sets we allow
just to copy from the argument. A single-byte character sets
string is always well-formed.
/*
We always force character set conversion if cast_cs
is a multi-byte character set. It guarantees that the
result of CAST is a well-formed string.
For single-byte character sets we allow just to copy
from the argument. A single-byte character sets string
is always well-formed.

There is a special trick to convert from a number to ucs2.
As numbers have my_charset_bin as their character set,
it wouldn't do conversion to ucs2 without an additional action.
To force conversion, we should pretend to be non-binary.
Let's choose from_cs this way:
- If the argument is a number and cast_cs is ucs2 (i.e. mbminlen > 1),
then from_cs is set to latin1, to perform latin1 -> ucs2 conversion.
- If the argument is a number and cast_cs is ASCII-compatible
(i.e. mbminlen == 1), then from_cs is set to cast_cs,
which allows just to take over the args[0]->val_str() result
and thus avoid unnecessary character set conversion.
- If the argument is not a number, then from_cs is set to
the argument's charset.
*/
charset_conversion= ((cast_cs->mbmaxlen > 1) ||
!my_charset_same(args[0]->collation.collation,
cast_cs) &&
args[0]->collation.collation != &my_charset_bin &&
cast_cs != &my_charset_bin);
from_cs= (args[0]->result_type() == INT_RESULT ||
args[0]->result_type() == DECIMAL_RESULT ||
args[0]->result_type() == REAL_RESULT) ?
(cast_cs->mbminlen == 1 ? cast_cs : &my_charset_latin1) :
args[0]->collation.collation;
charset_conversion= (cast_cs->mbmaxlen > 1) ||
!my_charset_same(from_cs, cast_cs) &&
from_cs != &my_charset_bin &&
cast_cs != &my_charset_bin;
collation.set(cast_cs, DERIVATION_IMPLICIT);
char_length= (cast_length >= 0) ? cast_length :
args[0]->max_length/args[0]->collation.collation->mbmaxlen;
args[0]->max_length/from_cs->mbmaxlen;
max_length= char_length * cast_cs->mbmaxlen;
}

@ -708,7 +708,7 @@ public:
class Item_char_typecast :public Item_typecast
{
int cast_length;
CHARSET_INFO *cast_cs;
CHARSET_INFO *cast_cs, *from_cs;
bool charset_conversion;
String tmp_value;
public:
191
sql/mysql_priv.h
@ -613,6 +613,100 @@ struct Query_cache_query_flags
#define query_cache_invalidate_by_MyISAM_filename_ref NULL
#endif /*HAVE_QUERY_CACHE*/

/*
Error injector Macros to enable easy testing of recovery after failures
in various error cases.
*/
#ifndef ERROR_INJECT_SUPPORT

#define ERROR_INJECT(x) 0
#define ERROR_INJECT_ACTION(x,action) 0
#define ERROR_INJECT_CRASH(x) 0
#define ERROR_INJECT_VALUE(x) 0
#define ERROR_INJECT_VALUE_ACTION(x,action) 0
#define ERROR_INJECT_VALUE_CRASH(x) 0
#define SET_ERROR_INJECT_VALUE(x)

#else

inline bool check_and_unset_keyword(const char *dbug_str)
{
const char *extra_str= "-d,";
char total_str[200];
if (_db_strict_keyword_ (dbug_str))
{
strxmov(total_str, extra_str, dbug_str, NullS);
DBUG_SET(total_str);
return 1;
}
return 0;
}


inline bool
check_and_unset_inject_value(int value)
{
THD *thd= current_thd;
if (thd->error_inject_value == (uint)value)
{
thd->error_inject_value= 0;
return 1;
}
return 0;
}

/*
ERROR INJECT MODULE:
--------------------
These macros are used to inject errors from the application code.
The error injections can be activated from SQL by using:
SET SESSION dbug=+d,code;

After the error has been injected, the macros will automatically
remove the debug code, similar to using:
SET SESSION dbug=-d,code
from SQL.

ERROR_INJECT_CRASH will inject a crash of the MySQL Server if code
is set when the macro is called. ERROR_INJECT_CRASH can be used in
if-statements, it will always return FALSE unless of course it
crashes in which case it doesn't return at all.

ERROR_INJECT_ACTION will inject the action specified in the action
parameter of the macro; before performing the action the code will
be removed such that no more events occur. ERROR_INJECT_ACTION
can also be used in if-statements and always returns FALSE.
ERROR_INJECT can be used in a normal if-statement, where the action
part is performed in the if-block. The macro returns TRUE if the
error was activated and otherwise returns FALSE. If activated the
code is removed.

Sometimes it is necessary to perform error inject actions as a series
of events. In this case one can use one variable on the THD object.
Thus one sets this value by using e.g. SET_ERROR_INJECT_VALUE(100).
Then one can later test for it by using ERROR_INJECT_CRASH_VALUE,
ERROR_INJECT_ACTION_VALUE and ERROR_INJECT_VALUE. These have the same
behaviour as the above described macros except that they use the
error inject value instead of a code used by DBUG macros.
*/
#define SET_ERROR_INJECT_VALUE(x) \
current_thd->error_inject_value= (x)
#define ERROR_INJECT_CRASH(code) \
DBUG_EVALUATE_IF(code, (abort(), 0), 0)
#define ERROR_INJECT_ACTION(code, action) \
(check_and_unset_keyword(code) ? ((action), 0) : 0)
#define ERROR_INJECT(code) \
check_and_unset_keyword(code)
#define ERROR_INJECT_VALUE(value) \
check_and_unset_inject_value(value)
#define ERROR_INJECT_VALUE_ACTION(value,action) \
(check_and_unset_inject_value(value) ? (action) : 0)
#define ERROR_INJECT_VALUE_CRASH(value) \
ERROR_INJECT_VALUE_ACTION(value, (abort(), 0))

#endif
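A minimal usage sketch, assuming ERROR_INJECT_SUPPORT is compiled in and a test has issued SET SESSION dbug=+d,fail_example beforehand; the keyword names here are hypothetical, not taken from the patch:

/* Illustrative only: the first activation returns TRUE and unsets
   the keyword, so the injected error fires exactly once. */
if (ERROR_INJECT("fail_example"))
{
  DBUG_RETURN(1);                    /* simulate the failure path */
}
/* Crash the server at a recovery-sensitive point when set: */
if (ERROR_INJECT_CRASH("crash_example"))
  ;                                  /* unreachable: abort()s when set */
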
uint build_table_path(char *buff, size_t bufflen, const char *db,
const char *table, const char *ext);
void write_bin_log(THD *thd, bool clear_error,
@ -1090,6 +1184,16 @@ uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
bool remove_table_from_cache(THD *thd, const char *db, const char *table,
uint flags);

#define NORMAL_PART_NAME 0
#define TEMP_PART_NAME 1
#define RENAMED_PART_NAME 2
void create_partition_name(char *out, const char *in1,
const char *in2, uint name_variant,
bool translate);
void create_subpartition_name(char *out, const char *in1,
const char *in2, const char *in3,
uint name_variant);
typedef struct st_lock_param_type
{
ulonglong copied;
@ -1109,14 +1213,94 @@ typedef struct st_lock_param_type
uint key_count;
uint db_options;
uint pack_frm_len;
partition_info *part_info;
} ALTER_PARTITION_PARAM_TYPE;

void mem_alloc_error(size_t size);
#define WFRM_INITIAL_WRITE 1
#define WFRM_CREATE_HANDLER_FILES 2

enum ddl_log_entry_code
{
/*
DDL_LOG_EXECUTE_CODE:
This is a code that indicates that this is a log entry to
be executed, from this entry a linked list of log entries
can be found and executed.
DDL_LOG_ENTRY_CODE:
An entry to be executed in a linked list from an execute log
entry.
DDL_IGNORE_LOG_ENTRY_CODE:
An entry that is to be ignored
*/
DDL_LOG_EXECUTE_CODE = 'e',
DDL_LOG_ENTRY_CODE = 'l',
DDL_IGNORE_LOG_ENTRY_CODE = 'i'
};

enum ddl_log_action_code
{
/*
The type of action that a DDL_LOG_ENTRY_CODE entry is to
perform.
DDL_LOG_DELETE_ACTION:
Delete an entity
DDL_LOG_RENAME_ACTION:
Rename an entity
DDL_LOG_REPLACE_ACTION:
Rename an entity after removing the previous entry with the
new name, that is replace this entry.
*/
DDL_LOG_DELETE_ACTION = 'd',
DDL_LOG_RENAME_ACTION = 'r',
DDL_LOG_REPLACE_ACTION = 's'
};


typedef struct st_ddl_log_entry
{
const char *name;
const char *from_name;
const char *handler_name;
uint next_entry;
uint entry_pos;
enum ddl_log_entry_code entry_type;
enum ddl_log_action_code action_type;
/*
Most actions have only one phase. REPLACE does however have two
phases. The first phase removes the file with the new name if
there was one there before and the second phase renames the
old name to the new name.
*/
char phase;
} DDL_LOG_ENTRY;

typedef struct st_ddl_log_memory_entry
{
uint entry_pos;
struct st_ddl_log_memory_entry *next_log_entry;
struct st_ddl_log_memory_entry *prev_log_entry;
struct st_ddl_log_memory_entry *next_active_log_entry;
} DDL_LOG_MEMORY_ENTRY;
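As a sketch of how these structures feed the entry points declared below: fill a DDL_LOG_ENTRY, write it, attach an execute entry, and sync before performing the action. All field values here are illustrative, not taken from the patch, and the exact meaning of the 'complete' flag is assumed:

/* Illustrative only: log a single rename action crash-safely. */
DDL_LOG_ENTRY entry;
DDL_LOG_MEMORY_ENTRY *log_entry= NULL;
DDL_LOG_MEMORY_ENTRY *exec_entry= NULL;

entry.entry_type=   DDL_LOG_ENTRY_CODE;
entry.action_type=  DDL_LOG_RENAME_ACTION;
entry.next_entry=   0;                       /* last entry in the chain */
entry.phase=        0;
entry.name=         "./test/t1#P#p0";        /* rename target */
entry.from_name=    "./test/t1#P#p0#TMP#";   /* rename source */
entry.handler_name= "example_engine";        /* hypothetical engine */

if (write_ddl_log_entry(&entry, &log_entry) ||
    write_execute_ddl_log_entry(log_entry->entry_pos, FALSE, &exec_entry))
  return TRUE;                               /* logging failed */
VOID(sync_ddl_log());                        /* survive a crash here */
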
bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
DDL_LOG_MEMORY_ENTRY **active_entry);
bool write_execute_ddl_log_entry(uint first_entry,
bool complete,
DDL_LOG_MEMORY_ENTRY **active_entry);
bool deactivate_ddl_log_entry(uint entry_no);
void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry);
bool sync_ddl_log();
void release_ddl_log();
void execute_ddl_log_recovery();
bool execute_ddl_log_entry(THD *thd, uint first_entry);

extern pthread_mutex_t LOCK_gdl;

#define WFRM_WRITE_SHADOW 1
#define WFRM_INSTALL_SHADOW 2
#define WFRM_PACK_FRM 4
bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags);
bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt);
int abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt);
void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt);
void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table);

@ -1224,6 +1408,7 @@ File open_binlog(IO_CACHE *log, const char *log_file_name,

/* mysqld.cc */
extern void MYSQLerror(const char*);
void refresh_status(THD *thd);

/* item_func.cc */
extern bool check_reserved_words(LEX_STRING *name);
@ -1755,17 +1755,6 @@ void end_thread(THD *thd, bool put_in_cache)
|
||||
}
|
||||
|
||||
|
||||
/* Start a cached thread. LOCK_thread_count is locked on entry */
|
||||
|
||||
static void start_cached_thread(THD *thd)
|
||||
{
|
||||
thread_cache.append(thd);
|
||||
wake_thread++;
|
||||
thread_count++;
|
||||
pthread_cond_signal(&COND_thread_cache);
|
||||
}
|
||||
|
||||
|
||||
void flush_thread_cache()
|
||||
{
|
||||
(void) pthread_mutex_lock(&LOCK_thread_count);
|
||||
@ -3605,6 +3594,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
|
||||
unireg_abort(1);
|
||||
}
|
||||
}
|
||||
execute_ddl_log_recovery();
|
||||
|
||||
create_shutdown_thread();
|
||||
create_maintenance_thread();
|
||||
@ -3656,6 +3646,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
|
||||
pthread_cond_wait(&COND_thread_count,&LOCK_thread_count);
|
||||
(void) pthread_mutex_unlock(&LOCK_thread_count);
|
||||
|
||||
release_ddl_log();
|
||||
#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY)
|
||||
if (Service.IsNT() && start_mode)
|
||||
Service.Stop();
|
||||
@ -3927,6 +3918,25 @@ static bool read_init_file(char *file_name)
|
||||
|
||||
|
||||
#ifndef EMBEDDED_LIBRARY
|
||||
/*
|
||||
Create new thread to handle incoming connection.
|
||||
|
||||
SYNOPSIS
|
||||
create_new_thread()
|
||||
thd in/out Thread handle of future thread.
|
||||
|
||||
DESCRIPTION
|
||||
This function will create new thread to handle the incoming
|
||||
connection. If there are idle cached threads one will be used.
|
||||
'thd' will be pushed into 'threads'.
|
||||
|
||||
In single-threaded mode (#define ONE_THREAD) connection will be
|
||||
handled inside this function.
|
||||
|
||||
RETURN VALUE
|
||||
none
|
||||
*/
|
||||
|
||||
static void create_new_thread(THD *thd)
|
||||
{
|
||||
DBUG_ENTER("create_new_thread");
|
||||
@ -3950,11 +3960,12 @@ static void create_new_thread(THD *thd)
|
||||
thd->real_id=pthread_self(); // Keep purify happy
|
||||
|
||||
/* Start a new thread to handle connection */
|
||||
thread_count++;
|
||||
|
||||
#ifdef ONE_THREAD
|
||||
if (test_flags & TEST_NO_THREADS) // For debugging under Linux
|
||||
{
|
||||
thread_cache_size=0; // Safety
|
||||
thread_count++;
|
||||
threads.append(thd);
|
||||
thd->real_id=pthread_self();
|
||||
(void) pthread_mutex_unlock(&LOCK_thread_count);
|
||||
@ -3963,18 +3974,20 @@ static void create_new_thread(THD *thd)
|
||||
else
|
||||
#endif
|
||||
{
|
||||
if (thread_count-delayed_insert_threads > max_used_connections)
|
||||
max_used_connections=thread_count-delayed_insert_threads;
|
||||
|
||||
if (cached_thread_count > wake_thread)
|
||||
{
|
||||
start_cached_thread(thd);
|
||||
thread_cache.append(thd);
|
||||
wake_thread++;
|
||||
pthread_cond_signal(&COND_thread_cache);
|
||||
}
|
||||
else
|
||||
{
|
||||
int error;
|
||||
thread_count++;
|
||||
thread_created++;
|
||||
threads.append(thd);
|
||||
if (thread_count-delayed_insert_threads > max_used_connections)
|
||||
max_used_connections=thread_count-delayed_insert_threads;
|
||||
DBUG_PRINT("info",(("creating thread %d"), thd->thread_id));
|
||||
thd->connect_time = time(NULL);
|
||||
if ((error=pthread_create(&thd->real_id,&connection_attrib,
|
||||
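With start_cached_thread() folded into its only caller, the cache-or-create decision now lives in one place. Condensed, the rewritten branch amounts to the sketch below; this is a simplified view, assuming LOCK_thread_count is held as the function comment requires, and assuming handle_one_connection as the thread body (the pthread_create arguments are truncated in the hunk above):

    thread_count++;                      /* every connection counted once */
    if (cached_thread_count > wake_thread)
    {
      /* An idle cached thread exists: queue the THD and wake it */
      thread_cache.append(thd);
      wake_thread++;
      pthread_cond_signal(&COND_thread_cache);
    }
    else
    {
      /* No idle thread: register the THD and spawn a new one */
      int error;
      thread_created++;
      threads.append(thd);
      error= pthread_create(&thd->real_id, &connection_attrib,
                            handle_one_connection, (void*) thd);
      /* on error: undo the accounting and report it (omitted here) */
    }
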
@ -5918,7 +5931,7 @@ The minimum value for this variable is 4096.",
(gptr*) &max_system_variables.max_length_for_sort_data, 0, GET_ULONG,
REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0},
{"max_prepared_stmt_count", OPT_MAX_PREPARED_STMT_COUNT,
"Maximum numbrer of prepared statements in the server.",
"Maximum number of prepared statements in the server.",
(gptr*) &max_prepared_stmt_count, (gptr*) &max_prepared_stmt_count,
0, GET_ULONG, REQUIRED_ARG, 16382, 0, 1*1024*1024, 0, 1, 0},
{"max_relay_log_size", OPT_MAX_RELAY_LOG_SIZE,
@ -6160,23 +6173,6 @@ The minimum value for this variable is 4096.",
{"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default.",
(gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0,
0, 0, 0, 0},
#ifdef HAVE_REPLICATION
{"sync-replication", OPT_SYNC_REPLICATION,
"Enable synchronous replication.",
(gptr*) &global_system_variables.sync_replication,
(gptr*) &global_system_variables.sync_replication,
0, GET_ULONG, REQUIRED_ARG, 0, 0, 1, 0, 1, 0},
{"sync-replication-slave-id", OPT_SYNC_REPLICATION_SLAVE_ID,
"Synchronous replication is wished for this slave.",
(gptr*) &global_system_variables.sync_replication_slave_id,
(gptr*) &global_system_variables.sync_replication_slave_id,
0, GET_ULONG, REQUIRED_ARG, 0, 0, ~0L, 0, 1, 0},
{"sync-replication-timeout", OPT_SYNC_REPLICATION_TIMEOUT,
"Synchronous replication timeout.",
(gptr*) &global_system_variables.sync_replication_timeout,
(gptr*) &global_system_variables.sync_replication_timeout,
0, GET_ULONG, REQUIRED_ARG, 10, 0, ~0L, 0, 1, 0},
#endif /* HAVE_REPLICATION */
{"table_cache", OPT_TABLE_OPEN_CACHE,
"Deprecated; use --table_open_cache instead.",
(gptr*) &table_cache_size, (gptr*) &table_cache_size, 0, GET_ULONG,
@ -8062,7 +8058,38 @@ static void create_pid_file()
(void) my_close(file, MYF(0));
}
sql_perror("Can't start server: can't create PID file");
exit(1);
exit(1);
}


/* Clear most status variables */
void refresh_status(THD *thd)
{
pthread_mutex_lock(&LOCK_status);

/* We must update the global status before cleaning up the thread */
add_to_status(&global_status_var, &thd->status_var);
bzero((char*) &thd->status_var, sizeof(thd->status_var));

for (SHOW_VAR *ptr= status_vars; ptr->name; ptr++)
{
/* Note that SHOW_LONG_NOFLUSH variables are not reset */
if (ptr->type == SHOW_LONG)
*(ulong*) ptr->value= 0;
}
/* Reset the counters of all key caches (default and named). */
process_key_caches(reset_key_cache_counters);
pthread_mutex_unlock(&LOCK_status);

/*
  Set max_used_connections to the number of currently open
  connections. Lock LOCK_thread_count out of LOCK_status to avoid
  deadlocks. Status reset becomes not atomic, but status data is
  not exact anyway.
*/
pthread_mutex_lock(&LOCK_thread_count);
max_used_connections= thread_count-delayed_insert_threads;
pthread_mutex_unlock(&LOCK_thread_count);
}

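The comment inside refresh_status() above is really a lock-ordering rule: LOCK_status is released before LOCK_thread_count is taken, so the two mutexes are never nested. The scenario it avoids, spelled out (illustrative only, not additional server code):

    /*
      If refresh_status() nested the locks as
          lock(LOCK_status);  lock(LOCK_thread_count);
      while some other thread did
          lock(LOCK_thread_count);  lock(LOCK_status);
      the two threads could deadlock.  Hence the pattern used above:
    */
    pthread_mutex_unlock(&LOCK_status);      /* drop the first lock ...       */
    pthread_mutex_lock(&LOCK_thread_count);  /* ... before taking the second  */
    max_used_connections= thread_count - delayed_insert_threads;
    pthread_mutex_unlock(&LOCK_thread_count);
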
@ -36,6 +36,8 @@ enum partition_state {
PART_IS_ADDED= 8
};

struct st_ddl_log_memory_entry;

class partition_element :public Sql_alloc {
public:
List<partition_element> subpartitions;
@ -44,6 +46,7 @@ public:
ulonglong part_min_rows;
char *partition_name;
char *tablespace_name;
struct st_ddl_log_memory_entry *log_entry;
longlong range_value;
char* part_comment;
char* data_file_name;
@ -55,7 +58,8 @@ public:

partition_element()
: part_max_rows(0), part_min_rows(0), partition_name(NULL),
tablespace_name(NULL), range_value(0), part_comment(NULL),
tablespace_name(NULL), log_entry(NULL),
range_value(0), part_comment(NULL),
data_file_name(NULL), index_file_name(NULL),
engine_type(NULL),part_state(PART_NORMAL),
nodegroup_id(UNDEF_NODEGROUP), has_null_value(FALSE)

@ -28,7 +28,7 @@ typedef int (*get_part_id_func)(partition_info *part_info,
longlong *func_value);
typedef uint32 (*get_subpart_id_func)(partition_info *part_info);


struct st_ddl_log_memory_entry;

class partition_info : public Sql_alloc
{
@ -76,7 +76,11 @@ public:
Item *subpart_expr;

Item *item_free_list;


struct st_ddl_log_memory_entry *first_log_entry;
struct st_ddl_log_memory_entry *exec_log_entry;
struct st_ddl_log_memory_entry *frm_log_entry;

/*
  A bitmap of partitions used by the current query.
  Usage pattern:
@ -191,6 +195,7 @@ public:
part_field_array(NULL), subpart_field_array(NULL),
full_part_field_array(NULL),
part_expr(NULL), subpart_expr(NULL), item_free_list(NULL),
first_log_entry(NULL), exec_log_entry(NULL), frm_log_entry(NULL),
list_array(NULL),
part_info_string(NULL),
part_func_string(NULL), subpart_func_string(NULL),

@ -436,14 +436,6 @@ sys_var_thd_storage_engine sys_storage_engine("storage_engine",
&SV::table_type);
#ifdef HAVE_REPLICATION
sys_var_sync_binlog_period sys_sync_binlog_period("sync_binlog", &sync_binlog_period);
sys_var_thd_ulong sys_sync_replication("sync_replication",
                                       &SV::sync_replication);
sys_var_thd_ulong sys_sync_replication_slave_id(
                                       "sync_replication_slave_id",
                                       &SV::sync_replication_slave_id);
sys_var_thd_ulong sys_sync_replication_timeout(
                                       "sync_replication_timeout",
                                       &SV::sync_replication_timeout);
#endif
sys_var_bool_ptr sys_sync_frm("sync_frm", &opt_sync_frm);
sys_var_long_ptr sys_table_def_size("table_definition_cache",
@ -966,11 +958,6 @@ SHOW_VAR init_vars[]= {
{sys_sync_binlog_period.name,(char*) &sys_sync_binlog_period, SHOW_SYS},
#endif
{sys_sync_frm.name, (char*) &sys_sync_frm, SHOW_SYS},
#ifdef HAVE_REPLICATION
{sys_sync_replication.name, (char*) &sys_sync_replication, SHOW_SYS},
{sys_sync_replication_slave_id.name, (char*) &sys_sync_replication_slave_id,SHOW_SYS},
{sys_sync_replication_timeout.name, (char*) &sys_sync_replication_timeout,SHOW_SYS},
#endif
#ifdef HAVE_TZNAME
{"system_time_zone", system_time_zone, SHOW_CHAR},
#endif

@ -5826,6 +5826,8 @@ ER_NDB_CANT_SWITCH_BINLOG_FORMAT
eng "The NDB cluster engine does not support changing the binlog format on the fly yet"
ER_PARTITION_NO_TEMPORARY
eng "Cannot create temporary table with partitions"
ER_DDL_LOG_ERROR
eng "Error in DDL log"
ER_NULL_IN_VALUES_LESS_THAN
eng "Not allowed to use NULL value in VALUES LESS THAN"
swe "Det är inte tillåtet att använda NULL-värden i VALUES LESS THAN"
@ -5834,3 +5836,5 @@ ER_WRONG_PARTITION_NAME
swe "Felaktigt partitionsnamn"
ER_MAX_PREPARED_STMT_COUNT_REACHED 42000
eng "Can't create more than max_prepared_stmt_count statements (current value: %lu)"
ER_VIEW_RECURSIVE
eng "`%-.64s`.`%-.64s` contain view recursion"

@ -1229,7 +1229,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
DBUG_PRINT("info", ("function %s", m_name.str));

LINT_INIT(binlog_save_options);
params = m_pcont->context_pvars();
params= m_pcont->context_var_count();

/*
  Check that the function is called with all specified arguments.
@ -1412,7 +1412,7 @@ bool
sp_head::execute_procedure(THD *thd, List<Item> *args)
{
bool err_status= FALSE;
uint params = m_pcont->context_pvars();
uint params = m_pcont->context_var_count();
sp_rcontext *save_spcont, *octx;
sp_rcontext *nctx = NULL;
bool save_enable_slow_log= false;
@ -1466,15 +1466,15 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
for (uint i= 0 ; i < params ; i++)
{
Item *arg_item= it_args++;
sp_pvar_t *pvar= m_pcont->find_pvar(i);
sp_variable_t *spvar= m_pcont->find_variable(i);

if (!arg_item)
break;

if (!pvar)
if (!spvar)
continue;

if (pvar->mode != sp_param_in)
if (spvar->mode != sp_param_in)
{
if (!arg_item->is_splocal() && !item_is_user_var(arg_item))
{
@ -1484,7 +1484,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
}
}

if (pvar->mode == sp_param_out)
if (spvar->mode == sp_param_out)
{
Item_null *null_item= new Item_null();

@ -1560,9 +1560,9 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
if (!arg_item)
break;

sp_pvar_t *pvar= m_pcont->find_pvar(i);
sp_variable_t *spvar= m_pcont->find_variable(i);

if (pvar->mode == sp_param_in)
if (spvar->mode == sp_param_in)
continue;

if (arg_item->is_splocal())
@ -2432,7 +2432,7 @@ sp_instr_set::print(String *str)
{
/* set name@offset ... */
int rsrv = SP_INSTR_UINT_MAXLEN+6;
sp_pvar_t *var = m_ctx->find_pvar(m_offset);
sp_variable_t *var = m_ctx->find_variable(m_offset);

/* 'var' should always be non-null, but just in case... */
if (var)
@ -3048,8 +3048,8 @@ sp_instr_cfetch::execute(THD *thd, uint *nextp)
void
sp_instr_cfetch::print(String *str)
{
List_iterator_fast<struct sp_pvar> li(m_varlist);
sp_pvar_t *pv;
List_iterator_fast<struct sp_variable> li(m_varlist);
sp_variable_t *pv;
LEX_STRING n;
my_bool found= m_ctx->find_cursor(m_cursor, &n);
/* cfetch name@offset vars... */

@ -44,7 +44,7 @@ class sp_instr;
class sp_instr_opt_meta;
class sp_instr_jump_if_not;
struct sp_cond_type;
struct sp_pvar;
struct sp_variable;

class sp_name : public Sql_alloc
{
@ -1075,7 +1075,7 @@ public:

virtual void print(String *str);

void add_to_varlist(struct sp_pvar *var)
void add_to_varlist(struct sp_variable *var)
{
m_varlist.push_back(var);
}
@ -1083,7 +1083,7 @@ public:
private:

uint m_cursor;
List<struct sp_pvar> m_varlist;
List<struct sp_variable> m_varlist;

}; // class sp_instr_cfetch : public sp_instr


@ -27,10 +27,10 @@
#include "sp_head.h"

/*
 * Sanity check for SQLSTATEs. Will not check if it's really an existing
 * state (there are just too many), but will check length and bad characters.
 * Returns TRUE if it's ok, FALSE if it's bad.
 */
  Sanity check for SQLSTATEs. Will not check if it's really an existing
  state (there are just too many), but will check length and bad characters.
  Returns TRUE if it's ok, FALSE if it's bad.
*/
bool
sp_cond_check(LEX_STRING *sqlstate)
{
@ -51,25 +51,25 @@ sp_cond_check(LEX_STRING *sqlstate)
}

sp_pcontext::sp_pcontext(sp_pcontext *prev)
:Sql_alloc(), m_total_pvars(0), m_csubsize(0), m_hsubsize(0),
m_handlers(0), m_parent(prev), m_pboundary(0)
:Sql_alloc(), m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0),
m_context_handlers(0), m_parent(prev), m_pboundary(0)
{
VOID(my_init_dynamic_array(&m_pvar, sizeof(sp_pvar_t *), 16, 8));
VOID(my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *), 16, 8));
VOID(my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int), 16, 8));
VOID(my_init_dynamic_array(&m_cond, sizeof(sp_cond_type_t *), 16, 8));
VOID(my_init_dynamic_array(&m_cursor, sizeof(LEX_STRING), 16, 8));
VOID(my_init_dynamic_array(&m_handler, sizeof(sp_cond_type_t *), 16, 8));
VOID(my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *), 16, 8));
VOID(my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING), 16, 8));
VOID(my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *), 16, 8));
m_label.empty();
m_children.empty();
if (!prev)
{
m_poffset= m_coffset= 0;
m_var_offset= m_cursor_offset= 0;
m_num_case_exprs= 0;
}
else
{
m_poffset= prev->m_poffset + prev->m_total_pvars;
m_coffset= prev->current_cursors();
m_var_offset= prev->m_var_offset + prev->m_max_var_index;
m_cursor_offset= prev->current_cursor_count();
m_num_case_exprs= prev->get_num_case_exprs();
}
}
@ -85,11 +85,11 @@ sp_pcontext::destroy()

m_children.empty();
m_label.empty();
delete_dynamic(&m_pvar);
delete_dynamic(&m_vars);
delete_dynamic(&m_case_expr_id_lst);
delete_dynamic(&m_cond);
delete_dynamic(&m_cursor);
delete_dynamic(&m_handler);
delete_dynamic(&m_conds);
delete_dynamic(&m_cursors);
delete_dynamic(&m_handlers);
}

sp_pcontext *
@ -105,15 +105,15 @@ sp_pcontext::push_context()
sp_pcontext *
sp_pcontext::pop_context()
{
m_parent->m_total_pvars= m_parent->m_total_pvars + m_total_pvars;
m_parent->m_max_var_index+= m_max_var_index;

uint submax= max_handlers();
if (submax > m_parent->m_hsubsize)
  m_parent->m_hsubsize= submax;
uint submax= max_handler_index();
if (submax > m_parent->m_max_handler_index)
  m_parent->m_max_handler_index= submax;

submax= max_cursors();
if (submax > m_parent->m_csubsize)
  m_parent->m_csubsize= submax;
submax= max_cursor_index();
if (submax > m_parent->m_max_cursor_index)
  m_parent->m_max_cursor_index= submax;

if (m_num_case_exprs > m_parent->m_num_case_exprs)
  m_parent->m_num_case_exprs= m_num_case_exprs;
@ -130,12 +130,12 @@ sp_pcontext::diff_handlers(sp_pcontext *ctx, bool exclusive)

while (pctx && pctx != ctx)
{
n+= pctx->m_handlers;
n+= pctx->m_context_handlers;
last_ctx= pctx;
pctx= pctx->parent_context();
}
if (pctx)
  return (exclusive && last_ctx ? n - last_ctx->m_handlers : n);
  return (exclusive && last_ctx ? n - last_ctx->m_context_handlers : n);
return 0; // Didn't find ctx
}

@ -148,32 +148,33 @@ sp_pcontext::diff_cursors(sp_pcontext *ctx, bool exclusive)

while (pctx && pctx != ctx)
{
n+= pctx->m_cursor.elements;
n+= pctx->m_cursors.elements;
last_ctx= pctx;
pctx= pctx->parent_context();
}
if (pctx)
  return (exclusive && last_ctx ? n - last_ctx->m_cursor.elements : n);
  return (exclusive && last_ctx ? n - last_ctx->m_cursors.elements : n);
return 0; // Didn't find ctx
}

/* This does a linear search (from newer to older variables, in case
** we have shadowed names).
** It's possible to have a more efficient allocation and search method,
** but it might not be worth it. The typical number of parameters and
** variables will in most cases be low (a handful).
** ...and, this is only called during parsing.
/*
  This does a linear search (from newer to older variables, in case
  we have shadowed names).
  It's possible to have a more efficient allocation and search method,
  but it might not be worth it. The typical number of parameters and
  variables will in most cases be low (a handful).
  ...and, this is only called during parsing.
*/
sp_pvar_t *
sp_pcontext::find_pvar(LEX_STRING *name, my_bool scoped)
sp_variable_t *
sp_pcontext::find_variable(LEX_STRING *name, my_bool scoped)
{
uint i= m_pvar.elements - m_pboundary;
uint i= m_vars.elements - m_pboundary;

while (i--)
{
sp_pvar_t *p;
sp_variable_t *p;

get_dynamic(&m_pvar, (gptr)&p, i);
get_dynamic(&m_vars, (gptr)&p, i);
if (my_strnncoll(system_charset_info,
                 (const uchar *)name->str, name->length,
                 (const uchar *)p->name.str, p->name.length) == 0)
@ -182,7 +183,7 @@ sp_pcontext::find_pvar(LEX_STRING *name, my_bool scoped)
}
}
if (!scoped && m_parent)
  return m_parent->find_pvar(name, scoped);
  return m_parent->find_variable(name, scoped);
return NULL;
}

@ -192,40 +193,40 @@ sp_pcontext::find_pvar(LEX_STRING *name, my_bool scoped)
- When evaluating parameters at the beginning, and setting out parameters
  at the end, of invocation. (Top frame only, so no recursion then.)
- For printing of sp_instr_set. (Debug mode only.)
*/
sp_pvar_t *
sp_pcontext::find_pvar(uint offset)
*/
sp_variable_t *
sp_pcontext::find_variable(uint offset)
{
if (m_poffset <= offset && offset < m_poffset + m_pvar.elements)
if (m_var_offset <= offset && offset < m_var_offset + m_vars.elements)
{ // This frame
sp_pvar_t *p;
sp_variable_t *p;

get_dynamic(&m_pvar, (gptr)&p, offset - m_poffset);
get_dynamic(&m_vars, (gptr)&p, offset - m_var_offset);
return p;
}
if (m_parent)
  return m_parent->find_pvar(offset); // Some previous frame
  return m_parent->find_variable(offset); // Some previous frame
return NULL; // index out of bounds
}

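The newest-to-oldest scan is what makes DECLARE shadowing work: the variable declared in the innermost block wins, and only when a name is not found locally does the search continue in the parent context. The same idea in a self-contained form (generic C++, not the server's DYNAMIC_ARRAY code):

    #include <string>
    #include <vector>

    struct Var { std::string name; unsigned runtime_offset; };

    /* Scan backwards so an inner declaration shadows an outer one */
    static const Var *find_shadowed(const std::vector<Var> &vars,
                                    const std::string &name)
    {
      for (size_t i= vars.size(); i-- > 0; )
        if (vars[i].name == name)
          return &vars[i];
      return 0;  /* not here: the caller would then ask the parent context */
    }
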
sp_pvar_t *
sp_pcontext::push_pvar(LEX_STRING *name, enum enum_field_types type,
                       sp_param_mode_t mode)
sp_variable_t *
sp_pcontext::push_variable(LEX_STRING *name, enum enum_field_types type,
                           sp_param_mode_t mode)
{
sp_pvar_t *p= (sp_pvar_t *)sql_alloc(sizeof(sp_pvar_t));
sp_variable_t *p= (sp_variable_t *)sql_alloc(sizeof(sp_variable_t));

if (!p)
  return NULL;

++m_total_pvars;
++m_max_var_index;

p->name.str= name->str;
p->name.length= name->length;
p->type= type;
p->mode= mode;
p->offset= current_pvars();
p->offset= current_var_count();
p->dflt= NULL;
insert_dynamic(&m_pvar, (gptr)&p);
insert_dynamic(&m_vars, (gptr)&p);

return p;
}
@ -272,23 +273,23 @@ sp_pcontext::push_cond(LEX_STRING *name, sp_cond_type_t *val)
p->name.str= name->str;
p->name.length= name->length;
p->val= val;
insert_dynamic(&m_cond, (gptr)&p);
insert_dynamic(&m_conds, (gptr)&p);
}
}

/*
 * See comment for find_pvar() above
 */
  See comment for find_variable() above
*/
sp_cond_type_t *
sp_pcontext::find_cond(LEX_STRING *name, my_bool scoped)
{
uint i= m_cond.elements;
uint i= m_conds.elements;

while (i--)
{
sp_cond_t *p;

get_dynamic(&m_cond, (gptr)&p, i);
get_dynamic(&m_conds, (gptr)&p, i);
if (my_strnncoll(system_charset_info,
                 (const uchar *)name->str, name->length,
                 (const uchar *)p->name.str, p->name.length) == 0)
@ -302,20 +303,20 @@ sp_pcontext::find_cond(LEX_STRING *name, my_bool scoped)
}

/*
 * This only searches the current context, for error checking of
 * duplicates.
 * Returns TRUE if found.
 */
  This only searches the current context, for error checking of
  duplicates.
  Returns TRUE if found.
*/
bool
sp_pcontext::find_handler(sp_cond_type_t *cond)
{
uint i= m_handler.elements;
uint i= m_handlers.elements;

while (i--)
{
sp_cond_type_t *p;

get_dynamic(&m_handler, (gptr)&p, i);
get_dynamic(&m_handlers, (gptr)&p, i);
if (cond->type == p->type)
{
switch (p->type)
@ -341,31 +342,31 @@ sp_pcontext::push_cursor(LEX_STRING *name)
{
LEX_STRING n;

if (m_cursor.elements == m_csubsize)
  m_csubsize+= 1;
if (m_cursors.elements == m_max_cursor_index)
  m_max_cursor_index+= 1;
n.str= name->str;
n.length= name->length;
insert_dynamic(&m_cursor, (gptr)&n);
insert_dynamic(&m_cursors, (gptr)&n);
}

/*
 * See comment for find_pvar() above
 */
  See comment for find_variable() above
*/
my_bool
sp_pcontext::find_cursor(LEX_STRING *name, uint *poff, my_bool scoped)
{
uint i= m_cursor.elements;
uint i= m_cursors.elements;

while (i--)
{
LEX_STRING n;

get_dynamic(&m_cursor, (gptr)&n, i);
get_dynamic(&m_cursors, (gptr)&n, i);
if (my_strnncoll(system_charset_info,
                 (const uchar *)name->str, name->length,
                 (const uchar *)n.str, n.length) == 0)
{
*poff= m_coffset + i;
*poff= m_cursor_offset + i;
return TRUE;
}
}
@ -380,10 +381,10 @@ sp_pcontext::retrieve_field_definitions(List<create_field> *field_def_lst)
{
/* Put local/context fields in the result list. */

for (uint i = 0; i < m_pvar.elements; ++i)
for (uint i = 0; i < m_vars.elements; ++i)
{
sp_pvar_t *var_def;
get_dynamic(&m_pvar, (gptr) &var_def, i);
sp_variable_t *var_def;
get_dynamic(&m_vars, (gptr) &var_def, i);

field_def_lst->push_back(&var_def->field_def);
}
@ -400,17 +401,17 @@ sp_pcontext::retrieve_field_definitions(List<create_field> *field_def_lst)
/*
  Find a cursor by offset from the top.
  This is only used for debugging.
*/
*/
my_bool
sp_pcontext::find_cursor(uint offset, LEX_STRING *n)
{
if (m_coffset <= offset && offset < m_coffset + m_cursor.elements)
if (m_cursor_offset <= offset &&
    offset < m_cursor_offset + m_cursors.elements)
{ // This frame
get_dynamic(&m_cursor, (gptr)n, offset - m_coffset);
get_dynamic(&m_cursors, (gptr)n, offset - m_cursor_offset);
return TRUE;
}
if (m_parent)
  return m_parent->find_cursor(offset, n); // Some previous frame
return FALSE; // index out of bounds
}


@ -29,22 +29,23 @@ typedef enum
sp_param_inout
} sp_param_mode_t;

typedef struct sp_pvar
typedef struct sp_variable
{
LEX_STRING name;
enum enum_field_types type;
sp_param_mode_t mode;

/*
  offset -- basically, this is an index of variable in the scope of root
  parsing context. This means, that all variables in a stored routine
  have distinct indexes/offsets.
  offset -- this is the index to the variable's value in the runtime frame.
  This is calculated during parsing and used when creating sp_instr_set
  instructions and Item_splocal items.
  I.e. values are set/referred by array indexing in runtime.
*/
uint offset;

Item *dflt;
create_field field_def;
} sp_pvar_t;
} sp_variable_t;

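As the rewritten comment says, offset is now a plain index into the routine's runtime frame: parsing hands out distinct slots, and sp_instr_set instructions and Item_splocal items address them directly. A minimal sketch of that addressing model (illustrative structure only, not the server's Item machinery):

    /* One slot per variable in the whole routine; the frame is sized
       once from the root parsing context at the start of execution. */
    struct RuntimeFrame
    {
      Item **slots;                                 /* max_var_index() long */

      Item *get(uint offset) const        { return slots[offset]; }
      void  set(uint offset, Item *value) { slots[offset]= value; }
    };
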
#define SP_LAB_REF 0 // Unresolved reference (for goto)
@ -76,9 +77,10 @@ typedef struct sp_cond_type
uint mysqlerr;
} sp_cond_type_t;

/* Sanity check for SQLSTATEs. Will not check if it's really an existing
 * state (there are just too many), but will check length and bad characters.
 */
/*
  Sanity check for SQLSTATEs. Will not check if it's really an existing
  state (there are just too many), but will check length and bad characters.
*/
extern bool
sp_cond_check(LEX_STRING *sqlstate);

@ -90,7 +92,17 @@ typedef struct sp_cond


/*
  This seems to be an "SP parsing context" or something.
  The parse-time context, used to keep track on declared variables/parameters,
  conditions, handlers, cursors and labels, during parsing.
  sp_contexts are organized as a tree, with one object for each begin-end
  block, plus a root-context for the parameters.
  This is used during parsing for looking up defined names (e.g. declared
  variables and visible labels), for error checking, and to calculate offsets
  to be used at runtime. (During execution variable values, active handlers
  and cursors, etc, are referred to by an index in a stack.)
  The pcontext tree is also kept during execution and is used for error
  checking (e.g. correct number of parameters), and in the future, used by
  the debugger.
*/

class sp_pcontext : public Sql_alloc
@ -134,50 +146,64 @@ class sp_pcontext : public Sql_alloc
// Parameters and variables
//

/*
  The maximum number of variables used in this and all child contexts
  In the root, this gives us the number of slots needed for variables
  during execution.
*/
inline uint
total_pvars()
max_var_index()
{
return m_total_pvars;
return m_max_var_index;
}

/*
  The current number of variables used in the parents (from the root),
  including this context.
*/
inline uint
current_pvars()
current_var_count()
{
return m_poffset + m_pvar.elements;
return m_var_offset + m_vars.elements;
}

/* The number of variables in this context alone */
inline uint
context_pvars()
context_var_count()
{
return m_pvar.elements;
return m_vars.elements;
}

/* Map index in this pcontext to runtime offset */
inline uint
pvar_context2index(uint i)
var_context2runtime(uint i)
{
return m_poffset + i;
return m_var_offset + i;
}

/* Set type of variable. 'i' is the offset from the top */
inline void
set_type(uint i, enum enum_field_types type)
{
sp_pvar_t *p= find_pvar(i);
sp_variable_t *p= find_variable(i);

if (p)
  p->type= type;
}

/* Set default value of variable. 'i' is the offset from the top */
inline void
set_default(uint i, Item *it)
{
sp_pvar_t *p= find_pvar(i);
sp_variable_t *p= find_variable(i);

if (p)
  p->dflt= it;
}

sp_pvar_t *
push_pvar(LEX_STRING *name, enum enum_field_types type, sp_param_mode_t mode);
sp_variable_t *
push_variable(LEX_STRING *name, enum enum_field_types type,
              sp_param_mode_t mode);

/*
  Retrieve definitions of fields from the current context and its
@ -187,12 +213,12 @@ class sp_pcontext : public Sql_alloc
retrieve_field_definitions(List<create_field> *field_def_lst);

// Find by name
sp_pvar_t *
find_pvar(LEX_STRING *name, my_bool scoped=0);
sp_variable_t *
find_variable(LEX_STRING *name, my_bool scoped=0);

// Find by offset
sp_pvar_t *
find_pvar(uint offset);
// Find by offset (from the top)
sp_variable_t *
find_variable(uint offset);

/*
  Set the current scope boundary (for default values).
@ -280,7 +306,7 @@ class sp_pcontext : public Sql_alloc
pop_cond(uint num)
{
while (num--)
  pop_dynamic(&m_cond);
  pop_dynamic(&m_conds);
}

sp_cond_type_t *
@ -293,22 +319,22 @@ class sp_pcontext : public Sql_alloc
inline void
push_handler(sp_cond_type_t *cond)
{
insert_dynamic(&m_handler, (gptr)&cond);
insert_dynamic(&m_handlers, (gptr)&cond);
}

bool
find_handler(sp_cond_type *cond);

inline uint
max_handlers()
max_handler_index()
{
return m_hsubsize + m_handlers;
return m_max_handler_index + m_context_handlers;
}

inline void
add_handlers(uint n)
{
m_handlers+= n;
m_context_handlers+= n;
}

//
@ -326,51 +352,51 @@ class sp_pcontext : public Sql_alloc
find_cursor(uint offset, LEX_STRING *n);

inline uint
max_cursors()
max_cursor_index()
{
return m_csubsize + m_cursor.elements;
return m_max_cursor_index + m_cursors.elements;
}

inline uint
current_cursors()
current_cursor_count()
{
return m_coffset + m_cursor.elements;
return m_cursor_offset + m_cursors.elements;
}

protected:

/*
  m_total_pvars -- number of variables (including all types of arguments)
  m_max_var_index -- number of variables (including all types of arguments)
  in this context including all children contexts.

  m_total_pvars >= m_pvar.elements.
  m_max_var_index >= m_vars.elements.

  m_total_pvars of the root parsing context contains number of all
  m_max_var_index of the root parsing context contains number of all
  variables (including arguments) in all enclosed contexts.
*/
uint m_total_pvars;
uint m_max_var_index;

// The maximum sub context's framesizes
uint m_csubsize;
uint m_hsubsize;
uint m_handlers; // No. of handlers in this context
uint m_max_cursor_index;
uint m_max_handler_index;
uint m_context_handlers; // No. of handlers in this context

private:

sp_pcontext *m_parent; // Parent context

/*
  m_poffset -- basically, this is an index of the first variable in this
  parsing context.
  m_var_offset -- this is an index of the first variable in this
  parsing context.

  m_poffset is 0 for root context.
  m_var_offset is 0 for root context.

  Since now each variable is stored in separate place, no reuse is done,
  so m_poffset is different for all enclosed contexts.
  so m_var_offset is different for all enclosed contexts.
*/
uint m_poffset;
uint m_var_offset;

uint m_coffset; // Cursor offset for this context
uint m_cursor_offset; // Cursor offset for this context

/*
  Boundary for finding variables in this context. This is the number
@ -382,11 +408,11 @@ private:

int m_num_case_exprs;

DYNAMIC_ARRAY m_pvar; // Parameters/variables
DYNAMIC_ARRAY m_vars; // Parameters/variables
DYNAMIC_ARRAY m_case_expr_id_lst; /* Stack of CASE expression ids. */
DYNAMIC_ARRAY m_cond; // Conditions
DYNAMIC_ARRAY m_cursor; // Cursors
DYNAMIC_ARRAY m_handler; // Handlers, for checking of duplicates
DYNAMIC_ARRAY m_conds; // Conditions
DYNAMIC_ARRAY m_cursors; // Cursors
DYNAMIC_ARRAY m_handlers; // Handlers, for checking for duplicates

List<sp_label_t> m_label; // The label list

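Because pop_context() folds each child's maxima back into its parent, the root context ends up holding the worst-case counts for the whole routine, and the runtime can allocate its stacks once. A hedged sketch of that sizing step, mirroring what sp_rcontext::init() in the next hunk does with thd->alloc() (the function name and error convention here are assumptions):

    /* Illustrative only: size all runtime stacks from the root context */
    static bool init_runtime_stacks(THD *thd, sp_pcontext *root)
    {
      uint n_vars=     root->max_var_index();     /* every declared variable */
      uint n_cursors=  root->max_cursor_index();  /* deepest cursor nesting  */
      uint n_handlers= root->max_handler_index(); /* deepest handler nesting */

      Item **vars= (Item**) thd->alloc(n_vars * sizeof(Item*));
      sp_cursor **cstack=
        (sp_cursor**) thd->alloc(n_cursors * sizeof(sp_cursor*));
      uint *hstack= (uint*) thd->alloc(n_handlers * sizeof(uint));

      return !vars || !cstack || !hstack;         /* TRUE on OOM */
    }
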
@ -73,16 +73,16 @@ bool sp_rcontext::init(THD *thd)

return
!(m_handler=
  (sp_handler_t*)thd->alloc(m_root_parsing_ctx->max_handlers() *
  (sp_handler_t*)thd->alloc(m_root_parsing_ctx->max_handler_index() *
                            sizeof(sp_handler_t))) ||
!(m_hstack=
  (uint*)thd->alloc(m_root_parsing_ctx->max_handlers() *
  (uint*)thd->alloc(m_root_parsing_ctx->max_handler_index() *
                    sizeof(uint))) ||
!(m_in_handler=
  (uint*)thd->alloc(m_root_parsing_ctx->max_handlers() *
  (uint*)thd->alloc(m_root_parsing_ctx->max_handler_index() *
                    sizeof(uint))) ||
!(m_cstack=
  (sp_cursor**)thd->alloc(m_root_parsing_ctx->max_cursors() *
  (sp_cursor**)thd->alloc(m_root_parsing_ctx->max_cursor_index() *
                          sizeof(sp_cursor*))) ||
!(m_case_expr_holders=
  (Item_cache**)thd->calloc(m_root_parsing_ctx->get_num_case_exprs() *
@ -105,12 +105,12 @@ sp_rcontext::init_var_table(THD *thd)
{
List<create_field> field_def_lst;

if (!m_root_parsing_ctx->total_pvars())
if (!m_root_parsing_ctx->max_var_index())
  return FALSE;

m_root_parsing_ctx->retrieve_field_definitions(&field_def_lst);

DBUG_ASSERT(field_def_lst.elements == m_root_parsing_ctx->total_pvars());
DBUG_ASSERT(field_def_lst.elements == m_root_parsing_ctx->max_var_index());

if (!(m_var_table= create_virtual_tmp_table(thd, field_def_lst)))
  return TRUE;
@ -134,7 +134,7 @@ bool
sp_rcontext::init_var_items()
{
uint idx;
uint num_vars= m_root_parsing_ctx->total_pvars();
uint num_vars= m_root_parsing_ctx->max_var_index();

if (!(m_var_items= (Item**) sql_alloc(num_vars * sizeof (Item *))))
  return TRUE;
@ -381,7 +381,7 @@ sp_cursor::destroy()


int
sp_cursor::fetch(THD *thd, List<struct sp_pvar> *vars)
sp_cursor::fetch(THD *thd, List<struct sp_variable> *vars)
{
if (! server_side_cursor)
{
@ -528,9 +528,9 @@ int Select_fetch_into_spvars::prepare(List<Item> &fields, SELECT_LEX_UNIT *u)

bool Select_fetch_into_spvars::send_data(List<Item> &items)
{
List_iterator_fast<struct sp_pvar> pv_iter(*spvar_list);
List_iterator_fast<struct sp_variable> spvar_iter(*spvar_list);
List_iterator_fast<Item> item_iter(items);
sp_pvar_t *pv;
sp_variable_t *spvar;
Item *item;

/* Must be ensured by the caller */
@ -540,9 +540,9 @@ bool Select_fetch_into_spvars::send_data(List<Item> &items)
  Assign the row fetched from a server side cursor to stored
  procedure variables.
*/
for (; pv= pv_iter++, item= item_iter++; )
for (; spvar= spvar_iter++, item= item_iter++; )
{
if (thd->spcont->set_variable(thd, pv->offset, item))
if (thd->spcont->set_variable(thd, spvar->offset, item))
  return TRUE;
}
return FALSE;

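send_data() pairs column i of the fetched row with variable i of the FETCH ... INTO list, writing through the variable's runtime offset; the "Must be ensured by the caller" assertion is precisely the invariant that both lists have equal length. A sketch of that calling contract (a hypothetical wrapper with assumed names, not code from this patch):

    /* Hypothetical illustration of the pairing contract */
    static bool fetch_row_into_vars(Select_fetch_into_spvars *result,
                                    List<struct sp_variable> *vars,
                                    List<Item> &row)
    {
      if (vars->elements != row.elements)  /* must hold before send_data() */
        return TRUE;
      result->set_spvar_list(vars);
      return result->send_data(row);       /* row[i] -> vars[i]->offset */
    }
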
@ -24,7 +24,7 @@

struct sp_cond_type;
class sp_cursor;
struct sp_pvar;
struct sp_variable;
class sp_lex_keeper;
class sp_instr_cpush;

@ -265,12 +265,12 @@ private:

class Select_fetch_into_spvars: public select_result_interceptor
{
List<struct sp_pvar> *spvar_list;
List<struct sp_variable> *spvar_list;
uint field_count;
public:
Select_fetch_into_spvars() {} /* Remove gcc warning */
uint get_field_count() { return field_count; }
void set_spvar_list(List<struct sp_pvar> *vars) { spvar_list= vars; }
void set_spvar_list(List<struct sp_variable> *vars) { spvar_list= vars; }

virtual bool send_eof() { return FALSE; }
virtual bool send_data(List<Item> &items);
@ -307,7 +307,7 @@ public:
}

int
fetch(THD *, List<struct sp_pvar> *vars);
fetch(THD *, List<struct sp_variable> *vars);

inline sp_instr_cpush *
get_instr()

@ -6140,9 +6140,8 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b)
  abort_and_upgrade_lock()
    lpt                           Parameter passing struct
    All parameters passed through the ALTER_PARTITION_PARAM_TYPE object
  RETURN VALUES
    TRUE                          Failure
    FALSE                         Success
  RETURN VALUE
    0
  DESCRIPTION
    Remember old lock level (for possible downgrade later on), abort all
    waiting threads and ensure that all keeping locks currently are
@ -6156,23 +6155,17 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b)
    old_lock_level                Old lock level
*/

bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt)
int abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt)
{
uint flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG;
int error= FALSE;
DBUG_ENTER("abort_and_upgrade_locks");

lpt->old_lock_type= lpt->table->reginfo.lock_type;
VOID(pthread_mutex_lock(&LOCK_open));
mysql_lock_abort(lpt->thd, lpt->table, TRUE);
VOID(remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name, flags));
if (lpt->thd->killed)
{
lpt->thd->no_warnings_for_error= 0;
error= TRUE;
}
VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_RETURN(error);
DBUG_RETURN(0);
}

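Note the pairing implied by the description: abort_and_upgrade_lock() saves the previous lock level in lpt->old_lock_type so that a later close_open_tables_and_downgrade() can restore it once the partition change is done. Schematically (a sketch of the expected call pattern, not code from this patch):

    (void) abort_and_upgrade_lock(lpt);   /* always returns 0 after this change */
    /* ... do the ALTER TABLE ... PARTITION work under the upgraded lock ... */
    close_open_tables_and_downgrade(lpt); /* back to lpt->old_lock_type */
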
@ -223,6 +223,9 @@ THD::THD()
cuted_fields= sent_row_count= 0L;
limit_found_rows= 0;
statement_id_counter= 0UL;
#ifdef ERROR_INJECT_SUPPORT
error_inject_value= 0UL;
#endif
// Must be reset to handle error with THD's created for init of mysqld
lex->current_select= 0;
start_time=(time_t) 0;

@ -241,11 +241,6 @@ struct system_variables
my_bool new_mode;
my_bool query_cache_wlock_invalidate;
my_bool engine_condition_pushdown;
#ifdef HAVE_REPLICATION
ulong sync_replication;
ulong sync_replication_slave_id;
ulong sync_replication_timeout;
#endif /* HAVE_REPLICATION */
my_bool innodb_table_locks;
my_bool innodb_support_xa;
my_bool ndb_force_send;
@ -1099,6 +1094,9 @@ public:
query_id_t query_id, warn_id;
ulong thread_id, col_access;

#ifdef ERROR_INJECT_SUPPORT
ulong error_inject_value;
#endif
/* Statement id is thread-wide. This counter is used to generate ids */
ulong statement_id_counter;
ulong rand_saved_seed1, rand_saved_seed2;

@ -68,7 +68,6 @@ static void decrease_user_connections(USER_CONN *uc);
static bool check_db_used(THD *thd,TABLE_LIST *tables);
static bool check_multi_update_lock(THD *thd);
static void remove_escape(char *name);
static void refresh_status(THD *thd);

const char *any_db="*any*"; // Special symbol for check_access

@ -6905,26 +6904,6 @@ void kill_one_thread(THD *thd, ulong id, bool only_kill_query)
}


/* Clear most status variables */

static void refresh_status(THD *thd)
{
pthread_mutex_lock(&LOCK_status);

/* We must update the global status before cleaning up the thread */
add_to_status(&global_status_var, &thd->status_var);
bzero((char*) &thd->status_var, sizeof(thd->status_var));

for (SHOW_VAR *ptr= status_vars; ptr->name; ptr++)
  if (ptr->type == SHOW_LONG) // note that SHOW_LONG_NOFLUSH variables are not reset
    *(ulong*) ptr->value= 0;

/* Reset the counters of all key caches (default and named). */
process_key_caches(reset_key_cache_counters);
pthread_mutex_unlock(&LOCK_status);
}


/* If pointer is not a null pointer, append filename to it */

bool append_file_to_dir(THD *thd, const char **filename_ptr,

sql/sql_partition.cc | 1176 (diff suppressed because it is too large)
@ -694,7 +694,7 @@ impossible position";

if (loop_breaker)
  break;


end_io_cache(&log);
(void) my_close(file, MYF(MY_WME));

@ -834,7 +834,7 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report)
/* Issuing warning then started without --skip-slave-start */
if (!opt_skip_slave_start)
  push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
               ER_MISSING_SKIP_SLAVE,
               ER_MISSING_SKIP_SLAVE,
               ER(ER_MISSING_SKIP_SLAVE));
}

@ -860,7 +860,7 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report)
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_WAS_RUNNING,
             ER(ER_SLAVE_WAS_RUNNING));
}


unlock_slave_threads(mi);

if (slave_errno)
@ -1023,7 +1023,7 @@ err:
    slave_server_id     the slave's server id

*/



void kill_zombie_dump_threads(uint32 slave_server_id)
{
@ -1088,9 +1088,9 @@ bool change_master(THD* thd, MASTER_INFO* mi)
*/

/*
  If the user specified host or port without binlog or position,
  If the user specified host or port without binlog or position,
  reset binlog's name to FIRST and position to 4.
*/
*/

if ((lex_mi->host || lex_mi->port) && !lex_mi->log_file_name && !lex_mi->pos)
{
@ -1117,7 +1117,7 @@ bool change_master(THD* thd, MASTER_INFO* mi)
  mi->port = lex_mi->port;
if (lex_mi->connect_retry)
  mi->connect_retry = lex_mi->connect_retry;


if (lex_mi->ssl != LEX_MASTER_INFO::SSL_UNCHANGED)
  mi->ssl= (lex_mi->ssl == LEX_MASTER_INFO::SSL_ENABLE);
if (lex_mi->ssl_ca)
@ -1133,7 +1133,7 @@ bool change_master(THD* thd, MASTER_INFO* mi)
#ifndef HAVE_OPENSSL
if (lex_mi->ssl || lex_mi->ssl_ca || lex_mi->ssl_capath ||
    lex_mi->ssl_cert || lex_mi->ssl_cipher || lex_mi->ssl_key )
  push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
  push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
               ER_SLAVE_IGNORED_SSL_PARAMS, ER(ER_SLAVE_IGNORED_SSL_PARAMS));
#endif

@ -1500,7 +1500,7 @@ bool show_binlogs(THD* thd)
}

field_list.push_back(new Item_empty_string("Log_name", 255));
field_list.push_back(new Item_return_int("File_size", 20,
field_list.push_back(new Item_return_int("File_size", 20,
                                         MYSQL_TYPE_LONGLONG));
if (protocol->send_fields(&field_list,
                          Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))

sql/sql_table.cc | 1073 (diff suppressed because it is too large)
@ -773,6 +773,7 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table)
SELECT_LEX *end, *view_select;
LEX *old_lex, *lex;
Query_arena *arena, backup;
TABLE_LIST *top_view= table->top_table();
int res;
bool result;
DBUG_ENTER("mysql_make_view");
@ -800,6 +801,24 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table)
DBUG_RETURN(0);
}

/* check loop via view definition */
for (TABLE_LIST *precedent= table->referencing_view;
     precedent;
     precedent= precedent->referencing_view)
{
if (precedent->view_name.length == table->table_name_length &&
    precedent->view_db.length == table->db_length &&
    my_strcasecmp(system_charset_info,
                  precedent->view_name.str, table->table_name) == 0 &&
    my_strcasecmp(system_charset_info,
                  precedent->view_db.str, table->db) == 0)
{
my_error(ER_VIEW_RECURSIVE, MYF(0),
         top_view->view_db.str, top_view->view_name.str);
DBUG_RETURN(TRUE);
}
}

/*
  For now we assume that tables will not be changed during PS life (it
  will be TRUE as far as we make new table cache).
@ -898,7 +917,6 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table)
}
if (!res && !thd->is_fatal_error)
{
TABLE_LIST *top_view= table->top_table();
TABLE_LIST *view_tables= lex->query_tables;
TABLE_LIST *view_tables_tail= 0;
TABLE_LIST *tbl;

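The new loop walks the chain of views that led to the one being opened; if the same database/view name pair occurs among its ancestors, the definition is cyclic and the new ER_VIEW_RECURSIVE error (added to the message file above) is raised for the top view. The check in a self-contained form (a generic sketch, not server code):

    #include <cstring>

    struct ViewRef
    {
      const char *db, *name;
      ViewRef *referencing_view;  /* view that pulled this one in, or NULL */
    };

    /* TRUE if 'v' is (transitively) defined in terms of itself */
    static bool is_recursive_view(const ViewRef *v)
    {
      for (const ViewRef *p= v->referencing_view; p; p= p->referencing_view)
        if (strcmp(p->db, v->db) == 0 && strcmp(p->name, v->name) == 0)
          return true;
      return false;
    }
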
sql/sql_yacc.yy | 161
@ -1819,22 +1819,23 @@ sp_fdparam:
LEX *lex= Lex;
sp_pcontext *spc= lex->spcont;

if (spc->find_pvar(&$1, TRUE))
if (spc->find_variable(&$1, TRUE))
{
my_error(ER_SP_DUP_PARAM, MYF(0), $1.str);
YYABORT;
}
sp_pvar_t *pvar= spc->push_pvar(&$1, (enum enum_field_types)$3,
                                sp_param_in);
sp_variable_t *spvar= spc->push_variable(&$1,
                                         (enum enum_field_types)$3,
                                         sp_param_in);

if (lex->sphead->fill_field_definition(YYTHD, lex,
                                       (enum enum_field_types) $3,
                                       &pvar->field_def))
                                       &spvar->field_def))
{
YYABORT;
}
pvar->field_def.field_name= pvar->name.str;
pvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL;
spvar->field_def.field_name= spvar->name.str;
spvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL;
}
;

@ -1855,22 +1856,23 @@ sp_pdparam:
LEX *lex= Lex;
sp_pcontext *spc= lex->spcont;

if (spc->find_pvar(&$3, TRUE))
if (spc->find_variable(&$3, TRUE))
{
my_error(ER_SP_DUP_PARAM, MYF(0), $3.str);
YYABORT;
}
sp_pvar_t *pvar= spc->push_pvar(&$3, (enum enum_field_types)$4,
                                (sp_param_mode_t)$1);
sp_variable_t *spvar= spc->push_variable(&$3,
                                         (enum enum_field_types)$4,
                                         (sp_param_mode_t)$1);

if (lex->sphead->fill_field_definition(YYTHD, lex,
                                       (enum enum_field_types) $4,
                                       &pvar->field_def))
                                       &spvar->field_def))
{
YYABORT;
}
pvar->field_def.field_name= pvar->name.str;
pvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL;
spvar->field_def.field_name= spvar->name.str;
spvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL;
}
;

@ -1934,7 +1936,7 @@ sp_decl:
{
LEX *lex= Lex;
sp_pcontext *pctx= lex->spcont;
uint num_vars= pctx->context_pvars();
uint num_vars= pctx->context_var_count();
enum enum_field_types var_type= (enum enum_field_types) $4;
Item *dflt_value_item= $5;
create_field *create_field_op;
@ -1947,23 +1949,23 @@ sp_decl:

for (uint i = num_vars-$2 ; i < num_vars ; i++)
{
uint var_idx= pctx->pvar_context2index(i);
sp_pvar_t *pvar= pctx->find_pvar(var_idx);
uint var_idx= pctx->var_context2runtime(i);
sp_variable_t *spvar= pctx->find_variable(var_idx);

if (!pvar)
if (!spvar)
  YYABORT;

pvar->type= var_type;
pvar->dflt= dflt_value_item;
spvar->type= var_type;
spvar->dflt= dflt_value_item;

if (lex->sphead->fill_field_definition(YYTHD, lex, var_type,
                                       &pvar->field_def))
                                       &spvar->field_def))
{
YYABORT;
}

pvar->field_def.field_name= pvar->name.str;
pvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL;
spvar->field_def.field_name= spvar->name.str;
spvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL;

/* The last instruction is responsible for freeing LEX. */

@ -2000,7 +2002,7 @@ sp_decl:
sp_pcontext *ctx= lex->spcont;
sp_instr_hpush_jump *i=
  new sp_instr_hpush_jump(sp->instructions(), ctx, $2,
                          ctx->current_pvars());
                          ctx->current_var_count());

sp->add_instr(i);
sp->push_backpatch(i, ctx->push_label((char *)"", 0));
@ -2017,7 +2019,7 @@ sp_decl:
if ($2 == SP_HANDLER_CONTINUE)
{
i= new sp_instr_hreturn(sp->instructions(), ctx,
                        ctx->current_pvars());
                        ctx->current_var_count());
sp->add_instr(i);
}
else
@ -2048,7 +2050,7 @@ sp_decl:
YYABORT;
}
i= new sp_instr_cpush(sp->instructions(), ctx, $5,
                      ctx->current_cursors());
                      ctx->current_cursor_count());
sp->add_instr(i);
ctx->push_cursor(&$2);
$$.vars= $$.conds= $$.hndlrs= 0;
@ -2203,12 +2205,12 @@ sp_decl_idents:
LEX *lex= Lex;
sp_pcontext *spc= lex->spcont;

if (spc->find_pvar(&$1, TRUE))
if (spc->find_variable(&$1, TRUE))
{
my_error(ER_SP_DUP_VAR, MYF(0), $1.str);
YYABORT;
}
spc->push_pvar(&$1, (enum_field_types)0, sp_param_in);
spc->push_variable(&$1, (enum_field_types)0, sp_param_in);
$$= 1;
}
| sp_decl_idents ',' ident
@ -2218,12 +2220,12 @@ sp_decl_idents:
LEX *lex= Lex;
sp_pcontext *spc= lex->spcont;

if (spc->find_pvar(&$3, TRUE))
if (spc->find_variable(&$3, TRUE))
{
my_error(ER_SP_DUP_VAR, MYF(0), $3.str);
YYABORT;
}
spc->push_pvar(&$3, (enum_field_types)0, sp_param_in);
spc->push_variable(&$3, (enum_field_types)0, sp_param_in);
$$= $1 + 1;
}
;
@ -2606,9 +2608,9 @@ sp_fetch_list:
LEX *lex= Lex;
sp_head *sp= lex->sphead;
sp_pcontext *spc= lex->spcont;
sp_pvar_t *spv;
sp_variable_t *spv;

if (!spc || !(spv = spc->find_pvar(&$1)))
if (!spc || !(spv = spc->find_variable(&$1)))
{
my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str);
YYABORT;
@ -2627,9 +2629,9 @@ sp_fetch_list:
LEX *lex= Lex;
sp_head *sp= lex->sphead;
sp_pcontext *spc= lex->spcont;
sp_pvar_t *spv;
sp_variable_t *spv;

if (!spc || !(spv = spc->find_pvar(&$3)))
if (!spc || !(spv = spc->find_variable(&$3)))
{
my_error(ER_SP_UNDECLARED_VAR, MYF(0), $3.str);
YYABORT;
@ -3558,75 +3560,14 @@ part_definition:
LEX *lex= Lex;
partition_info *part_info= lex->part_info;
partition_element *p_elem= new partition_element();
uint part_id= part_info->partitions.elements +
              part_info->temp_partitions.elements;
enum partition_state part_state;
uint part_id= part_info->partitions.elements;

if (part_info->part_state)
  part_state= (enum partition_state)part_info->part_state[part_id];
else
  part_state= PART_NORMAL;
switch (part_state)
if (!p_elem || part_info->partitions.push_back(p_elem))
{
case PART_TO_BE_DROPPED:
  /*
    This part is currently removed so we keep it in a
    temporary list for REPAIR TABLE to be able to handle
    failures during drop partition process.
  */
case PART_TO_BE_ADDED:
  /*
    This part is currently being added so we keep it in a
    temporary list for REPAIR TABLE to be able to handle
    failures during add partition process.
  */
  if (!p_elem || part_info->temp_partitions.push_back(p_elem))
  {
    mem_alloc_error(sizeof(partition_element));
    YYABORT;
  }
  break;
case PART_IS_ADDED:
  /*
    Part has been added and is now a normal partition
  */
case PART_TO_BE_REORGED:
  /*
    This part is currently reorganised, it is still however
    used so we keep it in the list of partitions. We do
    however need the state to be able to handle REPAIR TABLE
    after failures in the reorganisation process.
  */
case PART_REORGED_DROPPED:
  /*
    This part is currently reorganised as part of a
    COALESCE PARTITION and it will be dropped without a new
    replacement partition after completing the reorganisation.
  */
case PART_CHANGED:
  /*
    This part is currently split or merged as part of ADD
    PARTITION for a hash partition or as part of COALESCE
    PARTITION for a hash partitioned table.
  */
case PART_IS_CHANGED:
  /*
    This part has been split or merged as part of ADD
    PARTITION for a hash partition or as part of COALESCE
    PARTITION for a hash partitioned table.
  */
case PART_NORMAL:
  if (!p_elem || part_info->partitions.push_back(p_elem))
  {
    mem_alloc_error(sizeof(partition_element));
    YYABORT;
  }
  break;
default:
  mem_alloc_error((part_id * 1000) + part_state);
  YYABORT;
mem_alloc_error(sizeof(partition_element));
YYABORT;
}
p_elem->part_state= part_state;
p_elem->part_state= PART_NORMAL;
part_info->curr_part_elem= p_elem;
part_info->current_partition= p_elem;
part_info->use_default_partitions= FALSE;
@ -4801,7 +4742,7 @@ alter:
lex->sql_command= SQLCOM_CREATE_VIEW;
lex->create_view_mode= VIEW_ALTER;
/* first table in list is target VIEW name */
lex->select_lex.add_table_to_list(thd, $6, NULL, 0);
lex->select_lex.add_table_to_list(thd, $6, NULL, TL_OPTION_UPDATING);
}
view_list_opt AS view_select view_check_option
{}
@ -7595,9 +7536,9 @@ select_var_ident:
| ident_or_text
{
LEX *lex=Lex;
sp_pvar_t *t;
sp_variable_t *t;

if (!lex->spcont || !(t=lex->spcont->find_pvar(&$1)))
if (!lex->spcont || !(t=lex->spcont->find_variable(&$1)))
{
my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str);
YYABORT;
@ -9034,10 +8975,10 @@ order_ident:
simple_ident:
ident
{
sp_pvar_t *spv;
sp_variable_t *spv;
LEX *lex = Lex;
sp_pcontext *spc = lex->spcont;
if (spc && (spv = spc->find_pvar(&$1)))
if (spc && (spv = spc->find_variable(&$1)))
{
/* We're compiling a stored procedure and found a variable */
Item_splocal *splocal;
@ -9825,7 +9766,7 @@ sys_option_value:
{
/* An SP local variable */
sp_pcontext *ctx= lex->spcont;
sp_pvar_t *spv;
sp_variable_t *spv;
sp_instr_set *sp_set;
Item *it;
if ($1)
@ -9834,7 +9775,7 @@ sys_option_value:
YYABORT;
}

spv= ctx->find_pvar(&$2.base_name);
spv= ctx->find_variable(&$2.base_name);

if ($4)
  it= $4;
@ -9883,7 +9824,7 @@ option_value:

names.str= (char *)"names";
names.length= 5;
if (spc && spc->find_pvar(&names))
if (spc && spc->find_variable(&names))
  my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), names.str);
else
  yyerror(ER(ER_SYNTAX_ERROR));
@ -9913,7 +9854,7 @@ option_value:

pw.str= (char *)"password";
pw.length= 8;
if (spc && spc->find_pvar(&pw))
if (spc && spc->find_variable(&pw))
{
my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), pw.str);
YYABORT;
@ -9935,10 +9876,10 @@ internal_variable_name:
{
LEX *lex= Lex;
sp_pcontext *spc= lex->spcont;
sp_pvar_t *spv;
sp_variable_t *spv;

/* We have to lookup here since local vars can shadow sysvars */
if (!spc || !(spv = spc->find_pvar(&$1)))
if (!spc || !(spv = spc->find_variable(&$1)))
{
/* Not an SP local variable */
sys_var *tmp=find_sys_var($1.str, $1.length);
@ -10904,7 +10845,7 @@ view_tail:
LEX *lex= thd->lex;
lex->sql_command= SQLCOM_CREATE_VIEW;
/* first table in list is target VIEW name */
if (!lex->select_lex.add_table_to_list(thd, $3, NULL, 0))
if (!lex->select_lex.add_table_to_list(thd, $3, NULL, TL_OPTION_UPDATING))
  YYABORT;
}
view_list_opt AS view_select view_check_option

sql/table.cc
@ -667,36 +667,17 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
#endif
next_chunk+= 5 + partition_info_len;
}
if (share->mysql_version > 50105 && next_chunk + 5 < buff_end)
#if MYSQL_VERSION_ID < 50200
if (share->mysql_version >= 50106 && share->mysql_version <= 50109)
{
/*
Partition state was introduced to support partition management in version 5.1.5
Partition state array was here in version 5.1.6 to 5.1.9, this code
makes it possible to load a 5.1.6 table in later versions. Can most
likely be removed at some point in time. Will only be used for
upgrades within 5.1 series of versions. Upgrade to 5.2 can only be
done from newer 5.1 versions.
*/
uint32 part_state_len= uint4korr(next_chunk);
#ifdef WITH_PARTITION_STORAGE_ENGINE
if ((share->part_state_len= part_state_len))
if (!(share->part_state=
(uchar*) memdup_root(&share->mem_root, next_chunk + 4,
part_state_len)))
{
my_free(buff, MYF(0));
goto err;
}
#else
if (part_state_len)
{
DBUG_PRINT("info", ("WITH_PARTITION_STORAGE_ENGINE is not defined"));
my_free(buff, MYF(0));
goto err;
}
#endif
next_chunk+= 4 + part_state_len;
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
else
{
share->part_state_len= 0;
share->part_state= NULL;
next_chunk+= 4;
}
#endif
keyinfo= share->key_info;
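The chunk format read here is a 4-byte little-endian length prefix followed by the payload. A condensed sketch of the read-side invariant, using only the uint4korr/memdup_root helpers shown above (the bounds check is an assumption for illustration):

    /* Illustration only: consume one length-prefixed chunk from the
       frm extra-data buffer, as open_binary_frm() does for the
       partition state array. */
    uint32 len= uint4korr(next_chunk);        /* 4-byte length prefix */
    if (next_chunk + 4 + len > buff_end)      /* hypothetical bounds check */
      goto err;
    if (len &&
        !(share->part_state= (uchar*) memdup_root(&share->mem_root,
                                                  next_chunk + 4, len)))
      goto err;                               /* out of memory */
    share->part_state_len= len;
    next_chunk+= 4 + len;                     /* step past prefix + payload */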
@ -136,7 +136,6 @@ bool mysql_create_frm(THD *thd, const char *file_name,
if (part_info)
{
create_info->extra_size+= part_info->part_info_len;
create_info->extra_size+= part_info->part_state_len;
}
#endif

@ -209,12 +208,6 @@ bool mysql_create_frm(THD *thd, const char *file_name,
my_write(file, (const byte*)part_info->part_info_string,
part_info->part_info_len + 1, MYF_RW))
goto err;
DBUG_PRINT("info", ("Part state len = %d", part_info->part_state_len));
int4store(buff, part_info->part_state_len);
if (my_write(file, (const byte*)buff, 4, MYF_RW) ||
my_write(file, (const byte*)part_info->part_state,
part_info->part_state_len, MYF_RW))
goto err;
}
else
#endif
@ -330,7 +323,7 @@ int rea_create_table(THD *thd, const char *path,

// Make sure mysql_create_frm didn't remove extension
DBUG_ASSERT(*fn_rext(frm_name));
if (file->create_handler_files(path, create_info))
if (file->create_handler_files(path, NULL, CHF_CREATE_FLAG, create_info))
goto err_handler;
if (!create_info->frm_only && ha_create_table(thd, path, db, table_name,
create_info,0))
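The write side removed above mirrors the read side: store the 4-byte length with int4store, then the payload. A sketch of that pattern under the my_write/MYF_RW conventions used in this file (illustration only):

    /* Illustration only: emit one length-prefixed chunk, matching the
       part_state layout that open_binary_frm() parses. */
    uchar lenbuf[4];
    int4store(lenbuf, part_info->part_state_len);
    if (my_write(file, (const byte*) lenbuf, 4, MYF_RW) ||
        my_write(file, (const byte*) part_info->part_state,
                 part_info->part_state_len, MYF_RW))
      goto err;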
@ -76,7 +76,7 @@ EXTRA_DIST = include/btr0btr.h include/btr0btr.ic include/btr0cur.h include/btr
include/univ.i include/usr0sess.h include/usr0sess.ic include/usr0types.h \
include/ut0byte.h include/ut0byte.ic include/ut0dbg.h include/ut0lst.h \
include/ut0mem.h include/ut0mem.ic include/ut0rnd.h include/ut0rnd.ic \
include/ut0sort.h include/ut0ut.h include/ut0ut.ic \
include/ut0sort.h include/ut0ut.h include/ut0ut.ic include/ut0vec.h include/ut0vec.ic \
cmakelists.txt

# Don't update the files from bitkeeper

@ -144,7 +144,7 @@ btr_root_get(

root = btr_page_get(space, root_page_no, RW_X_LATCH, mtr);
ut_a((ibool)!!page_is_comp(root) ==
dict_table_is_comp(UT_LIST_GET_FIRST(tree->tree_indexes)->table));
dict_table_is_comp(tree->tree_index->table));

return(root);
}
@ -259,7 +259,7 @@ btr_page_create(
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
MTR_MEMO_PAGE_X_FIX));
page_create(page, mtr,
dict_table_is_comp(UT_LIST_GET_FIRST(tree->tree_indexes)->table));
dict_table_is_comp(tree->tree_index->table));
buf_block_align(page)->check_index_page_at_flush = TRUE;

btr_page_set_index_id(page, tree->id, mtr);
@ -574,7 +574,7 @@ btr_page_get_father_for_rec(

tuple = dict_tree_build_node_ptr(tree, user_rec, 0, heap,
btr_page_get_level(page, mtr));
index = UT_LIST_GET_FIRST(tree->tree_indexes);
index = tree->tree_index;

/* In the following, we choose just any index from the tree as the
first parameter for btr_cur_search_to_nth_level. */
@ -1073,8 +1073,7 @@ btr_root_raise_and_insert(
/* fprintf(stderr, "Root raise new page no %lu\n",
buf_frame_get_page_no(new_page)); */

ibuf_reset_free_bits(UT_LIST_GET_FIRST(tree->tree_indexes),
new_page);
ibuf_reset_free_bits(tree->tree_index, new_page);
/* Reposition the cursor to the child node */
page_cur_search(new_page, cursor->index, tuple,
PAGE_CUR_LE, page_cursor);
@ -1415,7 +1414,7 @@ btr_insert_on_non_leaf_level(
/* In the following, choose just any index from the tree as the
first parameter for btr_cur_search_to_nth_level. */

btr_cur_search_to_nth_level(UT_LIST_GET_FIRST(tree->tree_indexes),
btr_cur_search_to_nth_level(tree->tree_index,
level, tuple, PAGE_CUR_LE, BTR_CONT_MODIFY_TREE,
&cursor, 0, mtr);

@ -1479,7 +1478,7 @@ btr_attach_half_pages(

btr_node_ptr_set_child_page_no(node_ptr,
rec_get_offsets(node_ptr,
UT_LIST_GET_FIRST(tree->tree_indexes),
tree->tree_index,
NULL, ULINT_UNDEFINED, &heap),
lower_page_no, mtr);
mem_heap_empty(heap);
@ -1768,8 +1767,8 @@ func_start:
buf_frame_get_page_no(left_page),
buf_frame_get_page_no(right_page)); */

ut_ad(page_validate(left_page, UT_LIST_GET_FIRST(tree->tree_indexes)));
ut_ad(page_validate(right_page, UT_LIST_GET_FIRST(tree->tree_indexes)));
ut_ad(page_validate(left_page, tree->tree_index));
ut_ad(page_validate(right_page, tree->tree_index));

mem_heap_free(heap);
return(rec);
@ -1910,8 +1909,7 @@ btr_node_ptr_delete(

node_ptr = btr_page_get_father_node_ptr(tree, page, mtr);

btr_cur_position(UT_LIST_GET_FIRST(tree->tree_indexes), node_ptr,
&cursor);
btr_cur_position(tree->tree_index, node_ptr, &cursor);
compressed = btr_cur_pessimistic_delete(&err, TRUE, &cursor, FALSE,
mtr);
ut_a(err == DB_SUCCESS);
@ -1947,7 +1945,7 @@ btr_lift_page_up(
btr_page_get_father_node_ptr(tree, page, mtr));

page_level = btr_page_get_level(page, mtr);
index = UT_LIST_GET_FIRST(tree->tree_indexes);
index = tree->tree_index;

btr_search_drop_page_hash_index(page);

@ -2180,8 +2178,7 @@ btr_discard_only_page_on_level(
btr_page_empty(father_page, mtr);

/* We play safe and reset the free bits for the father */
ibuf_reset_free_bits(UT_LIST_GET_FIRST(tree->tree_indexes),
father_page);
ibuf_reset_free_bits(tree->tree_index, father_page);
} else {
ut_ad(page_get_n_recs(father_page) == 1);

@ -2449,7 +2446,7 @@ btr_check_node_ptr(

ut_a(cmp_dtuple_rec(node_ptr_tuple, node_ptr,
rec_get_offsets(node_ptr,
dict_tree_find_index(tree, node_ptr),
tree->tree_index,
NULL, ULINT_UNDEFINED, &heap)) == 0);

mem_heap_free(heap);
@ -2692,7 +2689,7 @@ btr_validate_level(

space = buf_frame_get_space_id(page);

index = UT_LIST_GET_FIRST(tree->tree_indexes);
index = tree->tree_index;

while (level != btr_page_get_level(page, &mtr)) {
@ -1606,7 +1606,7 @@ btr_cur_optimistic_update(
new_entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);

row_upd_index_replace_new_col_vals_index_pos(new_entry, index, update,
NULL);
FALSE, NULL);
old_rec_size = rec_offs_size(offsets);
new_rec_size = rec_get_converted_size(index, new_entry);

@ -1846,7 +1846,7 @@ btr_cur_pessimistic_update(
new_entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);

row_upd_index_replace_new_col_vals_index_pos(new_entry, index, update,
heap);
FALSE, heap);
if (!(flags & BTR_KEEP_SYS_FLAG)) {
row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR,
roll_ptr);
@ -1915,13 +1915,13 @@ btr_cur_pessimistic_update(
ut_a(rec || optim_err != DB_UNDERFLOW);

if (rec) {
offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &heap);

lock_rec_restore_from_page_infimum(rec, page);
rec_set_field_extern_bits(rec, index,
ext_vect, n_ext_vect, mtr);

offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &heap);

if (!rec_get_deleted_flag(rec, rec_offs_comp(offsets))) {
/* The new inserted record owns its possible externally
stored fields */
@ -2371,8 +2371,7 @@ btr_cur_compress(
ut_ad(mtr_memo_contains(mtr,
dict_tree_get_lock(btr_cur_get_tree(cursor)),
MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains(mtr, buf_block_align(
btr_cur_get_page(cursor)),
ut_ad(mtr_memo_contains(mtr, buf_block_align(btr_cur_get_rec(cursor)),
MTR_MEMO_PAGE_X_FIX));
ut_ad(btr_page_get_level(btr_cur_get_page(cursor), mtr) == 0);

@ -2398,8 +2397,7 @@ btr_cur_compress_if_useful(
ut_ad(mtr_memo_contains(mtr,
dict_tree_get_lock(btr_cur_get_tree(cursor)),
MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains(mtr, buf_block_align(
btr_cur_get_page(cursor)),
ut_ad(mtr_memo_contains(mtr, buf_block_align(btr_cur_get_rec(cursor)),
MTR_MEMO_PAGE_X_FIX));

if (btr_cur_compress_recommendation(cursor, mtr)) {
@ -2437,7 +2435,7 @@ btr_cur_optimistic_delete(
ibool no_compress_needed;
*offsets_ = (sizeof offsets_) / sizeof *offsets_;

ut_ad(mtr_memo_contains(mtr, buf_block_align(btr_cur_get_page(cursor)),
ut_ad(mtr_memo_contains(mtr, buf_block_align(btr_cur_get_rec(cursor)),
MTR_MEMO_PAGE_X_FIX));
/* This is intended only for leaf page deletions */

@ -3330,7 +3328,10 @@ btr_store_big_rec_extern_fields(
dict_index_t* index, /* in: index of rec; the index tree
MUST be X-latched */
rec_t* rec, /* in: record */
const ulint* offsets, /* in: rec_get_offsets(rec, index) */
const ulint* offsets, /* in: rec_get_offsets(rec, index);
the "external storage" flags in offsets
will not correspond to rec when
this function returns */
big_rec_t* big_rec_vec, /* in: vector containing fields
to be stored externally */
mtr_t* local_mtr __attribute__((unused))) /* in: mtr
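The reordering in btr_cur_pessimistic_update() above matters because rec_set_field_extern_bits() modifies the record, so offsets computed before it are stale; the patch therefore recomputes them afterwards. Schematically (illustration only, reduced from the hunk above):

    /* Illustration only: mutate the record first, then (re)compute
       the offsets from the record as it now is. */
    lock_rec_restore_from_page_infimum(rec, page);
    rec_set_field_extern_bits(rec, index, ext_vect, n_ext_vect, mtr);

    offsets = rec_get_offsets(rec, index, offsets,
                              ULINT_UNDEFINED, &heap);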
@ -259,10 +259,7 @@ btr_pcur_restore_position(
cursor->latch_mode = latch_mode;
#ifdef UNIV_DEBUG
rec = btr_pcur_get_rec(cursor);
index = dict_tree_find_index(
btr_cur_get_tree(
btr_pcur_get_btr_cur(cursor)),
rec);
index = btr_pcur_get_btr_cur(cursor)->index;

heap = mem_heap_create(256);
offsets1 = rec_get_offsets(cursor->old_rec,

@ -24,8 +24,8 @@ ulint btr_search_this_is_zero = 0; /* A dummy variable to fool the

#ifdef UNIV_SEARCH_PERF_STAT
ulint btr_search_n_succ = 0;
#endif /* UNIV_SEARCH_PERF_STAT */
ulint btr_search_n_hash_fail = 0;
#endif /* UNIV_SEARCH_PERF_STAT */

byte btr_sea_pad1[64]; /* padding to prevent other memory update
hotspots from residing on the same memory
@ -59,9 +59,6 @@ before hash index building is started */

#define BTR_SEARCH_BUILD_LIMIT 100

/* How many cells to check before temporarily releasing btr_search_latch */
#define BTR_CHUNK_SIZE 10000

/************************************************************************
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
@ -172,10 +169,12 @@ btr_search_info_create(

info->last_hash_succ = FALSE;

#ifdef UNIV_SEARCH_PERF_STAT
info->n_hash_succ = 0;
info->n_hash_fail = 0;
info->n_patt_succ = 0;
info->n_searches = 0;
#endif /* UNIV_SEARCH_PERF_STAT */

/* Set some sensible values */
info->n_fields = 1;
@ -487,7 +486,9 @@ btr_search_info_update_slow(
if (cursor->flag == BTR_CUR_HASH_FAIL) {
/* Update the hash node reference, if appropriate */

#ifdef UNIV_SEARCH_PERF_STAT
btr_search_n_hash_fail++;
#endif /* UNIV_SEARCH_PERF_STAT */

rw_lock_x_lock(&btr_search_latch);

@ -872,11 +873,11 @@ failure_unlock:
rw_lock_s_unlock(&btr_search_latch);
}
failure:
info->n_hash_fail++;

cursor->flag = BTR_CUR_HASH_FAIL;

#ifdef UNIV_SEARCH_PERF_STAT
info->n_hash_fail++;

if (info->n_hash_succ > 0) {
info->n_hash_succ--;
}
@ -1607,21 +1608,26 @@ btr_search_validate(void)
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;

/* How many cells to check before temporarily releasing
btr_search_latch. */
ulint chunk_size = 10000;

*offsets_ = (sizeof offsets_) / sizeof *offsets_;

rw_lock_x_lock(&btr_search_latch);

cell_count = hash_get_n_cells(btr_search_sys->hash_index);

for (i = 0; i < cell_count; i++) {
/* We release btr_search_latch every once in a while to
give other queries a chance to run. */
if ((i != 0) && ((i % BTR_CHUNK_SIZE) == 0)) {
if ((i != 0) && ((i % chunk_size) == 0)) {
rw_lock_x_unlock(&btr_search_latch);
os_thread_yield();
rw_lock_x_lock(&btr_search_latch);
}

node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;

while (node != NULL) {
@ -1675,9 +1681,9 @@ btr_search_validate(void)
}
}

for (i = 0; i < cell_count; i += BTR_CHUNK_SIZE) {
ulint end_index = ut_min(i + BTR_CHUNK_SIZE - 1, cell_count - 1);

for (i = 0; i < cell_count; i += chunk_size) {
ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);

/* We release btr_search_latch every once in a while to
give other queries a chance to run. */
if (i != 0) {
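Both validation loops above use the same pattern: hold btr_search_latch exclusively, but drop and re-acquire it every chunk_size cells so other queries get a chance to run. The skeleton, reduced to its essentials (illustration only, not the patch itself):

    /* Illustration only: periodic latch release inside a long scan. */
    ulint chunk_size = 10000;   /* cells to scan per latch hold */
    ulint i;

    rw_lock_x_lock(&btr_search_latch);
    for (i = 0; i < cell_count; i++) {
            if (i != 0 && i % chunk_size == 0) {
                    /* Give other queries a chance to run. */
                    rw_lock_x_unlock(&btr_search_latch);
                    os_thread_yield();
                    rw_lock_x_lock(&btr_search_latch);
            }
            /* ... validate cell i ... */
    }
    rw_lock_x_unlock(&btr_search_latch);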
@ -294,14 +294,14 @@ buf_LRU_try_free_flushed_blocks(void)
}

/**********************************************************************
Returns TRUE if less than 15 % of the buffer pool is available. This can be
Returns TRUE if less than 25 % of the buffer pool is available. This can be
used in heuristics to prevent huge transactions eating up the whole buffer
pool for their locks. */

ibool
buf_LRU_buf_pool_running_out(void)
/*==============================*/
/* out: TRUE if less than 15 % of buffer pool
/* out: TRUE if less than 25 % of buffer pool
left */
{
ibool ret = FALSE;
@ -309,7 +309,7 @@ buf_LRU_buf_pool_running_out(void)
mutex_enter(&(buf_pool->mutex));

if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
+ UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->max_size / 7) {
+ UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->max_size / 4) {

ret = TRUE;
}
@ -340,11 +340,11 @@ loop:
mutex_enter(&(buf_pool->mutex));

if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
+ UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->max_size / 10) {
+ UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->max_size / 20) {
ut_print_timestamp(stderr);

fprintf(stderr,
" InnoDB: ERROR: over 9 / 10 of the buffer pool is occupied by\n"
" InnoDB: ERROR: over 95 percent of the buffer pool is occupied by\n"
"InnoDB: lock heaps or the adaptive hash index! Check that your\n"
"InnoDB: transactions do not set too many row locks.\n"
"InnoDB: Your buffer pool size is %lu MB. Maybe you should make\n"
@ -356,17 +356,17 @@ loop:
ut_error;

} else if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
+ UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->max_size / 5) {
+ UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->max_size / 3) {

if (!buf_lru_switched_on_innodb_mon) {

/* Over 80 % of the buffer pool is occupied by lock
/* Over 67 % of the buffer pool is occupied by lock
heaps or the adaptive hash index. This may be a memory
leak! */

ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: WARNING: over 4 / 5 of the buffer pool is occupied by\n"
" InnoDB: WARNING: over 67 percent of the buffer pool is occupied by\n"
"InnoDB: lock heaps or the adaptive hash index! Check that your\n"
"InnoDB: transactions do not set too many row locks.\n"
"InnoDB: Your buffer pool size is %lu MB. Maybe you should make\n"
@ -881,10 +881,10 @@ buf_LRU_block_remove_hashed_page(
if (buf_page_hash_get(block->space, block->offset)) {
fprintf(stderr,
"InnoDB: From hash table we find block %p of %lu %lu which is not %p\n",
buf_page_hash_get(block->space, block->offset),
(void*) buf_page_hash_get(block->space, block->offset),
(ulong) buf_page_hash_get(block->space, block->offset)->space,
(ulong) buf_page_hash_get(block->space, block->offset)->offset,
block);
(void*) block);
}

#ifdef UNIV_DEBUG
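After this change the thresholds are measured against free list plus LRU length: fatal below 1/20 of the pool (over 95 percent occupied, was 1/10), warning below 1/3 (over 67 percent, was 1/5), and buf_LRU_buf_pool_running_out() reports TRUE below 1/4 (was 1/7). Condensed, illustrative sketch of the relaxed heuristic:

    /* Illustration only: the relaxed occupancy heuristic above. */
    ulint avail = UT_LIST_GET_LEN(buf_pool->free)
                + UT_LIST_GET_LEN(buf_pool->LRU);

    if (avail < buf_pool->max_size / 20) {
            ut_error;           /* over 95% is lock heaps / hash index */
    } else if (avail < buf_pool->max_size / 3) {
            /* over 67% occupied: warn and switch on the InnoDB monitor */
    }

    /* buf_LRU_buf_pool_running_out() now reports TRUE below 25%: */
    ibool running_out = avail < buf_pool->max_size / 4;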
@ -32,4 +32,4 @@ ADD_LIBRARY(innobase btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c
thr/thr0loc.c
trx/trx0purge.c trx/trx0rec.c trx/trx0roll.c trx/trx0rseg.c trx/trx0sys.c trx/trx0trx.c trx/trx0undo.c
usr/usr0sess.c
ut/ut0byte.c ut/ut0dbg.c ut/ut0mem.c ut/ut0rnd.c ut/ut0ut.c)
ut/ut0byte.c ut/ut0dbg.c ut/ut0mem.c ut/ut0rnd.c ut/ut0ut.c ut/ut0vec.c)
@ -89,6 +89,25 @@ else
CXXFLAGS="$OPTIMIZE_CXXFLAGS -DDBUG_OFF $CXXFLAGS -DDEBUG_OFF"
fi

# NOTE: The flags below are disabled by default since we can't easily get
# rid of the "string over 509 characters in length" warnings, and thus can't
# add -Werror. But it's a good idea to enable these for a test compile
# before shipping a new snapshot to MySQL to catch errors that could make
# the compile fail on non-C99 compilers.

# If using gcc, disallow usage of C99 features to avoid accidentally
# introducing problems on compilers that only implement C89.
#if test "$ac_cv_prog_gcc" = "yes"
#then
# CFLAGS="$CFLAGS -std=c89 -ansi -pedantic -Wno-long-long"
#fi

# If using gcc, add some extra warning flags.
if test "$ac_cv_prog_gcc" = "yes"
then
CFLAGS="$CFLAGS -Werror-implicit-function-declaration"
fi

case "$target_os" in
lin*)
CFLAGS="$CFLAGS -DUNIV_LINUX";;
@ -216,20 +216,43 @@ dtype_print(

mtype = type->mtype;
prtype = type->prtype;
if (mtype == DATA_VARCHAR) {

switch (mtype) {
case DATA_VARCHAR:
fputs("DATA_VARCHAR", stderr);
} else if (mtype == DATA_CHAR) {
break;

case DATA_CHAR:
fputs("DATA_CHAR", stderr);
} else if (mtype == DATA_BINARY) {
break;

case DATA_BINARY:
fputs("DATA_BINARY", stderr);
} else if (mtype == DATA_INT) {
break;

case DATA_FIXBINARY:
fputs("DATA_FIXBINARY", stderr);
break;

case DATA_BLOB:
fputs("DATA_BLOB", stderr);
break;

case DATA_INT:
fputs("DATA_INT", stderr);
} else if (mtype == DATA_MYSQL) {
break;

case DATA_MYSQL:
fputs("DATA_MYSQL", stderr);
} else if (mtype == DATA_SYS) {
break;

case DATA_SYS:
fputs("DATA_SYS", stderr);
} else {
break;

default:
fprintf(stderr, "type %lu", (ulong) mtype);
break;
}

len = type->len;
@ -254,6 +277,18 @@ dtype_print(
} else {
fprintf(stderr, "prtype %lu", (ulong) prtype);
}
} else {
if (prtype & DATA_UNSIGNED) {
fputs(" DATA_UNSIGNED", stderr);
}

if (prtype & DATA_BINARY_TYPE) {
fputs(" DATA_BINARY_TYPE", stderr);
}

if (prtype & DATA_NOT_NULL) {
fputs(" DATA_NOT_NULL", stderr);
}
}

fprintf(stderr, " len %lu prec %lu", (ulong) len, (ulong) type->prec);
@ -24,6 +24,7 @@ Created 1/8/1996 Heikki Tuuri
#include "pars0pars.h"
#include "trx0roll.h"
#include "usr0sess.h"
#include "ut0vec.h"

/*********************************************************************
Based on a table object, this function builds the entry to be inserted
@ -74,14 +75,14 @@ dict_create_sys_tables_tuple(
dfield = dtuple_get_nth_field(entry, 3);

ptr = mem_heap_alloc(heap, 4);
mach_write_to_4(ptr, table->type);
mach_write_to_4(ptr, DICT_TABLE_ORDINARY);

dfield_set_data(dfield, ptr, 4);
/* 6: MIX_ID ---------------------------*/
dfield = dtuple_get_nth_field(entry, 4);

ptr = mem_heap_alloc(heap, 8);
mach_write_to_8(ptr, table->mix_id);
memset(ptr, 0, 8);

dfield_set_data(dfield, ptr, 8);
/* 7: MIX_LEN --------------------------*/
@ -89,19 +90,13 @@ dict_create_sys_tables_tuple(
dfield = dtuple_get_nth_field(entry, 5);

ptr = mem_heap_alloc(heap, 4);
mach_write_to_4(ptr, table->mix_len);
memset(ptr, 0, 4);

dfield_set_data(dfield, ptr, 4);
/* 8: CLUSTER_NAME ---------------------*/
dfield = dtuple_get_nth_field(entry, 6);
dfield_set_data(dfield, NULL, UNIV_SQL_NULL); /* not supported */

if (table->type == DICT_TABLE_CLUSTER_MEMBER) {
dfield_set_data(dfield, table->cluster_name,
ut_strlen(table->cluster_name));
ut_error; /* Oracle-style clusters are not supported yet */
} else {
dfield_set_data(dfield, NULL, UNIV_SQL_NULL);
}
/* 9: SPACE ----------------------------*/
dfield = dtuple_get_nth_field(entry, 7);

@ -207,7 +202,6 @@ dict_build_table_def_step(
tab_node_t* node) /* in: table create node */
{
dict_table_t* table;
dict_table_t* cluster_table;
dtuple_t* row;
ulint error;
const char* path_or_name;
@ -235,23 +229,6 @@ dict_build_table_def_step(
return(DB_TOO_BIG_RECORD);
}

if (table->type == DICT_TABLE_CLUSTER_MEMBER) {

cluster_table = dict_table_get_low(table->cluster_name);

if (cluster_table == NULL) {

return(DB_CLUSTER_NOT_FOUND);
}

/* Inherit space and mix len from the cluster */

table->space = cluster_table->space;
table->mix_len = cluster_table->mix_len;

table->mix_id = dict_hdr_get_new_id(DICT_HDR_MIX_ID);
}

if (srv_file_per_table) {
/* We create a new single-table tablespace for the table.
We initially let it be 4 pages:
@ -614,15 +591,6 @@ dict_create_index_tree_step(

sys_indexes = dict_sys->sys_indexes;

if (index->type & DICT_CLUSTERED
&& table->type == DICT_TABLE_CLUSTER_MEMBER) {

/* Do not create a new index tree: entries are put to the
cluster tree */

return(DB_SUCCESS);
}

/* Run a mini-transaction in which the index tree is allocated for
the index and its root address is written to the index entry in
sys_indexes */
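With clusters gone, the SYS_TABLES columns TYPE, MIX_ID, MIX_LEN and CLUSTER_NAME are now written as constants. The per-field pattern is allocate, encode, attach; condensed, illustrative sketch of one fixed field using only the helpers shown above:

    /* Illustration only: one fixed SYS_TABLES field (5: TYPE). */
    dfield = dtuple_get_nth_field(entry, 3);
    ptr = mem_heap_alloc(heap, 4);
    mach_write_to_4(ptr, DICT_TABLE_ORDINARY);  /* always ordinary now */
    dfield_set_data(dfield, ptr, 4);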
@ -1159,11 +1127,8 @@ dict_create_or_check_foreign_constraint_tables(void)
{
dict_table_t* table1;
dict_table_t* table2;
que_thr_t* thr;
que_t* graph;
ulint error;
trx_t* trx;
const char* str;

mutex_enter(&(dict_sys->mutex));

@ -1215,7 +1180,7 @@ dict_create_or_check_foreign_constraint_tables(void)
VARBINARY, like in other InnoDB system tables, to get a clean
design. */

str =
error = que_eval_sql(NULL,
"PROCEDURE CREATE_FOREIGN_SYS_TABLES_PROC () IS\n"
"BEGIN\n"
"CREATE TABLE\n"
@ -1227,22 +1192,8 @@ dict_create_or_check_foreign_constraint_tables(void)
"SYS_FOREIGN_COLS(ID CHAR, POS INT, FOR_COL_NAME CHAR, REF_COL_NAME CHAR);\n"
"CREATE UNIQUE CLUSTERED INDEX ID_IND ON SYS_FOREIGN_COLS (ID, POS);\n"
"COMMIT WORK;\n"
"END;\n";

graph = pars_sql(str);

ut_a(graph);

graph->trx = trx;
trx->graph = NULL;

graph->fork_type = QUE_FORK_MYSQL_INTERFACE;

ut_a(thr = que_fork_start_command(graph));

que_run_threads(thr);

error = trx->error_state;
"END;\n"
, trx);

if (error != DB_SUCCESS) {
fprintf(stderr, "InnoDB: error %lu in creation\n",
@ -1261,8 +1212,6 @@ dict_create_or_check_foreign_constraint_tables(void)
error = DB_MUST_GET_MORE_FILE_SPACE;
}

que_graph_free(graph);

trx->op_info = "";

row_mysql_unlock_data_dictionary(trx);
@ -1277,150 +1226,23 @@ dict_create_or_check_foreign_constraint_tables(void)
return(error);
}
/************************************************************************
Adds foreign key definitions to data dictionary tables in the database. We
look at table->foreign_list, and also generate names for constraints that were
not named by the user. A generated constraint has a name of the format
databasename/tablename_ibfk_<number>, where the numbers start from 1 and are
given locally for this table; that is, the number is not global, as it used
to be in the old (< 4.0.18) format constraints. */
/********************************************************************
Evaluate the given foreign key SQL statement. */

ulint
dict_create_add_foreigns_to_dictionary(
/*===================================*/
dict_foreign_eval_sql(
/*==================*/
/* out: error code or DB_SUCCESS */
ulint start_id,/* in: if we are actually doing ALTER TABLE
ADD CONSTRAINT, we want to generate constraint
numbers which are bigger than in the table so
far; we number the constraints from
start_id + 1 up; start_id should be set to 0 if
we are creating a new table, or if the table
so far has no constraints for which the name
was generated here */
pars_info_t* info, /* in: info struct, or NULL */
const char* sql, /* in: SQL string to evaluate */
dict_table_t* table, /* in: table */
dict_foreign_t* foreign,/* in: foreign */
trx_t* trx) /* in: transaction */
{
dict_foreign_t* foreign;
que_thr_t* thr;
que_t* graph;
ulint number = start_id + 1;
ulint len;
ulint error;
FILE* ef = dict_foreign_err_file;
ulint i;
char* sql;
char* sqlend;
/* This procedure builds an InnoDB stored procedure which will insert
the necessary rows into SYS_FOREIGN and SYS_FOREIGN_COLS. */
static const char str1[] = "PROCEDURE ADD_FOREIGN_DEFS_PROC () IS\n"
"BEGIN\n"
"INSERT INTO SYS_FOREIGN VALUES(";
static const char str2[] = ");\n";
static const char str3[] =
"INSERT INTO SYS_FOREIGN_COLS VALUES(";
static const char str4[] =
"COMMIT WORK;\n"
"END;\n";

#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */

if (NULL == dict_table_get_low("SYS_FOREIGN")) {
fprintf(stderr,
"InnoDB: table SYS_FOREIGN not found from internal data dictionary\n");

return(DB_ERROR);
}

foreign = UT_LIST_GET_FIRST(table->foreign_list);
loop:
if (foreign == NULL) {

return(DB_SUCCESS);
}

if (foreign->id == NULL) {
/* Generate a new constraint id */
ulint namelen = strlen(table->name);
char* id = mem_heap_alloc(foreign->heap, namelen + 20);
/* no overflow if number < 1e13 */
sprintf(id, "%s_ibfk_%lu", table->name, (ulong) number++);
foreign->id = id;
}

len = (sizeof str1) + (sizeof str2) + (sizeof str4) - 3
+ 9/* ' and , chars */ + 10/* 32-bit integer */
+ ut_strlenq(foreign->id, '\'') * (foreign->n_fields + 1)
+ ut_strlenq(table->name, '\'')
+ ut_strlenq(foreign->referenced_table_name, '\'');

for (i = 0; i < foreign->n_fields; i++) {
len += 9/* ' and , chars */ + 10/* 32-bit integer */
+ (sizeof str3) + (sizeof str2) - 2
+ ut_strlenq(foreign->foreign_col_names[i], '\'')
+ ut_strlenq(foreign->referenced_col_names[i], '\'');
}

sql = sqlend = mem_alloc(len + 1);

/* INSERT INTO SYS_FOREIGN VALUES(...); */
memcpy(sqlend, str1, (sizeof str1) - 1);
sqlend += (sizeof str1) - 1;
*sqlend++ = '\'';
sqlend = ut_strcpyq(sqlend, '\'', foreign->id);
*sqlend++ = '\'', *sqlend++ = ',', *sqlend++ = '\'';
sqlend = ut_strcpyq(sqlend, '\'', table->name);
*sqlend++ = '\'', *sqlend++ = ',', *sqlend++ = '\'';
sqlend = ut_strcpyq(sqlend, '\'', foreign->referenced_table_name);
*sqlend++ = '\'', *sqlend++ = ',';
sqlend += sprintf(sqlend, "%010lu",
foreign->n_fields + (foreign->type << 24));
memcpy(sqlend, str2, (sizeof str2) - 1);
sqlend += (sizeof str2) - 1;

for (i = 0; i < foreign->n_fields; i++) {
/* INSERT INTO SYS_FOREIGN_COLS VALUES(...); */
memcpy(sqlend, str3, (sizeof str3) - 1);
sqlend += (sizeof str3) - 1;
*sqlend++ = '\'';
sqlend = ut_strcpyq(sqlend, '\'', foreign->id);
*sqlend++ = '\''; *sqlend++ = ',';
sqlend += sprintf(sqlend, "%010lu", (ulong) i);
*sqlend++ = ','; *sqlend++ = '\'';
sqlend = ut_strcpyq(sqlend, '\'',
foreign->foreign_col_names[i]);
*sqlend++ = '\''; *sqlend++ = ','; *sqlend++ = '\'';
sqlend = ut_strcpyq(sqlend, '\'',
foreign->referenced_col_names[i]);
*sqlend++ = '\'';
memcpy(sqlend, str2, (sizeof str2) - 1);
sqlend += (sizeof str2) - 1;
}

memcpy(sqlend, str4, sizeof str4);
sqlend += sizeof str4;

ut_a(sqlend == sql + len + 1);

graph = pars_sql(sql);

ut_a(graph);

mem_free(sql);

graph->trx = trx;
trx->graph = NULL;

graph->fork_type = QUE_FORK_MYSQL_INTERFACE;

ut_a(thr = que_fork_start_command(graph));

que_run_threads(thr);

error = trx->error_state;

que_graph_free(graph);
error = que_eval_sql(info, sql, trx);

if (error == DB_DUPLICATE_KEY) {
mutex_enter(&dict_foreign_err_mutex);
@ -1466,7 +1288,163 @@ loop:
return(error);
}

foreign = UT_LIST_GET_NEXT(foreign_list, foreign);

goto loop;
return(DB_SUCCESS);
}

/************************************************************************
Add a single foreign key field definition to the data dictionary tables in
the database. */
static
ulint
dict_create_add_foreign_field_to_dictionary(
/*========================================*/
/* out: error code or DB_SUCCESS */
ulint field_nr, /* in: foreign field number */
dict_table_t* table, /* in: table */
dict_foreign_t* foreign, /* in: foreign */
trx_t* trx) /* in: transaction */
{
pars_info_t* info = pars_info_create();

pars_info_add_str_literal(info, "id", foreign->id);

pars_info_add_int4_literal(info, "pos", field_nr);

pars_info_add_str_literal(info, "for_col_name",
foreign->foreign_col_names[field_nr]);

pars_info_add_str_literal(info, "ref_col_name",
foreign->referenced_col_names[field_nr]);

return dict_foreign_eval_sql(info,
"PROCEDURE P () IS\n"
"BEGIN\n"
"INSERT INTO SYS_FOREIGN_COLS VALUES"
"(:id, :pos, :for_col_name, :ref_col_name);\n"
"END;\n"
, table, foreign, trx);
}
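The refactoring replaces the hand-assembled SQL strings (mem_alloc plus ut_strcpyq quoting and manual length accounting) with parameterized procedures: literals are bound through a pars_info_t and the statement is evaluated by que_eval_sql. A hedged sketch of the same bind-and-evaluate pattern, using only the calls shown in this patch:

    /* Illustration only: the parameterized-statement pattern used
       above; the INSERT shown mirrors the patch's own procedure. */
    pars_info_t* info = pars_info_create();

    pars_info_add_str_literal(info, "id", foreign->id);
    pars_info_add_str_literal(info, "for_name", table->name);
    pars_info_add_str_literal(info, "ref_name",
                              foreign->referenced_table_name);
    pars_info_add_int4_literal(info, "n_cols",
                               foreign->n_fields + (foreign->type << 24));

    error = dict_foreign_eval_sql(info,
            "PROCEDURE P () IS\n"
            "BEGIN\n"
            "INSERT INTO SYS_FOREIGN VALUES"
            "(:id, :for_name, :ref_name, :n_cols);\n"
            "END;\n", table, foreign, trx);

The design win is that no quoting or buffer-size arithmetic is needed at the call site; the parser receives the literals out of band.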

/************************************************************************
Add a single foreign key definition to the data dictionary tables in the
database. We also generate names for constraints that were not named by the
user. A generated constraint has a name of the format
databasename/tablename_ibfk_<number>, where the numbers start from 1 and
are given locally for this table; that is, the number is not global, as it
used to be in the old (< 4.0.18) format constraints. */
static
ulint
dict_create_add_foreign_to_dictionary(
/*==================================*/
/* out: error code or DB_SUCCESS */
ulint* id_nr, /* in/out: number to use in id generation;
incremented if used */
dict_table_t* table, /* in: table */
dict_foreign_t* foreign,/* in: foreign */
trx_t* trx) /* in: transaction */
{
ulint error;
ulint i;

pars_info_t* info = pars_info_create();

if (foreign->id == NULL) {
/* Generate a new constraint id */
ulint namelen = strlen(table->name);
char* id = mem_heap_alloc(foreign->heap, namelen + 20);
/* no overflow if number < 1e13 */
sprintf(id, "%s_ibfk_%lu", table->name, (ulong) (*id_nr)++);
foreign->id = id;
}

pars_info_add_str_literal(info, "id", foreign->id);

pars_info_add_str_literal(info, "for_name", table->name);

pars_info_add_str_literal(info, "ref_name",
foreign->referenced_table_name);

pars_info_add_int4_literal(info, "n_cols",
foreign->n_fields + (foreign->type << 24));

error = dict_foreign_eval_sql(info,
"PROCEDURE P () IS\n"
"BEGIN\n"
"INSERT INTO SYS_FOREIGN VALUES"
"(:id, :for_name, :ref_name, :n_cols);\n"
"END;\n"
, table, foreign, trx);

if (error != DB_SUCCESS) {

return(error);
}

for (i = 0; i < foreign->n_fields; i++) {
error = dict_create_add_foreign_field_to_dictionary(i,
table, foreign, trx);

if (error != DB_SUCCESS) {

return(error);
}
}

error = dict_foreign_eval_sql(NULL,
"PROCEDURE P () IS\n"
"BEGIN\n"
"COMMIT WORK;\n"
"END;\n"
, table, foreign, trx);

return(error);
}

/************************************************************************
Adds foreign key definitions to data dictionary tables in the database. */

ulint
dict_create_add_foreigns_to_dictionary(
/*===================================*/
/* out: error code or DB_SUCCESS */
ulint start_id,/* in: if we are actually doing ALTER TABLE
ADD CONSTRAINT, we want to generate constraint
numbers which are bigger than in the table so
far; we number the constraints from
start_id + 1 up; start_id should be set to 0 if
we are creating a new table, or if the table
so far has no constraints for which the name
was generated here */
dict_table_t* table, /* in: table */
trx_t* trx) /* in: transaction */
{
dict_foreign_t* foreign;
ulint number = start_id + 1;
ulint error;

#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */

if (NULL == dict_table_get_low("SYS_FOREIGN")) {
fprintf(stderr,
"InnoDB: table SYS_FOREIGN not found from internal data dictionary\n");

return(DB_ERROR);
}

for (foreign = UT_LIST_GET_FIRST(table->foreign_list);
foreign;
foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {

error = dict_create_add_foreign_to_dictionary(&number,
table, foreign, trx);

if (error != DB_SUCCESS) {

return(error);
}
}

return(DB_SUCCESS);
}
@ -657,6 +657,19 @@ dict_table_get_nth_col_pos(
n));
}

/************************************************************************
Check whether the table uses the compact page format. */

ibool
dict_table_is_comp_noninline(
/*=========================*/
/* out: TRUE if table uses the
compact page format */
const dict_table_t* table) /* in: table */
{
return(dict_table_is_comp(table));
}

/************************************************************************
Checks if a column is in the ordering columns of the clustered index of a
table. Column prefixes are treated like whole columns. */
@ -870,13 +883,6 @@ dict_table_add_to_cache(
ut_a(table2 == NULL);
}

if (table->type == DICT_TABLE_CLUSTER_MEMBER) {

table->mix_id_len = mach_dulint_get_compressed_size(
table->mix_id);
mach_dulint_write_compressed(table->mix_id_buf, table->mix_id);
}

/* Add the columns to the column hash table */
for (i = 0; i < table->n_cols; i++) {
dict_col_add_to_cache(table, dict_table_get_nth_col(table, i));
@ -1251,15 +1257,13 @@ dict_table_remove_from_cache(
/* Remove table from LRU list of tables */
UT_LIST_REMOVE(table_LRU, dict_sys->table_LRU, table);

mutex_free(&(table->autoinc_mutex));

size = mem_heap_get_size(table->heap);

ut_ad(dict_sys->size >= size);

dict_sys->size -= size;

mem_heap_free(table->heap);
dict_mem_table_free(table);
}

/**************************************************************************
@ -1380,6 +1384,38 @@ dict_col_reposition_in_cache(
HASH_INSERT(dict_col_t, hash, dict_sys->col_hash, fold, col);
}

/********************************************************************
If the given column name is reserved for InnoDB system columns, return
TRUE. */

ibool
dict_col_name_is_reserved(
/*======================*/
/* out: TRUE if name is reserved */
const char* name) /* in: column name */
{
/* This check reminds that if a new system column is added to
the program, it should be dealt with here. */
#if DATA_N_SYS_COLS != 4
#error "DATA_N_SYS_COLS != 4"
#endif

static const char* reserved_names[] = {
"DB_ROW_ID", "DB_TRX_ID", "DB_ROLL_PTR", "DB_MIX_ID"
};

ulint i;

for (i = 0; i < UT_ARR_SIZE(reserved_names); i++) {
if (strcmp(name, reserved_names[i]) == 0) {

return(TRUE);
}
}

return(FALSE);
}
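A caller (for example CREATE TABLE preprocessing) would use the new predicate to reject user columns that collide with the system columns; a hypothetical usage sketch, with the error path invented for illustration:

    /* Illustration only; the caller and error code are assumptions. */
    if (dict_col_name_is_reserved(col_name)) {
            fprintf(stderr,
                    "InnoDB: column name %s is reserved\n", col_name);
            return(DB_ERROR);   /* hypothetical error path */
    }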

/**************************************************************************
Adds an index to the dictionary cache. */

@ -1394,7 +1430,6 @@ dict_index_add_to_cache(
{
dict_index_t* new_index;
dict_tree_t* tree;
dict_table_t* cluster;
dict_field_t* field;
ulint n_ord;
ibool success;
@ -1468,21 +1503,11 @@ dict_index_add_to_cache(
dict_field_get_col(field)->ord_part++;
}

if (table->type == DICT_TABLE_CLUSTER_MEMBER) {
/* The index tree is found from the cluster object */
/* Create an index tree memory object for the index */
tree = dict_tree_create(new_index, page_no);
ut_ad(tree);

cluster = dict_table_get_low(table->cluster_name);

tree = dict_index_get_tree(
UT_LIST_GET_FIRST(cluster->indexes));
new_index->tree = tree;
} else {
/* Create an index tree memory object for the index */
tree = dict_tree_create(new_index, page_no);
ut_ad(tree);

new_index->tree = tree;
}
new_index->tree = tree;

if (!UNIV_UNLIKELY(new_index->type & DICT_UNIVERSAL)) {

@ -1500,7 +1525,7 @@ dict_index_add_to_cache(
}

/* Add the index to the list of indexes stored in the tree */
UT_LIST_ADD_LAST(tree_indexes, tree->tree_indexes, new_index);
tree->tree_index = new_index;

/* If the dictionary cache grows too big, trim the table LRU list */

@ -1532,7 +1557,7 @@ dict_index_remove_from_cache(
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */

ut_ad(UT_LIST_GET_LEN((index->tree)->tree_indexes) == 1);
ut_ad(index->tree->tree_index);
dict_tree_free(index->tree);

/* Decrement the ord_part counts in columns which are ordering */
@ -1553,7 +1578,7 @@ dict_index_remove_from_cache(

dict_sys->size -= size;

mem_heap_free(index->heap);
dict_mem_index_free(index);
}

/***********************************************************************
@ -1699,8 +1724,6 @@ dict_table_copy_types(
dtype_t* type;
ulint i;

ut_ad(!(table->type & DICT_UNIVERSAL));

for (i = 0; i < dtuple_get_n_fields(tuple); i++) {

dfield_type = dfield_get_type(dtuple_get_nth_field(tuple, i));
@ -1749,22 +1772,8 @@ dict_index_build_internal_clust(

new_index->id = index->id;

if (table->type != DICT_TABLE_ORDINARY) {
/* The index is mixed: copy common key prefix fields */

dict_index_copy(new_index, index, 0, table->mix_len);

/* Add the mix id column */
dict_index_add_col(new_index,
dict_table_get_sys_col(table, DATA_MIX_ID), 0);

/* Copy the rest of fields */
dict_index_copy(new_index, index, table->mix_len,
index->n_fields);
} else {
/* Copy the fields of index */
dict_index_copy(new_index, index, 0, index->n_fields);
}
/* Copy the fields of index */
dict_index_copy(new_index, index, 0, index->n_fields);

if (UNIV_UNLIKELY(index->type & DICT_UNIVERSAL)) {
/* No fixed number of fields determines an entry uniquely */
@ -3641,7 +3650,7 @@ dict_tree_create(

tree->id = index->id;

UT_LIST_INIT(tree->tree_indexes);
tree->tree_index = NULL;

tree->magic_n = DICT_TREE_MAGIC_N;

@ -3667,135 +3676,7 @@ dict_tree_free(
mem_free(tree);
}

/**************************************************************************
In an index tree, finds the index corresponding to a record in the tree. */
UNIV_INLINE
dict_index_t*
dict_tree_find_index_low(
/*=====================*/
/* out: index */
dict_tree_t* tree, /* in: index tree */
rec_t* rec) /* in: record for which to find correct
index */
{
dict_index_t* index;
dict_table_t* table;
dulint mix_id;
ulint len;

index = UT_LIST_GET_FIRST(tree->tree_indexes);
ut_ad(index);
table = index->table;

if ((index->type & DICT_CLUSTERED)
&& UNIV_UNLIKELY(table->type != DICT_TABLE_ORDINARY)) {

/* Get the mix id of the record */
ut_a(!dict_table_is_comp(table));

mix_id = mach_dulint_read_compressed(
rec_get_nth_field_old(rec, table->mix_len, &len));

while (ut_dulint_cmp(table->mix_id, mix_id) != 0) {

index = UT_LIST_GET_NEXT(tree_indexes, index);
table = index->table;
ut_ad(index);
}
}

return(index);
}

/**************************************************************************
In an index tree, finds the index corresponding to a record in the tree. */

dict_index_t*
dict_tree_find_index(
/*=================*/
/* out: index */
dict_tree_t* tree, /* in: index tree */
rec_t* rec) /* in: record for which to find correct
index */
{
dict_index_t* index;

index = dict_tree_find_index_low(tree, rec);

return(index);
}

/**************************************************************************
In an index tree, finds the index corresponding to a dtuple which is used
in a search to a tree. */

dict_index_t*
dict_tree_find_index_for_tuple(
/*===========================*/
/* out: index; NULL if the tuple does not
contain the mix id field in a mixed tree */
dict_tree_t* tree, /* in: index tree */
dtuple_t* tuple) /* in: tuple for which to find index */
{
dict_index_t* index;
dict_table_t* table;
dulint mix_id;

ut_ad(dtuple_check_typed(tuple));

if (UT_LIST_GET_LEN(tree->tree_indexes) == 1) {

return(UT_LIST_GET_FIRST(tree->tree_indexes));
}

index = UT_LIST_GET_FIRST(tree->tree_indexes);
ut_ad(index);
table = index->table;

if (dtuple_get_n_fields(tuple) <= table->mix_len) {

return(NULL);
}

/* Get the mix id of the record */

mix_id = mach_dulint_read_compressed(
dfield_get_data(
dtuple_get_nth_field(tuple, table->mix_len)));

while (ut_dulint_cmp(table->mix_id, mix_id) != 0) {

index = UT_LIST_GET_NEXT(tree_indexes, index);
table = index->table;
ut_ad(index);
}

return(index);
}

/***********************************************************************
Checks if a table which is a mixed cluster member owns a record. */

ibool
dict_is_mixed_table_rec(
/*====================*/
/* out: TRUE if the record belongs to this
table */
dict_table_t* table, /* in: table in a mixed cluster */
rec_t* rec) /* in: user record in the clustered index */
{
byte* mix_id_field;
ulint len;

ut_ad(!dict_table_is_comp(table));

mix_id_field = rec_get_nth_field_old(rec,
table->mix_len, &len);

return(len == table->mix_id_len
&& !ut_memcmp(table->mix_id_buf, mix_id_field, len));
}

#ifdef UNIV_DEBUG
/**************************************************************************
Checks that a tuple has n_fields_cmp value in a sensible range, so that
no comparison can occur with the page number field in a node pointer. */
@ -3807,19 +3688,14 @@ dict_tree_check_search_tuple(
dict_tree_t* tree, /* in: index tree */
dtuple_t* tuple) /* in: tuple used in a search */
{
dict_index_t* index;

index = dict_tree_find_index_for_tuple(tree, tuple);

if (index == NULL) {

return(TRUE);
}
dict_index_t* index = tree->tree_index;

ut_a(index);
ut_a(dtuple_get_n_fields_cmp(tuple)
<= dict_index_get_n_unique_in_tree(index));
return(TRUE);
}
#endif /* UNIV_DEBUG */

/**************************************************************************
Builds a node pointer out of a physical record and a page number. */
@ -3842,7 +3718,7 @@ dict_tree_build_node_ptr(
byte* buf;
ulint n_unique;

ind = dict_tree_find_index_low(tree, rec);
ind = tree->tree_index;

if (UNIV_UNLIKELY(tree->type & DICT_UNIVERSAL)) {
/* In a universal index tree, we take the whole record as
@ -3910,7 +3786,7 @@ dict_tree_copy_rec_order_prefix(
ulint n;

UNIV_PREFETCH_R(rec);
index = dict_tree_find_index_low(tree, rec);
index = tree->tree_index;

if (UNIV_UNLIKELY(tree->type & DICT_UNIVERSAL)) {
ut_a(!dict_table_is_comp(index->table));
@ -3938,7 +3814,7 @@ dict_tree_build_data_tuple(
dtuple_t* tuple;
dict_index_t* ind;

ind = dict_tree_find_index_low(tree, rec);
ind = tree->tree_index;

ut_ad(dict_table_is_comp(ind->table)
|| n_fields <= rec_get_n_fields_old(rec));
@ -4096,6 +3972,18 @@ dict_update_statistics(
dict_update_statistics_low(table, FALSE);
}

/**************************************************************************
A noninlined version of dict_table_get_low. */

dict_table_t*
dict_table_get_low_noninlined(
/*==========================*/
/* out: table, NULL if not found */
const char* table_name) /* in: table name */
{
return(dict_table_get_low(table_name));
}

/**************************************************************************
Prints info of a foreign key constraint. */
static
@ -4520,15 +4408,3 @@ dict_index_name_print(
fputs(" of table ", file);
ut_print_name(file, trx, index->table_name);
}

/************************************************************************
Export an inlined function for use in ha_innodb.c. */
ibool
innodb_dict_table_is_comp(
/*===============*/
/* out: TRUE if table uses the
compact page format */
const dict_table_t* table) /* in: table */
{
return dict_table_is_comp(table);
}
@ -768,7 +768,7 @@ dict_load_table(
if (!btr_pcur_is_on_user_rec(&pcur, &mtr)
|| rec_get_deleted_flag(rec, 0)) {
/* Not found */

err_exit:
btr_pcur_close(&pcur);
mtr_commit(&mtr);
mem_heap_free(heap);
@ -780,11 +780,8 @@ dict_load_table(

/* Check if the table name in record is the searched one */
if (len != ut_strlen(name) || ut_memcmp(name, field, len) != 0) {
btr_pcur_close(&pcur);
mtr_commit(&mtr);
mem_heap_free(heap);

return(NULL);
goto err_exit;
}

ut_a(0 == ut_strcmp("SPACE",
@ -848,36 +845,17 @@ dict_load_table(
table->id = mach_read_from_8(field);

field = rec_get_nth_field_old(rec, 5, &len);
table->type = mach_read_from_4(field);

if (table->type == DICT_TABLE_CLUSTER_MEMBER) {
ut_error;
#if 0 /* clustered tables have not been implemented yet */
field = rec_get_nth_field_old(rec, 6, &len);
table->mix_id = mach_read_from_8(field);

field = rec_get_nth_field_old(rec, 8, &len);
table->cluster_name = mem_heap_strdupl(heap, (char*) field, len);
#endif
}

if ((table->type == DICT_TABLE_CLUSTER)
|| (table->type == DICT_TABLE_CLUSTER_MEMBER)) {

field = rec_get_nth_field_old(rec, 7, &len);
ut_a(len == 4);
table->mix_len = mach_read_from_4(field);
if (UNIV_UNLIKELY(mach_read_from_4(field) != DICT_TABLE_ORDINARY)) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: table %s: unknown table type %lu\n",
name, (ulong) mach_read_from_4(field));
goto err_exit;
}

btr_pcur_close(&pcur);
mtr_commit(&mtr);

if (table->type == DICT_TABLE_CLUSTER_MEMBER) {
/* Load the cluster table definition if not yet in
memory cache */
dict_table_get_low(table->cluster_name);
}

dict_load_columns(table, heap);

dict_table_add_to_cache(table);
@ -50,7 +50,6 @@ dict_mem_table_create(
|
||||
|
||||
table->heap = heap;
|
||||
|
||||
table->type = DICT_TABLE_ORDINARY;
|
||||
table->flags = flags;
|
||||
table->name = mem_heap_strdup(heap, name);
|
||||
table->dir_path_of_temp_table = NULL;
|
||||
@ -66,9 +65,6 @@ dict_mem_table_create(
|
||||
|
||||
table->cached = FALSE;
|
||||
|
||||
table->mix_id = ut_dulint_zero;
|
||||
table->mix_len = 0;
|
||||
|
||||
table->cols = mem_heap_alloc(heap, (n_cols + DATA_N_SYS_COLS)
|
||||
* sizeof(dict_col_t));
|
||||
UT_LIST_INIT(table->indexes);
|
||||
@@ -97,42 +93,19 @@ dict_mem_table_create(
	return(table);
}

/**************************************************************************
Creates a cluster memory object. */

dict_table_t*
dict_mem_cluster_create(
/*====================*/
				/* out, own: cluster object */
	const char*	name,	/* in: cluster name */
	ulint		space,	/* in: space where the clustered indexes
				of the member tables are placed */
	ulint		n_cols,	/* in: number of columns */
	ulint		mix_len)/* in: length of the common key prefix in the
				cluster */
{
	dict_table_t*	cluster;

	/* Clustered tables cannot work with the compact record format. */
	cluster = dict_mem_table_create(name, space, n_cols, 0);

	cluster->type = DICT_TABLE_CLUSTER;
	cluster->mix_len = mix_len;

	return(cluster);
}

/**************************************************************************
Declares a non-published table as a member in a cluster. */
/********************************************************************
Free a table memory object. */

void
dict_mem_table_make_cluster_member(
/*===============================*/
	dict_table_t*	table,		/* in: non-published table */
	const char*	cluster_name)	/* in: cluster name */
dict_mem_table_free(
/*================*/
	dict_table_t*	table)	/* in: table */
{
	table->type = DICT_TABLE_CLUSTER_MEMBER;
	table->cluster_name = cluster_name;
	ut_ad(table);
	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);

	mutex_free(&(table->autoinc_mutex));
	mem_heap_free(table->heap);
}

/**************************************************************************
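dict_mem_table_free() gives dict_mem_table_create() a matching destructor: the autoinc mutex and the table's own heap are now released behind a single call instead of by each caller. A hedged usage sketch (the space id, column count and flags value are illustrative, not taken from this commit):

	dict_table_t*	table;

	table = dict_mem_table_create("test/t1", 0, 3, 0);
	/* ... add columns with dict_mem_table_add_col(), then decide
	the object is not needed after all ... */
	dict_mem_table_free(table);	/* frees autoinc mutex + table->heap */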
@@ -286,5 +259,8 @@ dict_mem_index_free(
/*================*/
	dict_index_t*	index)	/* in: index */
{
	ut_ad(index);
	ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);

	mem_heap_free(index->heap);
}

@@ -212,6 +212,38 @@ for_step(
	return(thr);
}

/**************************************************************************
Performs an execution step of an exit statement node. */

que_thr_t*
exit_step(
/*======*/
				/* out: query thread to run next or NULL */
	que_thr_t*	thr)	/* in: query thread */
{
	exit_node_t*	node;
	que_node_t*	loop_node;

	ut_ad(thr);

	node = thr->run_node;

	ut_ad(que_node_get_type(node) == QUE_NODE_EXIT);

	/* Loops exit by setting thr->run_node as the loop node's parent, so
	find our containing loop node and get its parent. */

	loop_node = que_node_get_containing_loop_node(node);

	/* If someone uses an EXIT statement outside of a loop, this will
	trigger. */
	ut_a(loop_node);

	thr->run_node = que_node_get_parent(loop_node);

	return(thr);
}

/**************************************************************************
Performs an execution step of a return-statement node. */

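exit_step() implements EXIT for InnoDB's internal SQL interpreter by resuming execution at the parent of the nearest enclosing loop node. A condensed, self-contained C sketch of that lookup (hypothetical node type; in the real code the walk is done by que_node_get_containing_loop_node()):

#include <stddef.h>

typedef struct node_t node_t;
struct node_t {
	node_t*	parent;		/* enclosing statement node */
	int	is_loop;	/* nonzero for loop nodes */
};

/* Walk up the statement tree to the nearest enclosing loop; returns
NULL when EXIT is used outside any loop, which is the case that the
ut_a(loop_node) assertion above guards against. */
node_t*
containing_loop(node_t* node)
{
	while (node != NULL && !node->is_loop) {
		node = node->parent;
	}

	return(node);
}

/* An EXIT step then continues at containing_loop(node)->parent. */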
@@ -1170,9 +1170,9 @@ ibuf_dummy_index_free(
	dict_index_t*	index)	/* in: dummy index */
{
	dict_table_t*	table = index->table;
	mem_heap_free(index->heap);
	mutex_free(&(table->autoinc_mutex));
	mem_heap_free(table->heap);

	dict_mem_index_free(index);
	dict_mem_table_free(table);
}

/*************************************************************************

@@ -459,7 +459,10 @@ btr_store_big_rec_extern_fields(
	dict_index_t*	index,		/* in: index of rec; the index tree
					MUST be X-latched */
	rec_t*		rec,		/* in: record */
	const ulint*	offsets,	/* in: rec_get_offsets(rec, index) */
	const ulint*	offsets,	/* in: rec_get_offsets(rec, index);
					the "external storage" flags in offsets
					will not correspond to rec when
					this function returns */
	big_rec_t*	big_rec_vec,	/* in: vector containing fields
					to be stored externally */
	mtr_t*		local_mtr);	/* in: mtr containing the latch to

@@ -98,8 +98,7 @@ btr_cur_compress_recommendation(
{
	page_t*	page;

	ut_ad(mtr_memo_contains(mtr, buf_block_align(
					btr_cur_get_page(cursor)),
	ut_ad(mtr_memo_contains(mtr, buf_block_align(btr_cur_get_rec(cursor)),
				MTR_MEMO_PAGE_X_FIX));

	page = btr_cur_get_page(cursor);
@@ -142,8 +141,7 @@ btr_cur_can_delete_without_compress(
{
	page_t*	page;

	ut_ad(mtr_memo_contains(mtr, buf_block_align(
					btr_cur_get_page(cursor)),
	ut_ad(mtr_memo_contains(mtr, buf_block_align(btr_cur_get_rec(cursor)),
				MTR_MEMO_PAGE_X_FIX));

	page = btr_cur_get_page(cursor);

@@ -180,12 +180,14 @@ struct btr_search_struct{
					the same prefix should be indexed in the
					hash index */
	/*----------------------*/
#ifdef UNIV_SEARCH_PERF_STAT
	ulint	n_hash_succ;	/* number of successful hash searches thus
				far */
	ulint	n_hash_fail;	/* number of failed hash searches */
	ulint	n_patt_succ;	/* number of successful pattern searches thus
				far */
	ulint	n_searches;	/* number of searches */
#endif /* UNIV_SEARCH_PERF_STAT */
};

#define BTR_SEARCH_MAGIC_N	1112765
@@ -218,8 +220,8 @@ extern rw_lock_t*	btr_search_latch_temp;

#ifdef UNIV_SEARCH_PERF_STAT
extern ulint	btr_search_n_succ;
#endif /* UNIV_SEARCH_PERF_STAT */
extern ulint	btr_search_n_hash_fail;
#endif /* UNIV_SEARCH_PERF_STAT */

/* After change in n_fields or n_bytes in info, this many rounds are waited
before starting the hash analysis again: this is to save CPU time when there

@@ -26,14 +26,14 @@ void
buf_LRU_try_free_flushed_blocks(void);
/*==================================*/
/**********************************************************************
Returns TRUE if less than 15 % of the buffer pool is available. This can be
Returns TRUE if less than 25 % of the buffer pool is available. This can be
used in heuristics to prevent huge transactions eating up the whole buffer
pool for their locks. */

ibool
buf_LRU_buf_pool_running_out(void);
/*==============================*/
			/* out: TRUE if less than 15 % of buffer pool
			/* out: TRUE if less than 25 % of buffer pool
			left */

/*#######################################################################

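This hunk only raises the documented threshold from 15 % to 25 % to match the implementation. As the comment says, the predicate is meant as an early-out before operations whose locks would pin large parts of the buffer pool; a hedged sketch of the calling pattern (DB_LOCK_TABLE_FULL is InnoDB's error code for this situation, but the surrounding code is illustrative):

	if (buf_LRU_buf_pool_running_out()) {
		/* Refuse to proceed: a huge transaction could eat the
		remaining quarter of the buffer pool with its locks. */
		return(DB_LOCK_TABLE_FULL);
	}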
@@ -100,6 +100,15 @@ ulint
dict_col_get_clust_pos(
/*===================*/
	dict_col_t*	col);
/********************************************************************
If the given column name is reserved for InnoDB system columns, return
TRUE. */

ibool
dict_col_name_is_reserved(
/*======================*/
				/* out: TRUE if name is reserved */
	const char*	name);	/* in: column name */
/************************************************************************
Initializes the autoinc counter. It is not an error to initialize an already
initialized counter. */
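dict_col_name_is_reserved() lets DDL code reject user-defined columns that would collide with InnoDB's hidden system columns (DB_ROW_ID, DB_TRX_ID, DB_ROLL_PTR). A hedged caller sketch (the loop and the DB_ERROR return are illustrative, not copied from this commit):

	ulint	i;

	for (i = 0; i < n_cols; i++) {
		if (dict_col_name_is_reserved(col_names[i])) {
			/* e.g. CREATE TABLE t (DB_ROW_ID INT) must fail */
			return(DB_ERROR);
		}
	}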
@@ -321,6 +330,14 @@ dict_table_get_low(
				/* out: table, NULL if not found */
	const char*	table_name);	/* in: table name */
/**************************************************************************
A noninlined version of dict_table_get_low. */

dict_table_t*
dict_table_get_low_noninlined(
/*==========================*/
				/* out: table, NULL if not found */
	const char*	table_name);	/* in: table name */
/**************************************************************************
Returns an index object. */
UNIV_INLINE
dict_index_t*
@@ -496,10 +513,11 @@ dict_table_is_comp(
					compact page format */
	const dict_table_t*	table);	/* in: table */
/************************************************************************
Non inlined version of 'dict_table_is_comp' above. */
Check whether the table uses the compact page format. */

ibool
innodb_dict_table_is_comp(
/*===============*/
dict_table_is_comp_noninline(
/*=========================*/
				/* out: TRUE if table uses the
				compact page format */
	const dict_table_t*	table);	/* in: table */
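dict_table_get_low_noninlined() and the renamed dict_table_is_comp_noninline() exist so that callers which cannot see the UNIV_INLINE definitions in the .ic files can still reach these helpers across an ordinary function boundary. A hedged sketch of the wrapper shape, assuming it simply forwards to the inlined version as the "Non inlined version" comment suggests:

	ibool
	dict_table_is_comp_noninline(
	/*=========================*/
					/* out: TRUE if table uses the
					compact page format */
		const dict_table_t*	table)	/* in: table */
	{
		return(dict_table_is_comp(table));
	}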
@@ -725,33 +743,6 @@ dict_tree_free(
/**************************************************************************
In an index tree, finds the index corresponding to a record in the tree. */

dict_index_t*
dict_tree_find_index(
/*=================*/
				/* out: index */
	dict_tree_t*	tree,	/* in: index tree */
	rec_t*		rec);	/* in: record for which to find correct index */
/**************************************************************************
In an index tree, finds the index corresponding to a dtuple which is used
in a search to a tree. */

dict_index_t*
dict_tree_find_index_for_tuple(
/*===========================*/
				/* out: index; NULL if the tuple does not
				contain the mix id field in a mixed tree */
	dict_tree_t*	tree,	/* in: index tree */
	dtuple_t*	tuple);	/* in: tuple for which to find index */
/***********************************************************************
Checks if a table which is a mixed cluster member owns a record. */

ibool
dict_is_mixed_table_rec(
/*====================*/
				/* out: TRUE if the record belongs to this
				table */
	dict_table_t*	table,	/* in: table in a mixed cluster */
	rec_t*		rec);	/* in: user record in the clustered index */
/**************************************************************************
Returns an index object if it is found in the dictionary cache. */

@@ -760,6 +751,7 @@ dict_index_get_if_in_cache(
/*=======================*/
				/* out: index, NULL if not found */
	dulint	index_id);	/* in: index id */
#ifdef UNIV_DEBUG
/**************************************************************************
Checks that a tuple has n_fields_cmp value in a sensible range, so that
no comparison can occur with the page number field in a node pointer. */
@@ -770,6 +762,7 @@ dict_tree_check_search_tuple(
				/* out: TRUE if ok */
	dict_tree_t*	tree,	/* in: index tree */
	dtuple_t*	tuple);	/* in: tuple used in a search */
#endif /* UNIV_DEBUG */
/**************************************************************************
Builds a node pointer out of a physical record and a page number. */

@@ -916,7 +909,6 @@ dict_tables_have_same_db(
				dbname '/' tablename */
	const char*	name2);	/* in: table name in the form
				dbname '/' tablename */

/*************************************************************************
Scans from pointer onwards. Stops if is at the start of a copy of
'string' where characters are compared without case sensitivity. Stops
@@ -928,7 +920,6 @@ dict_scan_to(
				/* out: scanned up to this */
	const char*	ptr,	/* in: scan from */
	const char*	string);/* in: look for this */

/* Buffers for storing detailed information about the latest foreign key
and unique key errors */
extern FILE*	dict_foreign_err_file;

@@ -92,7 +92,6 @@ dict_table_get_n_user_cols(
{
	ut_ad(table);
	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
	ut_ad(table->cached);

	return(table->n_cols - DATA_N_SYS_COLS);
}
@@ -126,7 +125,6 @@ dict_table_get_n_cols(
{
	ut_ad(table);
	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
	ut_ad(table->cached);

	return(table->n_cols);
}

@@ -35,9 +35,11 @@ combination of types */

/* Types for a table object */
#define DICT_TABLE_ORDINARY		1
#if 0 /* not implemented */
#define	DICT_TABLE_CLUSTER_MEMBER	2
#define	DICT_TABLE_CLUSTER		3 /* this means that the table is
					really a cluster definition */
#endif

/* Table flags */
#define DICT_TF_COMPACT			1	/* compact page format */
@@ -56,29 +58,13 @@ dict_mem_table_create(
				a member of a cluster */
	ulint		n_cols,	/* in: number of columns */
	ulint		flags);	/* in: table flags */
/**************************************************************************
Creates a cluster memory object. */

dict_cluster_t*
dict_mem_cluster_create(
/*====================*/
			/* out, own: cluster object (where the
			type dict_cluster_t == dict_table_t) */
	const char*	name,	/* in: cluster name */
	ulint		space,	/* in: space where the clustered
				indexes of the member tables are
				placed */
	ulint		n_cols,	/* in: number of columns */
	ulint		mix_len);	/* in: length of the common key prefix
					in the cluster */
/**************************************************************************
Declares a non-published table as a member in a cluster. */
/********************************************************************
Free a table memory object. */

void
dict_mem_table_make_cluster_member(
/*===============================*/
	dict_table_t*	table,	/* in: non-published table */
	const char*	cluster_name);	/* in: cluster name */
dict_mem_table_free(
/*================*/
	dict_table_t*	table);	/* in: table */
/**************************************************************************
Adds a column definition to a table. */

@@ -176,9 +162,7 @@ struct dict_field_struct{
/* Data structure for an index tree */
struct dict_tree_struct{
	ulint		type;	/* tree type */
	dulint		id;	/* id of the index stored in the tree, in the
				case of a mixed index, the id of the clustered
				index of the cluster table */
	dulint		id;	/* id of the index stored in the tree */
	ulint		space;	/* space of index tree */
	ulint		page;	/* index tree root page number */
	byte		pad[64];/* Padding to prevent other memory hotspots on
@@ -189,13 +173,8 @@ struct dict_tree_struct{
				struct has been memory-fixed (by mini-
				transactions wanting to access the index
				tree) */
	UT_LIST_BASE_NODE_T(dict_index_t)
			tree_indexes; /* list of indexes stored in the
				index tree: if the tree is not of the
				mixed type there is only one index in
				the list; if the tree is of the mixed
				type, the first index in the list is the
				index of the cluster which owns the tree */
	dict_index_t*	tree_index; /* the index stored in the
				index tree */
	ulint		magic_n;/* magic number */
};

@@ -301,8 +280,7 @@ a foreign key constraint is enforced, therefore RESTRICT just means no flag */

/* Data structure for a database table */
struct dict_table_struct{
	dulint		id;	/* id of the table or cluster */
	ulint		type;	/* DICT_TABLE_ORDINARY, ... */
	dulint		id;	/* id of the table */
	ulint		flags;	/* DICT_TF_COMPACT, ... */
	mem_heap_t*	heap;	/* memory heap */
	const char*	name;	/* table name */
@@ -371,17 +349,6 @@ struct dict_table_struct{
	UT_LIST_BASE_NODE_T(lock_t)
			locks;	/* list of locks on the table */
	/*----------------------*/
	dulint		mix_id;	/* if the table is a member in a cluster,
				this is its mix id */
	ulint		mix_len;/* if the table is a cluster or a member
				this is the common key prefix length */
	ulint		mix_id_len;/* mix id length in a compressed form */
	byte		mix_id_buf[12];
				/* mix id of a mixed table written in
				a compressed form */
	const char*	cluster_name;	/* if the table is a member in a
				cluster, this is the name of the cluster */
	/*----------------------*/
	ibool		does_not_fit_in_memory;
				/* this field is used to specify in simulations
				tables which are so big that disk should be

@@ -63,6 +63,14 @@ proc_eval_step(
				/* out: query thread to run next or NULL */
	que_thr_t*	thr);	/* in: query thread */
/**************************************************************************
Performs an execution step of an exit statement node. */

que_thr_t*
exit_step(
/*======*/
				/* out: query thread to run next or NULL */
	que_thr_t*	thr);	/* in: query thread */
/**************************************************************************
Performs an execution step of a return-statement node. */

que_thr_t*

@@ -222,6 +222,32 @@ do {\
	mem_heap_free_top(hash_get_heap(TABLE, fold111), sizeof(TYPE));\
} while (0)

/********************************************************************
Move all hash table entries from OLD_TABLE to NEW_TABLE.*/

#define HASH_MIGRATE(OLD_TABLE, NEW_TABLE, NODE_TYPE, PTR_NAME, FOLD_FUNC) \
do {\
	ulint		i2222;\
	ulint		cell_count2222;\
\
	cell_count2222 = hash_get_n_cells(OLD_TABLE);\
\
	for (i2222 = 0; i2222 < cell_count2222; i2222++) {\
		NODE_TYPE*	node2222 = HASH_GET_FIRST((OLD_TABLE), i2222);\
\
		while (node2222) {\
			NODE_TYPE*	next2222 = node2222->PTR_NAME;\
			ulint		fold2222 = FOLD_FUNC(node2222);\
\
			HASH_INSERT(NODE_TYPE, PTR_NAME, (NEW_TABLE),\
				fold2222, node2222);\
\
			node2222 = next2222;\
		}\
	}\
} while (0)

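HASH_MIGRATE rehashes every node of OLD_TABLE into NEW_TABLE, which is what a chained hash table needs when it is resized: once the cell count changes, each fold value maps to a different cell. A hedged usage sketch, assuming a node type my_node_t chained through a "hash" pointer and folded by a caller-supplied my_node_fold() (both hypothetical):

	/* Double the table, re-inserting every node at its new cell. */
	hash_table_t*	new_table;

	new_table = hash_create(2 * hash_get_n_cells(old_table));

	HASH_MIGRATE(old_table, new_table, my_node_t, hash, my_node_fold);

	hash_table_free(old_table);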
/****************************************************************
Gets the mutex index for a fold value in a hash table. */
UNIV_INLINE

@@ -595,6 +595,15 @@ ibool
lock_validate(void);
/*===============*/
			/* out: TRUE if ok */
/*************************************************************************
Return approximate number of record locks (bits set in the bitmap) for
this transaction. Since delete-marked records may be removed, the
record count will not be precise. */

ulint
lock_number_of_rows_locked(
/*=======================*/
	trx_t*	trx);	/* in: transaction */

/* The lock system */
extern lock_sys_t*	lock_sys;

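lock_number_of_rows_locked() counts the bits set in the transaction's record-lock bitmaps, so it is cheap but, as the comment notes, only approximate. A hedged sketch of the kind of monitoring output this enables (the message text is illustrative):

	fprintf(stderr,
		"Trx has approximately %lu row locks\n",
		(ulong) lock_number_of_rows_locked(trx));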