Merge 10.2 into 10.3

Marko Mäkelä 2018-08-28 12:22:56 +03:00
commit 7830fb7f45
83 changed files with 1939 additions and 600 deletions

debian/control

@ -688,7 +688,7 @@ Breaks: mariadb-backup-10.1,
mariadb-backup-10.2
Replaces: mariadb-backup-10.1,
mariadb-backup-10.2
Depends: mariadb-client-core-10.3,
Depends: mariadb-client-core-10.3 (= ${binary:Version}),
${misc:Depends},
${shlibs:Depends}
Description: Backup tool for MariaDB server


@ -359,9 +359,8 @@ struct ddl_tracker_t {
/* For DDL operations found in the redo log */
space_id_to_name_t id_to_name;
};
const space_id_t REMOVED_SPACE_ID = ULINT_MAX;
static ddl_tracker_t ddl_tracker;
/* Whether xtrabackup_binlog_info should be created on recovery */
static bool recover_binlog_info;
@ -617,9 +616,8 @@ void backup_file_op(ulint space_id, const byte* flags,
/** Callback whenever MLOG_INDEX_LOAD happens.
@param[in] space_id space id to check
@return false */
void backup_optimized_ddl_op(ulint space_id)
@param[in] space_id space id to check */
static void backup_optimized_ddl_op(ulint space_id)
{
// TODO : handle incremental
if (xtrabackup_incremental)
@ -630,6 +628,15 @@ void backup_optimized_ddl_op(ulint space_id)
pthread_mutex_unlock(&backup_mutex);
}
/** Callback whenever MLOG_TRUNCATE happens. */
static void backup_truncate_fail()
{
msg("mariabackup: Incompatible TRUNCATE operation detected.%s\n",
opt_lock_ddl_per_table
? ""
: " Use --lock-ddl-per-table to lock all tables before backup.");
}
/* ======== Data copying thread context ======== */
typedef struct {
@ -1209,7 +1216,7 @@ struct my_option xb_server_options[] =
"Use native AIO if supported on this platform.",
(G_PTR*) &srv_use_native_aio,
(G_PTR*) &srv_use_native_aio, 0, GET_BOOL, NO_ARG,
FALSE, 0, 0, 0, 0, 0},
TRUE, 0, 0, 0, 0, 0},
{"innodb_page_size", OPT_INNODB_PAGE_SIZE,
"The universal page size of the database.",
(G_PTR*) &innobase_page_size, (G_PTR*) &innobase_page_size, 0,
@ -4182,12 +4189,13 @@ fail_before_log_copying_thread_start:
/* copy log file by current position */
log_copy_scanned_lsn = checkpoint_lsn_start;
recv_sys->recovered_lsn = log_copy_scanned_lsn;
log_optimized_ddl_op = backup_optimized_ddl_op;
log_truncate = backup_truncate_fail;
if (xtrabackup_copy_logfile())
goto fail_before_log_copying_thread_start;
log_copying_stop = os_event_create(0);
log_optimized_ddl_op = backup_optimized_ddl_op;
os_thread_create(log_copying_thread, NULL, &log_copying_thread_id);
/* FLUSH CHANGED_PAGE_BITMAPS call */
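The reordering above is the point of the hunk: both redo-log hooks (log_optimized_ddl_op, log_truncate) are now assigned before the first xtrabackup_copy_logfile() call, so MLOG_INDEX_LOAD and MLOG_TRUNCATE records already present in the log are not missed. A minimal standalone sketch of the hook pattern follows; parse_redo_record(), mlog_type and the two my_* callbacks are hypothetical stand-ins, not the real recv-parsing code.

#include <cstdio>

typedef unsigned long ulint;

/* Hooks mirroring the globals in the diff; assigned before log copying. */
static void (*log_optimized_ddl_op)(ulint space_id);
static void (*log_truncate)();

enum mlog_type { MLOG_INDEX_LOAD, MLOG_TRUNCATE, MLOG_OTHER };

/* Hypothetical parser step: fire the hook when a matching record is seen. */
static void parse_redo_record(mlog_type type, ulint space_id)
{
    if (type == MLOG_INDEX_LOAD && log_optimized_ddl_op)
        log_optimized_ddl_op(space_id);
    else if (type == MLOG_TRUNCATE && log_truncate)
        log_truncate();
}

static void my_ddl_op(ulint space_id)
{ printf("optimized DDL in space %lu\n", space_id); }

static void my_truncate()
{ printf("incompatible TRUNCATE detected\n"); }

int main()
{
    /* Register before scanning, as the reordered code does; records
       parsed earlier would otherwise be dropped silently. */
    log_optimized_ddl_op = my_ddl_op;
    log_truncate = my_truncate;
    parse_redo_record(MLOG_INDEX_LOAD, 42);
    parse_redo_record(MLOG_TRUNCATE, 0);
    return 0;
}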


@ -16027,6 +16027,374 @@ a
aa
DROP FUNCTION f1;
#
# MDEV-17011: condition pushdown into materialized derived used
# in INSERT SELECT, multi-table UPDATE and DELETE
#
CREATE TABLE t1 (a int ,b int) ENGINE=MyISAM;
INSERT INTO t1 VALUES
(1, 1), (1, 2), (2, 1), (2, 2), (3,1), (3,3), (4,2);
CREATE TABLE t2 (a int) ENGINE MYISAM;
INSERT INTO t2 VALUES
(3), (7), (1), (4), (1);
CREATE TABLE t3 (a int, b int) ENGINE MYISAM;
EXPLAIN FORMAT=JSON INSERT INTO t3
SELECT * FROM (SELECT a, count(*) as c FROM t1 GROUP BY a) t WHERE a<=2;
EXPLAIN
{
"query_block": {
"select_id": 1,
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
"rows": 7,
"filtered": 100,
"attached_condition": "t.a <= 2",
"materialized": {
"query_block": {
"select_id": 2,
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
"table": {
"table_name": "t1",
"access_type": "ALL",
"rows": 7,
"filtered": 100,
"attached_condition": "t1.a <= 2"
}
}
}
}
}
}
}
}
INSERT INTO t3
SELECT * FROM (SELECT a, count(*) as c FROM t1 GROUP BY a) t WHERE a<=2;
SELECT * FROM t3;
a b
1 2
2 2
EXPLAIN FORMAT=JSON UPDATE t2, (SELECT a, count(*) as c FROM t1 GROUP BY a) t SET t2.a=t.c+10
WHERE t2.a= t.c and t.a>=3;
EXPLAIN
{
"query_block": {
"select_id": 1,
"table": {
"table_name": "t2",
"access_type": "ALL",
"rows": 5,
"filtered": 100,
"attached_condition": "t2.a is not null"
},
"table": {
"table_name": "<derived2>",
"access_type": "ref",
"possible_keys": ["key0"],
"key": "key0",
"key_length": "8",
"used_key_parts": ["c"],
"ref": ["test.t2.a"],
"rows": 2,
"filtered": 100,
"attached_condition": "t2.a = t.c and t.a >= 3",
"materialized": {
"query_block": {
"select_id": 2,
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
"table": {
"table_name": "t1",
"access_type": "ALL",
"rows": 7,
"filtered": 100,
"attached_condition": "t1.a >= 3"
}
}
}
}
}
}
}
}
UPDATE t2, (SELECT a, count(*) as c FROM t1 GROUP BY a) t SET t2.a=t.c+10
WHERE t2.a= t.c and t.a>=3;
SELECT * FROM t2;
a
3
7
11
4
11
EXPLAIN FORMAT=JSON DELETE t2 FROM t2, (SELECT a, count(*) as c FROM t1 GROUP BY a) t
WHERE t2.a= t.c+9 and t.a=2;
EXPLAIN
{
"query_block": {
"select_id": 1,
"table": {
"table_name": "t2",
"access_type": "ALL",
"rows": 5,
"filtered": 100
},
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
"rows": 7,
"filtered": 100,
"attached_condition": "t.a = 2 and t2.a = t.c + 9",
"materialized": {
"query_block": {
"select_id": 2,
"table": {
"table_name": "t1",
"access_type": "ALL",
"rows": 7,
"filtered": 100,
"attached_condition": "t1.a = 2"
}
}
}
}
}
}
DELETE t2 FROM t2, (SELECT a, count(*) as c FROM t1 GROUP BY a) t
WHERE t2.a= t.c+9 and t.a=2;
SELECT * FROM t2;
a
3
7
4
DROP TABLE t1,t2,t3;
#
# MDEV-16765: pushdown condition with the CASE structure
# defined with Item_cond item
#
CREATE TABLE t1(a INT, b INT);
INSERT INTO t1 VALUES (1,2), (3,4), (2,3);
SELECT *
FROM
(
SELECT CASE WHEN ((tab2.max_a=1) OR (tab2.max_a=2))
THEN 1 ELSE 0 END AS max_a,b
FROM (SELECT MAX(a) as max_a,b FROM t1 GROUP BY t1.b) AS tab2
) AS tab1
WHERE (tab1.max_a=1);
max_a b
1 2
1 3
EXPLAIN FORMAT=JSON SELECT *
FROM
(
SELECT CASE WHEN ((tab2.max_a=1) OR (tab2.max_a=2))
THEN 1 ELSE 0 END AS max_a,b
FROM (SELECT MAX(a) as max_a,b FROM t1 GROUP BY t1.b) AS tab2
) AS tab1
WHERE (tab1.max_a=1);
EXPLAIN
{
"query_block": {
"select_id": 1,
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
"rows": 3,
"filtered": 100,
"attached_condition": "case when (tab2.max_a = 1 or tab2.max_a = 2) then 1 else 0 end = 1",
"materialized": {
"query_block": {
"select_id": 3,
"having_condition": "case when (max_a = 1 or max_a = 2) then 1 else 0 end = 1",
"filesort": {
"sort_key": "t1.b",
"temporary_table": {
"table": {
"table_name": "t1",
"access_type": "ALL",
"rows": 3,
"filtered": 100
}
}
}
}
}
}
}
}
SELECT *
FROM
(
SELECT CASE WHEN ((tab2.max_a=1) OR ((tab2.max_a>2) AND (tab2.max_a<4)))
THEN 1 ELSE 0 END AS max_a,b
FROM (SELECT MAX(a) as max_a,b FROM t1 GROUP BY t1.b) AS tab2
) AS tab1
WHERE (tab1.max_a=1);
max_a b
1 2
1 4
EXPLAIN FORMAT=JSON SELECT *
FROM
(
SELECT CASE WHEN ((tab2.max_a=1) OR ((tab2.max_a>2) AND (tab2.max_a<4)))
THEN 1 ELSE 0 END AS max_a,b
FROM (SELECT MAX(a) as max_a,b FROM t1 GROUP BY t1.b) AS tab2
) AS tab1
WHERE (tab1.max_a=1);
EXPLAIN
{
"query_block": {
"select_id": 1,
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
"rows": 3,
"filtered": 100,
"attached_condition": "case when (tab2.max_a = 1 or tab2.max_a > 2 and tab2.max_a < 4) then 1 else 0 end = 1",
"materialized": {
"query_block": {
"select_id": 3,
"having_condition": "case when (max_a = 1 or max_a > 2 and max_a < 4) then 1 else 0 end = 1",
"filesort": {
"sort_key": "t1.b",
"temporary_table": {
"table": {
"table_name": "t1",
"access_type": "ALL",
"rows": 3,
"filtered": 100
}
}
}
}
}
}
}
}
SELECT *
FROM
(
SELECT CASE WHEN ((tab2.max_a>1) AND ((tab2.max_a=2) OR (tab2.max_a>2)))
THEN 1 ELSE 0 END AS max_a,b
FROM (SELECT MAX(a) as max_a,b FROM t1 GROUP BY t1.b) AS tab2
) AS tab1
WHERE (tab1.max_a=1);
max_a b
1 3
1 4
EXPLAIN FORMAT=JSON SELECT *
FROM
(
SELECT CASE WHEN ((tab2.max_a>1) AND ((tab2.max_a=2) OR (tab2.max_a>2)))
THEN 1 ELSE 0 END AS max_a,b
FROM (SELECT MAX(a) as max_a,b FROM t1 GROUP BY t1.b) AS tab2
) AS tab1
WHERE (tab1.max_a=1);
EXPLAIN
{
"query_block": {
"select_id": 1,
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
"rows": 3,
"filtered": 100,
"attached_condition": "case when (tab2.max_a > 1 and (tab2.max_a = 2 or tab2.max_a > 2)) then 1 else 0 end = 1",
"materialized": {
"query_block": {
"select_id": 3,
"having_condition": "case when (max_a > 1 and (max_a = 2 or max_a > 2)) then 1 else 0 end = 1",
"filesort": {
"sort_key": "t1.b",
"temporary_table": {
"table": {
"table_name": "t1",
"access_type": "ALL",
"rows": 3,
"filtered": 100
}
}
}
}
}
}
}
}
SELECT *
FROM
(
SELECT CASE WHEN ((tab2.b=2) OR (tab2.b=4))
THEN 1 ELSE 0 END AS max_a,b
FROM (SELECT MAX(a) as max_a,b FROM t1 GROUP BY t1.b) AS tab2
) AS tab1
WHERE (tab1.max_a=1);
max_a b
1 2
1 4
EXPLAIN FORMAT=JSON SELECT *
FROM
(
SELECT CASE WHEN ((tab2.b=2) OR (tab2.b=4))
THEN 1 ELSE 0 END AS max_a,b
FROM (SELECT MAX(a) as max_a,b FROM t1 GROUP BY t1.b) AS tab2
) AS tab1
WHERE (tab1.max_a=1);
EXPLAIN
{
"query_block": {
"select_id": 1,
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
"rows": 3,
"filtered": 100,
"attached_condition": "case when (tab2.b = 2 or tab2.b = 4) then 1 else 0 end = 1",
"materialized": {
"query_block": {
"select_id": 3,
"filesort": {
"sort_key": "t1.b",
"temporary_table": {
"table": {
"table_name": "t1",
"access_type": "ALL",
"rows": 3,
"filtered": 100,
"attached_condition": "case when (t1.b = 2 or t1.b = 4) then 1 else 0 end = 1"
}
}
}
}
}
}
}
}
DROP TABLE t1;
#
# MDEV-16803: pushdown condition with IN predicate in the derived table
# defined with several SELECT statements
#
CREATE TABLE t1 (a INT, b INT);
INSERT INTO t1 VALUES (1,2),(3,2),(1,1);
SELECT * FROM
(
SELECT a,b,1 as c
FROM t1
UNION ALL
SELECT a,b,2 as c
FROM t1
) AS tab
WHERE ((a,b) IN ((1,2),(3,2)));
a b c
1 2 1
3 2 1
1 2 2
3 2 2
DROP TABLE t1;
# Start of 10.3 tests
#
# MDEV-16801: splittable materialized derived/views with
# one grouping field from table without keys
#


@ -2987,6 +2987,130 @@ DELIMITER ;$$
SELECT a FROM (SELECT "aa" a) t WHERE f1(t.a, (SELECT MAX('aa') FROM DUAL LIMIT 1));
DROP FUNCTION f1;
--echo #
--echo # MDEV-17011: condition pushdown into materialized derived used
--echo # in INSERT SELECT, multi-table UPDATE and DELETE
--echo #
CREATE TABLE t1 (a int ,b int) ENGINE=MyISAM;
INSERT INTO t1 VALUES
(1, 1), (1, 2), (2, 1), (2, 2), (3,1), (3,3), (4,2);
CREATE TABLE t2 (a int) ENGINE MYISAM;
INSERT INTO t2 VALUES
(3), (7), (1), (4), (1);
CREATE TABLE t3 (a int, b int) ENGINE MYISAM;
let $q1=
INSERT INTO t3
SELECT * FROM (SELECT a, count(*) as c FROM t1 GROUP BY a) t WHERE a<=2;
eval EXPLAIN FORMAT=JSON $q1;
eval $q1;
SELECT * FROM t3;
let $q2=
UPDATE t2, (SELECT a, count(*) as c FROM t1 GROUP BY a) t SET t2.a=t.c+10
WHERE t2.a= t.c and t.a>=3;
eval EXPLAIN FORMAT=JSON $q2;
eval $q2;
SELECT * FROM t2;
let $q3=
DELETE t2 FROM t2, (SELECT a, count(*) as c FROM t1 GROUP BY a) t
WHERE t2.a= t.c+9 and t.a=2;
eval EXPLAIN FORMAT=JSON $q3;
eval $q3;
SELECT * FROM t2;
DROP TABLE t1,t2,t3;
--echo #
--echo # MDEV-16765: pushdown condition with the CASE structure
--echo # defined with Item_cond item
--echo #
CREATE TABLE t1(a INT, b INT);
INSERT INTO t1 VALUES (1,2), (3,4), (2,3);
LET $query=
SELECT *
FROM
(
SELECT CASE WHEN ((tab2.max_a=1) OR (tab2.max_a=2))
THEN 1 ELSE 0 END AS max_a,b
FROM (SELECT MAX(a) as max_a,b FROM t1 GROUP BY t1.b) AS tab2
) AS tab1
WHERE (tab1.max_a=1);
EVAL $query;
EVAL EXPLAIN FORMAT=JSON $query;
LET $query=
SELECT *
FROM
(
SELECT CASE WHEN ((tab2.max_a=1) OR ((tab2.max_a>2) AND (tab2.max_a<4)))
THEN 1 ELSE 0 END AS max_a,b
FROM (SELECT MAX(a) as max_a,b FROM t1 GROUP BY t1.b) AS tab2
) AS tab1
WHERE (tab1.max_a=1);
EVAL $query;
EVAL EXPLAIN FORMAT=JSON $query;
LET $query=
SELECT *
FROM
(
SELECT CASE WHEN ((tab2.max_a>1) AND ((tab2.max_a=2) OR (tab2.max_a>2)))
THEN 1 ELSE 0 END AS max_a,b
FROM (SELECT MAX(a) as max_a,b FROM t1 GROUP BY t1.b) AS tab2
) AS tab1
WHERE (tab1.max_a=1);
EVAL $query;
EVAL EXPLAIN FORMAT=JSON $query;
LET $query=
SELECT *
FROM
(
SELECT CASE WHEN ((tab2.b=2) OR (tab2.b=4))
THEN 1 ELSE 0 END AS max_a,b
FROM (SELECT MAX(a) as max_a,b FROM t1 GROUP BY t1.b) AS tab2
) AS tab1
WHERE (tab1.max_a=1);
EVAL $query;
EVAL EXPLAIN FORMAT=JSON $query;
DROP TABLE t1;
--echo #
--echo # MDEV-16803: pushdown condition with IN predicate in the derived table
--echo # defined with several SELECT statements
--echo #
CREATE TABLE t1 (a INT, b INT);
INSERT INTO t1 VALUES (1,2),(3,2),(1,1);
SELECT * FROM
(
SELECT a,b,1 as c
FROM t1
UNION ALL
SELECT a,b,2 as c
FROM t1
) AS tab
WHERE ((a,b) IN ((1,2),(3,2)));
DROP TABLE t1;
--echo # Start of 10.3 tests
--echo #
--echo # MDEV-16801: splittable materialized derived/views with
--echo # one grouping field from table without keys


@ -106,5 +106,25 @@ Note 1003 select `test`.`t2`.`d1` AS `d1`,`test`.`t1`.`d1` AS `d1` from `test`.`
DROP VIEW v1;
DROP TABLE t1,t2;
#
# MDEV-15475: Assertion `!table || (!table->read_set ||
# bitmap_is_set(table->read_set, field_index))'
# failed on EXPLAIN EXTENDED with constant table and view
#
CREATE TABLE t1 (pk INT PRIMARY KEY) ENGINE=MyISAM;
CREATE VIEW v1 AS SELECT * FROM t1;
INSERT INTO t1 VALUES (1);
EXPLAIN EXTENDED SELECT ISNULL(pk) FROM v1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 system NULL NULL NULL NULL 1 100.00
Warnings:
Note 1003 select /*always not null*/ 1 is null AS `ISNULL(pk)` from dual
EXPLAIN EXTENDED SELECT IFNULL(pk,0) FROM v1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 system NULL NULL NULL NULL 1 100.00
Warnings:
Note 1003 select ifnull(1,0) AS `IFNULL(pk,0)` from dual
DROP VIEW v1;
DROP TABLE t1;
#
# End of 5.5 tests
#


@ -83,6 +83,22 @@ SELECT * FROM t2 LEFT JOIN v1 ON t2.d1=v1.d1 WHERE v1.d1 IS NULL;
DROP VIEW v1;
DROP TABLE t1,t2;
--echo #
--echo # MDEV-15475: Assertion `!table || (!table->read_set ||
--echo # bitmap_is_set(table->read_set, field_index))'
--echo # failed on EXPLAIN EXTENDED with constant table and view
--echo #
CREATE TABLE t1 (pk INT PRIMARY KEY) ENGINE=MyISAM;
CREATE VIEW v1 AS SELECT * FROM t1;
INSERT INTO t1 VALUES (1);
EXPLAIN EXTENDED SELECT ISNULL(pk) FROM v1;
EXPLAIN EXTENDED SELECT IFNULL(pk,0) FROM v1;
# Cleanup
DROP VIEW v1;
DROP TABLE t1;
--echo #
--echo # End of 5.5 tests
--echo #


@ -2797,6 +2797,11 @@ SEC_TO_TIME(MAKEDATE(0,RAND(~0)))
838:59:59
Warnings:
Warning 1292 Truncated incorrect time value: '20000101'
SELECT PERIOD_DIFF(2018, AES_ENCRYPT('Rae Bareli', 'Rae Bareli'));
PERIOD_DIFF(2018, AES_ENCRYPT('Rae Bareli', 'Rae Bareli'))
24257
Warnings:
Warning 1292 Truncated incorrect INTEGER value: '-3S\xFA\xDE?\x00\x00\xCA\xB3\xEEE\xA4\xD1\xC1\xA8'
#
# End of 5.5 tests
#


@ -1702,6 +1702,10 @@ DO TO_DAYS(SEC_TO_TIME(TIME(CEILING(UUID()))));
DO TO_DAYS(SEC_TO_TIME(MAKEDATE('',RAND(~('')))));
SELECT SEC_TO_TIME(MAKEDATE(0,RAND(~0)));
#
# MDEV-16810 AddressSanitizer: stack-buffer-overflow in int10_to_str
#
SELECT PERIOD_DIFF(2018, AES_ENCRYPT('Rae Bareli', 'Rae Bareli'));
--echo #
--echo # End of 5.5 tests


@ -3893,5 +3893,22 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index a a 13 NULL 2 Using where; Using index
drop table t1;
#
# MDEV-15433: Optimizer does not use group by optimization with distinct
#
CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a INT NOT NULL, KEY(a));
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
EXPLAIN SELECT DISTINCT a FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL a 4 NULL 5 Using index for group-by
SELECT DISTINCT a FROM t1;
a
1
2
3
4
drop table t1;
#
# End of 10.1 tests
#


@ -1608,6 +1608,33 @@ explain select min(a) from t1 where a between "a" and "Cafeeeeeeeeeeeeeeeeeeeeee
explain select min(a) from t1 where a between "abbbbbbbbbbbbbbbbbbbb" and "Cafe2";
drop table t1;
--echo #
--echo # MDEV-15433: Optimizer does not use group by optimization with distinct
--echo #
CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a INT NOT NULL, KEY(a));
--disable_query_log
INSERT INTO t1(a) VALUES (1), (2), (3), (4);
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
INSERT INTO t1(a) SELECT a FROM t1;
--enable_query_log
OPTIMIZE TABLE t1;
EXPLAIN SELECT DISTINCT a FROM t1;
SELECT DISTINCT a FROM t1;
drop table t1;
--echo #
--echo # End of 10.1 tests
--echo #


@ -538,8 +538,6 @@ a
#
# End of 10.1 tests
#
ERROR 1300 (HY000): Invalid utf8 character string: 'test\xF0\x9F\x98\x81 '
ERROR 1300 (HY000): Invalid binary character string: 'test\xF0\x9F\x98\x81 '
ERROR 1300 (HY000) at line 2: Invalid utf8 character string: 'test\xF0\x9F\x98\x81'
set GLOBAL sql_mode=default;


@ -638,10 +638,7 @@ EOF
--echo # End of 10.1 tests
--echo #
--error 1
--exec $MYSQL --default-character-set=utf8 -e "select 1" "test😁 " 2>&1
--error 1
--exec $MYSQL --default-character-set=binary -e "select 1" "test😁 " 2>&1
--write_file $MYSQLTEST_VARDIR/tmp/mdev-6572.sql
SET NAMES utf8;
USE test😁 ;


@ -9,3 +9,5 @@ End of tests
2
X
3
ERROR 1300 (HY000): Invalid utf8 character string: 'test\xF0\x9F\x98\x81 '
ERROR 1300 (HY000): Invalid binary character string: 'test\xF0\x9F\x98\x81 '


@ -22,3 +22,10 @@ exec $MYSQL test -e "select
let $query = select 3
as X;
exec $MYSQL test -e "$query";
# Not run on Windows, since non-ASCII does not work on the command line.
# (MDEV-16220)
--error 1
--exec $MYSQL --default-character-set=utf8 -e "select 1" "test😁 " 2>&1
--error 1
--exec $MYSQL --default-character-set=binary -e "select 1" "test😁 " 2>&1


@ -231,6 +231,11 @@ The following specify which files/extra groups are read (specified before remain
cache, etc)
--enforce-storage-engine=name
Force the use of a storage engine for new tables
--eq-range-index-dive-limit=#
The optimizer will use existing index statistics instead
of doing index dives for equality ranges if the number of
equality ranges for the index is larger than or equal to
this number. If set to 0, index dives are always used.
--event-scheduler[=name]
Enable the event scheduler. Possible values are ON, OFF,
and DISABLED (keep the event scheduler completely
@ -1400,6 +1405,7 @@ encrypt-binlog FALSE
encrypt-tmp-disk-tables FALSE
encrypt-tmp-files FALSE
enforce-storage-engine (No default value)
eq-range-index-dive-limit 0
event-scheduler OFF
expensive-subquery-limit 100
expire-logs-days 0
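The help text for --eq-range-index-dive-limit above describes a plain threshold rule. A hypothetical helper (illustrative only, not the server's actual range-optimizer code) that expresses the same decision:

#include <cstdio>

/* Fall back to index statistics once the number of equality ranges
   reaches the limit; a limit of 0 means "always do index dives". */
static bool use_index_statistics(unsigned long n_eq_ranges,
                                 unsigned long dive_limit)
{
    return dive_limit != 0 && n_eq_ranges >= dive_limit;
}

int main()
{
    printf("%d\n", use_index_statistics(5, 0)); /* 0: limit 0, always dive */
    printf("%d\n", use_index_statistics(5, 2)); /* 1: 5 ranges >= limit 2 */
    printf("%d\n", use_index_statistics(1, 2)); /* 0: below the limit */
    return 0;
}

This is the switch exercised by the MDEV-16934 tests further below: with eq_range_index_dive_limit=0 the optimizer dives and estimates 7 rows, while with the limit set to 2 and five equality constants it uses index statistics instead (rec_per_key = 2, so 5 ranges yield an estimate of 10 rows).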


@ -2981,5 +2981,46 @@ deallocate prepare stmt;
set optimizer_switch=@save_optimizer_switch;
drop table t1,t2,t3;
#
# MDEV-16934: using system variable eq_range_index_dive_limit
# to reduce the number of index dives
#
create table t1 (a int, b varchar(31), index idx(a));
insert into t1 values
(7,'xxxx'), (1,'yy'), (3,'aaa'), (1,'bbb'), (2,'zz'),
(4,'vvvvv'), (7,'ddd'), (9,'zzzzz'), (1,'cc'), (5,'ffff');
insert into t1 select a+10, concat(b,'zz') from t1;
insert into t1 select a+15, concat(b,'yy') from t1;
insert into t1 select a+100, concat(b,'xx') from t1;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
select cast(count(a)/count(distinct a) as unsigned) as rec_per_key from t1;
rec_per_key
2
set eq_range_index_dive_limit=0;
explain select * from t1 where a in (8, 15, 31, 1, 9);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range idx idx 5 NULL 7 Using index condition
select * from t1 where a in (8, 15, 31, 1, 9);
a b
1 yy
1 bbb
1 cc
9 zzzzz
15 ffffzz
set eq_range_index_dive_limit=2;
explain select * from t1 where a in (8, 15, 31, 1, 9);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range idx idx 5 NULL 10 Using index condition
select * from t1 where a in (8, 15, 31, 1, 9);
a b
1 yy
1 bbb
1 cc
9 zzzzz
15 ffffzz
set eq_range_index_dive_limit=default;
drop table t1;
#
# End of 10.2 tests
#


@ -2017,6 +2017,39 @@ set optimizer_switch=@save_optimizer_switch;
drop table t1,t2,t3;
--echo #
--echo # MDEV-16934: using system variable eq_range_index_dive_limit
--echo # to reduce the number of index dives
--echo #
create table t1 (a int, b varchar(31), index idx(a));
insert into t1 values
(7,'xxxx'), (1,'yy'), (3,'aaa'), (1,'bbb'), (2,'zz'),
(4,'vvvvv'), (7,'ddd'), (9,'zzzzz'), (1,'cc'), (5,'ffff');
insert into t1 select a+10, concat(b,'zz') from t1;
insert into t1 select a+15, concat(b,'yy') from t1;
insert into t1 select a+100, concat(b,'xx') from t1;
analyze table t1;
select cast(count(a)/count(distinct a) as unsigned) as rec_per_key from t1;
let $q=
select * from t1 where a in (8, 15, 31, 1, 9);
set eq_range_index_dive_limit=0;
eval explain $q;
eval $q;
set eq_range_index_dive_limit=2;
eval explain $q;
eval $q;
set eq_range_index_dive_limit=default;
drop table t1;
--echo #
--echo # End of 10.2 tests
--echo #


@ -2993,6 +2993,47 @@ deallocate prepare stmt;
set optimizer_switch=@save_optimizer_switch;
drop table t1,t2,t3;
#
# MDEV-16934: using system variable eq_range_index_dive_limit
# to reduce the number of index dives
#
create table t1 (a int, b varchar(31), index idx(a));
insert into t1 values
(7,'xxxx'), (1,'yy'), (3,'aaa'), (1,'bbb'), (2,'zz'),
(4,'vvvvv'), (7,'ddd'), (9,'zzzzz'), (1,'cc'), (5,'ffff');
insert into t1 select a+10, concat(b,'zz') from t1;
insert into t1 select a+15, concat(b,'yy') from t1;
insert into t1 select a+100, concat(b,'xx') from t1;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
select cast(count(a)/count(distinct a) as unsigned) as rec_per_key from t1;
rec_per_key
2
set eq_range_index_dive_limit=0;
explain select * from t1 where a in (8, 15, 31, 1, 9);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range idx idx 5 NULL 7 Using index condition; Rowid-ordered scan
select * from t1 where a in (8, 15, 31, 1, 9);
a b
1 yy
1 bbb
9 zzzzz
1 cc
15 ffffzz
set eq_range_index_dive_limit=2;
explain select * from t1 where a in (8, 15, 31, 1, 9);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range idx idx 5 NULL 10 Using index condition; Rowid-ordered scan
select * from t1 where a in (8, 15, 31, 1, 9);
a b
1 yy
1 bbb
9 zzzzz
1 cc
15 ffffzz
set eq_range_index_dive_limit=default;
drop table t1;
#
# End of 10.2 tests
#
set optimizer_switch=@mrr_icp_extra_tmp;


@ -482,3 +482,25 @@ DROP TABLE t1,t2;
set optimizer_switch= @tmp_subselect_extra_derived;
set optimizer_switch= @subselect_extra_no_sj_tmp;
set @optimizer_switch_for_subselect_extra_test=null;
#
# MDEV-6439: Server crashes in Explain_union::print_explain with explain in slow log, tis620 charset
#
connect con1,localhost,root,,;
SET NAMES tis620;
set @tmp= @@global.slow_query_log;
SET GLOBAL slow_query_log = 1;
SET long_query_time = 0.000001;
SET log_slow_verbosity = 'explain';
CREATE TABLE t1 (a VARCHAR(3)) ENGINE=MyISAM;
SELECT * FROM t1 WHERE a >= ANY ( SELECT 'foo');
a
SELECT * FROM t1 WHERE a >= ANY ( SELECT 'foo' UNION SELECT 'bar' );
ERROR HY000: Illegal mix of collations (tis620_thai_ci,COERCIBLE) and (latin1_swedish_ci,IMPLICIT) for operation '<='
create table t2 (b int);
insert into t2 values (1),(2),(3);
SELECT * FROM t1 WHERE a >= ANY ( SELECT 'foo' FROM t2);
ERROR HY000: Illegal mix of collations (tis620_thai_ci,COERCIBLE) and (latin1_swedish_ci,IMPLICIT) for operation '<='
drop table t1,t2;
SET GLOBAL slow_query_log=@tmp;
disconnect con1;
connection default;


@ -6,4 +6,33 @@ set @optimizer_switch_for_subselect_extra_test='semijoin=off,firstmatch=off,loo
set optimizer_switch= @subselect_extra_no_sj_tmp;
set @optimizer_switch_for_subselect_extra_test=null;
--echo #
--echo # MDEV-6439: Server crashes in Explain_union::print_explain with explain in slow log, tis620 charset
--echo #
## Using a separate client connection is easier than restoring state
connect(con1,localhost,root,,);
SET NAMES tis620;
set @tmp= @@global.slow_query_log;
SET GLOBAL slow_query_log = 1;
SET long_query_time = 0.000001;
SET log_slow_verbosity = 'explain';
CREATE TABLE t1 (a VARCHAR(3)) ENGINE=MyISAM;
SELECT * FROM t1 WHERE a >= ANY ( SELECT 'foo');
--error ER_CANT_AGGREGATE_2COLLATIONS
SELECT * FROM t1 WHERE a >= ANY ( SELECT 'foo' UNION SELECT 'bar' );
create table t2 (b int);
insert into t2 values (1),(2),(3);
--error ER_CANT_AGGREGATE_2COLLATIONS
SELECT * FROM t1 WHERE a >= ANY ( SELECT 'foo' FROM t2);
drop table t1,t2;
SET GLOBAL slow_query_log=@tmp;
disconnect con1;
connection default;


@ -499,7 +499,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select '1 - 01','2 - 01' having (<cache>(`test`.`t1`.`a1`) = '1 - 01' or '1 - 01' is null) and (<cache>(`test`.`t1`.`a2`) = '2 - 01' or '2 - 01' is null) and '1 - 01' is null and '2 - 01' is null)))
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select '1 - 01','2 - 01' having (<cache>(`test`.`t1`.`a1`) = '1 - 01' or /*always not null*/ 1 is null) and (<cache>(`test`.`t1`.`a2`) = '2 - 01' or /*always not null*/ 1 is null) and '1 - 01' is null and '2 - 01' is null)))
select * from t1 where (a1, a2) in (select '1 - 01', '2 - 01');
a1 a2
1 - 01 2 - 01
@ -509,7 +509,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select '1 - 01','2 - 01' having (<cache>(`test`.`t1`.`a1`) = '1 - 01' or '1 - 01' is null) and (<cache>(`test`.`t1`.`a2`) = '2 - 01' or '2 - 01' is null) and '1 - 01' is null and '2 - 01' is null)))
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select '1 - 01','2 - 01' having (<cache>(`test`.`t1`.`a1`) = '1 - 01' or /*always not null*/ 1 is null) and (<cache>(`test`.`t1`.`a2`) = '2 - 01' or /*always not null*/ 1 is null) and '1 - 01' is null and '2 - 01' is null)))
select * from t1 where (a1, a2) in (select '1 - 01', '2 - 01' from dual);
a1 a2
1 - 01 2 - 01
@ -1925,7 +1925,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (/* select#2 */ select max(`test`.`t2`.`c`) from `test`.`t2`) join `test`.`t1` where `test`.`t1`.`b` = 7 and `test`.`t1`.`a` = `<subquery2>`.`MAX(c)` and (<cache>(`<subquery2>`.`MAX(c)` is null) or `<subquery2>`.`MAX(c)` = 7)
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (/* select#2 */ select max(`test`.`t2`.`c`) from `test`.`t2`) join `test`.`t1` where `test`.`t1`.`b` = 7 and `test`.`t1`.`a` = `<subquery2>`.`MAX(c)` and (<cache>(/*always not null*/ 1 is null) or `<subquery2>`.`MAX(c)` = 7)
SELECT * FROM t1
WHERE a IN (SELECT MAX(c) FROM t2) AND b=7 AND (a IS NULL OR a=b);
a b


@ -520,7 +520,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select '1 - 01','2 - 01' having (<cache>(`test`.`t1`.`a1`) = '1 - 01' or '1 - 01' is null) and (<cache>(`test`.`t1`.`a2`) = '2 - 01' or '2 - 01' is null) and '1 - 01' is null and '2 - 01' is null)))
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select '1 - 01','2 - 01' having (<cache>(`test`.`t1`.`a1`) = '1 - 01' or /*always not null*/ 1 is null) and (<cache>(`test`.`t1`.`a2`) = '2 - 01' or /*always not null*/ 1 is null) and '1 - 01' is null and '2 - 01' is null)))
select * from t1 where (a1, a2) in (select '1 - 01', '2 - 01');
a1 a2
1 - 01 2 - 01
@ -530,7 +530,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select '1 - 01','2 - 01' having (<cache>(`test`.`t1`.`a1`) = '1 - 01' or '1 - 01' is null) and (<cache>(`test`.`t1`.`a2`) = '2 - 01' or '2 - 01' is null) and '1 - 01' is null and '2 - 01' is null)))
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select '1 - 01','2 - 01' having (<cache>(`test`.`t1`.`a1`) = '1 - 01' or /*always not null*/ 1 is null) and (<cache>(`test`.`t1`.`a2`) = '2 - 01' or /*always not null*/ 1 is null) and '1 - 01' is null and '2 - 01' is null)))
select * from t1 where (a1, a2) in (select '1 - 01', '2 - 01' from dual);
a1 a2
1 - 01 2 - 01
@ -1963,7 +1963,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (/* select#2 */ select max(`test`.`t2`.`c`) from `test`.`t2`) join `test`.`t1` where `test`.`t1`.`b` = 7 and `test`.`t1`.`a` = `<subquery2>`.`MAX(c)` and (<cache>(`<subquery2>`.`MAX(c)` is null) or `<subquery2>`.`MAX(c)` = 7)
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (/* select#2 */ select max(`test`.`t2`.`c`) from `test`.`t2`) join `test`.`t1` where `test`.`t1`.`b` = 7 and `test`.`t1`.`a` = `<subquery2>`.`MAX(c)` and (<cache>(/*always not null*/ 1 is null) or `<subquery2>`.`MAX(c)` = 7)
SELECT * FROM t1
WHERE a IN (SELECT MAX(c) FROM t2) AND b=7 AND (a IS NULL OR a=b);
a b


@ -3219,8 +3219,8 @@ DROP TABLE fv_test, fv_result;
#
CREATE TABLE t1 (a int);
INSERT INTO t1 VALUES (0),(1),(2);
SELECT LEAD(a) OVER (PARTITION BY a) as lead,
a AND LEAD(a) OVER (PARTITION BY a) AS a_and_lead_part
SELECT LEAD(a) OVER (PARTITION BY a ORDER BY a) as lead,
a AND LEAD(a) OVER (PARTITION BY a ORDER BY a) AS a_and_lead_part
FROM t1;
lead a_and_lead_part
NULL 0


@ -2000,8 +2000,8 @@ DROP TABLE fv_test, fv_result;
CREATE TABLE t1 (a int);
INSERT INTO t1 VALUES (0),(1),(2);
SELECT LEAD(a) OVER (PARTITION BY a) as lead,
a AND LEAD(a) OVER (PARTITION BY a) AS a_and_lead_part
SELECT LEAD(a) OVER (PARTITION BY a ORDER BY a) as lead,
a AND LEAD(a) OVER (PARTITION BY a ORDER BY a) AS a_and_lead_part
FROM t1;
SELECT a OR LEAD(a) OVER (ORDER BY a) AS a_or_lead_order


@ -226,4 +226,15 @@ pk a b a+b lag(a + b) over (partition by a order by pk) + pk
9 2 2 4 12
10 2 0 2 14
11 2 10 12 13
#
# MDEV-15204 - LAG function doesn't require ORDER BY in OVER clause
#
select pk,
lag(pk, 1) over ()
from t1;
ERROR HY000: No order list in window specification for 'lag'
select pk,
lead(pk, 1) over ()
from t1;
ERROR HY000: No order list in window specification for 'lead'
drop table t1;


@ -107,4 +107,17 @@ select pk, a, b, a+b,
from t1
order by pk asc;
--echo #
--echo # MDEV-15204 - LAG function doesn't require ORDER BY in OVER clause
--echo #
--error ER_NO_ORDER_LIST_IN_WINDOW_SPEC
select pk,
lag(pk, 1) over ()
from t1;
--error ER_NO_ORDER_LIST_IN_WINDOW_SPEC
select pk,
lead(pk, 1) over ()
from t1;
drop table t1;


@ -1,21 +1,10 @@
SET GLOBAL general_log='OFF';
TRUNCATE TABLE mysql.general_log;
SELECT COUNT(*) from mysql.general_log;
COUNT(*)
0
SELECT * FROM mysql.general_log;
event_time user_host thread_id server_id command_type argument
SET GLOBAL general_log='OFF';
TRUNCATE TABLE mysql.general_log;
SELECT COUNT(*) from mysql.general_log;
COUNT(*)
0
SELECT * FROM mysql.general_log;
event_time user_host thread_id server_id command_type argument
SET GLOBAL general_log='ON';
SELECT COUNT(*) from mysql.general_log;
COUNT(*)
1
SELECT argument from mysql.general_log WHERE argument NOT LIKE 'SELECT%';
argument
SET SESSION wsrep_osu_method=TOI;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
SET SESSION wsrep_osu_method=RSU;


@ -0,0 +1,161 @@
connection node_1;
connection node_2;
connection node_2;
SET GLOBAL wsrep_forced_binlog_format='STATEMENT';
connection node_1;
SET GLOBAL wsrep_forced_binlog_format='STATEMENT';
CREATE TABLE t1 (
i int(11) NOT NULL AUTO_INCREMENT,
c char(32) DEFAULT 'dummy_text',
PRIMARY KEY (i)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
insert into t1(i) values(null);
select * from t1;
i c
1 dummy_text
insert into t1(i) values(null), (null), (null);
select * from t1;
i c
1 dummy_text
3 dummy_text
5 dummy_text
7 dummy_text
connection node_2;
select * from t1;
i c
1 dummy_text
3 dummy_text
5 dummy_text
7 dummy_text
SET GLOBAL wsrep_forced_binlog_format='none';
connection node_1;
SET GLOBAL wsrep_forced_binlog_format='none';
drop table t1;
SET SESSION binlog_format='STATEMENT';
show variables like 'binlog_format';
Variable_name Value
binlog_format STATEMENT
SET GLOBAL wsrep_auto_increment_control='OFF';
SET SESSION auto_increment_increment = 3;
SET SESSION auto_increment_offset = 1;
CREATE TABLE t1 (
i int(11) NOT NULL AUTO_INCREMENT,
c char(32) DEFAULT 'dummy_text',
PRIMARY KEY (i)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
insert into t1(i) values(null);
select * from t1;
i c
1 dummy_text
insert into t1(i) values(null), (null), (null);
select * from t1;
i c
1 dummy_text
4 dummy_text
7 dummy_text
10 dummy_text
connection node_2;
select * from t1;
i c
1 dummy_text
4 dummy_text
7 dummy_text
10 dummy_text
connection node_1;
SET GLOBAL wsrep_auto_increment_control='ON';
SET SESSION binlog_format='ROW';
show variables like 'binlog_format';
Variable_name Value
binlog_format ROW
show variables like '%auto_increment%';
Variable_name Value
auto_increment_increment 2
auto_increment_offset 1
wsrep_auto_increment_control ON
SET GLOBAL wsrep_auto_increment_control='OFF';
show variables like '%auto_increment%';
Variable_name Value
auto_increment_increment 3
auto_increment_offset 1
wsrep_auto_increment_control OFF
SET GLOBAL wsrep_auto_increment_control='ON';
drop table t1;
connection node_2;
SET GLOBAL wsrep_forced_binlog_format='ROW';
connection node_1;
SET GLOBAL wsrep_forced_binlog_format='ROW';
CREATE TABLE t1 (
i int(11) NOT NULL AUTO_INCREMENT,
c char(32) DEFAULT 'dummy_text',
PRIMARY KEY (i)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
insert into t1(i) values(null);
select * from t1;
i c
1 dummy_text
insert into t1(i) values(null), (null), (null);
select * from t1;
i c
1 dummy_text
3 dummy_text
5 dummy_text
7 dummy_text
connection node_2;
select * from t1;
i c
1 dummy_text
3 dummy_text
5 dummy_text
7 dummy_text
SET GLOBAL wsrep_forced_binlog_format='none';
connection node_1;
SET GLOBAL wsrep_forced_binlog_format='none';
drop table t1;
SET SESSION binlog_format='ROW';
show variables like 'binlog_format';
Variable_name Value
binlog_format ROW
SET GLOBAL wsrep_auto_increment_control='OFF';
SET SESSION auto_increment_increment = 3;
SET SESSION auto_increment_offset = 1;
CREATE TABLE t1 (
i int(11) NOT NULL AUTO_INCREMENT,
c char(32) DEFAULT 'dummy_text',
PRIMARY KEY (i)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
insert into t1(i) values(null);
select * from t1;
i c
1 dummy_text
insert into t1(i) values(null), (null), (null);
select * from t1;
i c
1 dummy_text
4 dummy_text
7 dummy_text
10 dummy_text
connection node_2;
select * from t1;
i c
1 dummy_text
4 dummy_text
7 dummy_text
10 dummy_text
connection node_1;
SET GLOBAL wsrep_auto_increment_control='ON';
show variables like 'binlog_format';
Variable_name Value
binlog_format ROW
show variables like '%auto_increment%';
Variable_name Value
auto_increment_increment 2
auto_increment_offset 1
wsrep_auto_increment_control ON
SET GLOBAL wsrep_auto_increment_control='OFF';
show variables like '%auto_increment%';
Variable_name Value
auto_increment_increment 3
auto_increment_offset 1
wsrep_auto_increment_control OFF
SET GLOBAL wsrep_auto_increment_control='ON';
drop table t1;


@ -2,7 +2,6 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
SET GLOBAL wsrep_provider_options = 'gcs.fc_limit=1';
SET GLOBAL wsrep_desync = TRUE;
FLUSH TABLES WITH READ LOCK;
connection node_1;
INSERT INTO t1 VALUES (2);
@ -19,7 +18,6 @@ SET SESSION wsrep_sync_wait = 0;
SELECT COUNT(*) = 1 FROM t1;
COUNT(*) = 1
1
SET GLOBAL wsrep_desync = FALSE;
UNLOCK TABLES;
SET SESSION wsrep_sync_wait = 1;
SELECT COUNT(*) = 10 FROM t1;


@ -8,20 +8,21 @@
--connection node_1
SET GLOBAL general_log='OFF';
TRUNCATE TABLE mysql.general_log;
SELECT COUNT(*) from mysql.general_log;
SELECT * FROM mysql.general_log;
--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log;
--source include/wait_condition.inc
--sleep 1
--connection node_2
SET GLOBAL general_log='OFF';
TRUNCATE TABLE mysql.general_log;
SELECT COUNT(*) from mysql.general_log;
SELECT * FROM mysql.general_log;
--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log;
--source include/wait_condition.inc
--sleep 1
--connection node_1
SET GLOBAL general_log='ON';
SELECT COUNT(*) from mysql.general_log;
SELECT argument from mysql.general_log WHERE argument NOT LIKE 'SELECT%';
SET SESSION wsrep_osu_method=TOI;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
SET SESSION wsrep_osu_method=RSU;


@ -0,0 +1,223 @@
##
## Tests the auto-increment with binlog in STATEMENT mode.
##
--source include/galera_cluster.inc
--source include/have_innodb.inc
--let $node_1=node_1
--let $node_2=node_2
--source include/auto_increment_offset_save.inc
##
## Verify the correct operation of the auto-increment when the binlog
## format is artificially set to 'STATEMENT' (although this mode is
## not recommended in the current version):
##
--connection node_2
SET GLOBAL wsrep_forced_binlog_format='STATEMENT';
--connection node_1
SET GLOBAL wsrep_forced_binlog_format='STATEMENT';
CREATE TABLE t1 (
i int(11) NOT NULL AUTO_INCREMENT,
c char(32) DEFAULT 'dummy_text',
PRIMARY KEY (i)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
insert into t1(i) values(null);
select * from t1;
insert into t1(i) values(null), (null), (null);
select * from t1;
--connection node_2
select * from t1;
SET GLOBAL wsrep_forced_binlog_format='none';
--connection node_1
SET GLOBAL wsrep_forced_binlog_format='none';
drop table t1;
##
## Check the operation when the automatic control over the auto-increment
## settings is switched off, that is, when we use the increment step and
## the offset specified by the user. In the current session, the binlog
## format is set to 'STATEMENT'. It is important that the values of the
## auto-increment options do not change on the other node - this allows us
## to check the correct transmission of the auto-increment options to
## other nodes:
##
--disable_warnings
SET SESSION binlog_format='STATEMENT';
--enable_warnings
show variables like 'binlog_format';
SET GLOBAL wsrep_auto_increment_control='OFF';
SET SESSION auto_increment_increment = 3;
SET SESSION auto_increment_offset = 1;
CREATE TABLE t1 (
i int(11) NOT NULL AUTO_INCREMENT,
c char(32) DEFAULT 'dummy_text',
PRIMARY KEY (i)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
insert into t1(i) values(null);
select * from t1;
insert into t1(i) values(null), (null), (null);
select * from t1;
--connection node_2
select * from t1;
--connection node_1
##
## Verify the return to automatic calculation of the step
## and offset of the auto-increment:
##
SET GLOBAL wsrep_auto_increment_control='ON';
SET SESSION binlog_format='ROW';
show variables like 'binlog_format';
show variables like '%auto_increment%';
##
## Verify the recovery of original user-defined values after
## stopping the automatic control over auto-increment:
##
SET GLOBAL wsrep_auto_increment_control='OFF';
show variables like '%auto_increment%';
##
## Restore original options and drop test table:
##
SET GLOBAL wsrep_auto_increment_control='ON';
drop table t1;
##
## Verify the correct operation of the auto-increment when the binlog
## format is set to 'ROW':
##
--connection node_2
SET GLOBAL wsrep_forced_binlog_format='ROW';
--connection node_1
SET GLOBAL wsrep_forced_binlog_format='ROW';
CREATE TABLE t1 (
i int(11) NOT NULL AUTO_INCREMENT,
c char(32) DEFAULT 'dummy_text',
PRIMARY KEY (i)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
insert into t1(i) values(null);
select * from t1;
insert into t1(i) values(null), (null), (null);
select * from t1;
--connection node_2
select * from t1;
SET GLOBAL wsrep_forced_binlog_format='none';
--connection node_1
SET GLOBAL wsrep_forced_binlog_format='none';
drop table t1;
##
## Check the operation when the automatic control over the auto-increment
## settings is switched off, that is, when we use the increment step and
## the offset specified by the user. In the current session, the binlog
## format is set to 'ROW'. It is important that the values of the
## auto-increment options do not change on the other node - this allows us
## to check the correct transmission of the auto-increment options to
## other nodes:
##
SET SESSION binlog_format='ROW';
show variables like 'binlog_format';
SET GLOBAL wsrep_auto_increment_control='OFF';
SET SESSION auto_increment_increment = 3;
SET SESSION auto_increment_offset = 1;
CREATE TABLE t1 (
i int(11) NOT NULL AUTO_INCREMENT,
c char(32) DEFAULT 'dummy_text',
PRIMARY KEY (i)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
insert into t1(i) values(null);
select * from t1;
insert into t1(i) values(null), (null), (null);
select * from t1;
--connection node_2
select * from t1;
--connection node_1
##
## Verify the return to automatic calculation of the step
## and offset of the auto-increment:
##
SET GLOBAL wsrep_auto_increment_control='ON';
show variables like 'binlog_format';
show variables like '%auto_increment%';
##
## Verify the recovery of original user-defined values after
## stopping the automatic control over auto-increment:
##
SET GLOBAL wsrep_auto_increment_control='OFF';
show variables like '%auto_increment%';
##
## Restore original options and drop test table:
##
SET GLOBAL wsrep_auto_increment_control='ON';
drop table t1;
--source include/auto_increment_offset_restore.inc


@ -1,5 +1,7 @@
#
# Test wsrep_desync = ON . Node should temporarily not participate in flow control
# Desync will be done once the global read lock is acquired and resync will be done when
# it is released.
# Node should temporarily not participate in flow control
# so even if fc_limit has been reached, the master should be able to continue to
# commit transactions.
#
@ -13,7 +15,6 @@ INSERT INTO t1 VALUES (1);
--connection node_2
--let $wsrep_provider_options_orig = `SELECT @@wsrep_provider_options`
SET GLOBAL wsrep_provider_options = 'gcs.fc_limit=1';
SET GLOBAL wsrep_desync = TRUE;
# Block the slave applier thread
FLUSH TABLES WITH READ LOCK;
@ -37,8 +38,6 @@ SET SESSION wsrep_sync_wait = 0;
# No updates have arrived after the FLUSH TABLES
SELECT COUNT(*) = 1 FROM t1;
# Resync the slave
SET GLOBAL wsrep_desync = FALSE;
--disable_query_log
--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_orig';
--enable_query_log


@ -1 +0,0 @@
unsupported_redo : MDEV-16791 allows optimized redo


@ -0,0 +1,4 @@
CREATE TABLE t1 ENGINE=InnoDB SELECT 1;
DROP TABLE t1;
SET GLOBAL innodb_log_checkpoint_now=1;
SET GLOBAL innodb_log_checkpoint_now=DEFAULT;


@ -0,0 +1,19 @@
--source include/have_debug.inc
let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
mkdir $targetdir;
CREATE TABLE t1 ENGINE=InnoDB SELECT 1;
--let after_load_tablespaces=TRUNCATE test.t1
--disable_result_log
--error 1
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --dbug=+d,mariabackup_events;
--enable_result_log
--let after_load_tablespaces=
DROP TABLE t1;
SET GLOBAL innodb_log_checkpoint_now=1;
SET GLOBAL innodb_log_checkpoint_now=DEFAULT;
rmdir $targetdir;


@ -7,7 +7,7 @@ call mtr.add_suppression("InnoDB: Cannot open datafile for read-only: ");
call mtr.add_suppression("Table .* in the InnoDB data dictionary has tablespace id .*, but tablespace with that id or name does not exist");
CREATE TABLE t1(i INT PRIMARY KEY auto_increment, a int) ENGINE INNODB;
ALTER TABLE t1 FORCE, ALGORITHM=INPLACE;
# Fails during full backup
# No longer fails during full backup
DROP TABLE t1;
CREATE TABLE t1(i INT PRIMARY KEY auto_increment, a int) ENGINE INNODB;
INSERT INTO t1(a) select 1 union select 2 union select 3;


@ -11,33 +11,15 @@ let $basedir=$MYSQLTEST_VARDIR/tmp/backup;
let $incremental_dir=$MYSQLTEST_VARDIR/tmp/backup_inc1;
CREATE TABLE t1(i INT PRIMARY KEY auto_increment, a int) ENGINE INNODB;
--source ../../suite/innodb/include/no_checkpoint_start.inc
ALTER TABLE t1 FORCE, ALGORITHM=INPLACE;
# Below mariabackup operation may complete successfully if checkpoint happens
# after the alter table command.
echo # Fails during full backup;
echo # No longer fails during full backup;
--disable_result_log
--error 0,1
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir;
--enable_result_log
DROP TABLE t1;
--let MYSQLD_DATADIR=$basedir/
perl;
open(OUT, ">$ENV{MYSQLTEST_VARDIR}/log/check.txt") || die;
print OUT '
--let no_checkpoint_end=1
--let CLEANUP_IF_CHECKPOINT=rmdir $basedir;
--source ../../suite/innodb/include/no_checkpoint_end.inc
--exit Backup failed to fail despite MLOG_INDEX_LOAD record
' if (-f "$ENV{MYSQLD_DATADIR}/xtrabackup_info");
close(OUT);
EOF
--source $MYSQLTEST_VARDIR/log/check.txt
--remove_file $MYSQLTEST_VARDIR/log/check.txt
rmdir $basedir;
CREATE TABLE t1(i INT PRIMARY KEY auto_increment, a int) ENGINE INNODB;
@ -50,29 +32,14 @@ INSERT INTO t1(a) select 1 union select 2 union select 3;
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir;
--enable_result_log
--source ../../suite/innodb/include/no_checkpoint_start.inc
ALTER TABLE t1 FORCE, ALGORITHM=INPLACE;
--disable_result_log
--error 0,1
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$incremental_dir --incremental-basedir=$basedir;
--enable_result_log
DROP TABLE t1;
--let MYSQLD_DATADIR=$incremental_dir/
perl;
open(OUT, ">$ENV{MYSQLTEST_VARDIR}/log/check.txt") || die;
print OUT '
--let no_checkpoint_end=1
--let CLEANUP_IF_CHECKPOINT=rmdir $basedir;rmdir $incremental_dir;
--source ../../suite/innodb/include/no_checkpoint_end.inc
--exit Backup failed to fail despite MLOG_INDEX_LOAD record
' if (-f "$ENV{MYSQLD_DATADIR}/xtrabackup_info");
close(OUT);
EOF
--source $MYSQLTEST_VARDIR/log/check.txt
--remove_file $MYSQLTEST_VARDIR/log/check.txt
rmdir $basedir;rmdir $incremental_dir;
CREATE TABLE t1(i INT) ENGINE INNODB;


@ -0,0 +1,16 @@
include/master-slave.inc
[connection master]
CREATE TABLE t1 (g POINT NOT NULL, SPATIAL INDEX(g));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT('Point(1 1)'));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT('Point(2 1)'));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT('Point(1 2)'));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT('Point(2 2)'));
DELETE FROM t1 where MBREqual(g, ST_GEOMFROMTEXT('Point(1 2)'));
connection slave;
select count(*) from t1;
count(*)
3
connection master;
DELETE FROM t1;
drop table t1;
include/rpl_end.inc


@ -0,0 +1,17 @@
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
CREATE TABLE t1 (g POINT NOT NULL, SPATIAL INDEX(g));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT('Point(1 1)'));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT('Point(2 1)'));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT('Point(1 2)'));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT('Point(2 2)'));
DELETE FROM t1 where MBREqual(g, ST_GEOMFROMTEXT('Point(1 2)'));
--sync_slave_with_master
select count(*) from t1;
--connection master
DELETE FROM t1;
drop table t1;
--source include/rpl_end.inc


@ -880,6 +880,20 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME EQ_RANGE_INDEX_DIVE_LIMIT
SESSION_VALUE 0
GLOBAL_VALUE 0
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE SESSION
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The optimizer will use existing index statistics instead of doing index dives for equality ranges if the number of equality ranges for the index is larger than or equal to this number. If set to 0, index dives are always used.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME ERROR_COUNT
SESSION_VALUE 0
GLOBAL_VALUE NULL


@ -894,6 +894,20 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME EQ_RANGE_INDEX_DIVE_LIMIT
SESSION_VALUE 0
GLOBAL_VALUE 0
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE SESSION
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The optimizer will use existing index statistics instead of doing index dives for equality ranges if the number of equality ranges for the index is larger than or equal to this number. If set to 0, index dives are always used.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME ERROR_COUNT
SESSION_VALUE 0
GLOBAL_VALUE NULL


@ -89,3 +89,13 @@ pk left(c, 10) length(c) i
1 bar bar ba 60000 11
drop table t1;
disconnect c1;
CREATE TABLE t1 (b BLOB, vb TEXT AS (b) PERSISTENT, KEY(vb(64))) ENGINE=InnoDB;
INSERT INTO t1 (b) VALUES ('foo');
connect con1,localhost,root,,test;
CREATE TABLE t2 LIKE t1;
connection default;
DELETE FROM t1;
connection con1;
disconnect con1;
connection default;
DROP TABLE t1, t2;


@ -79,3 +79,19 @@ commit;
select pk, left(c, 10), length(c), i from t1;
drop table t1;
disconnect c1;
#
# MDEV-16961 Assertion `!table || (!table->read_set || bitmap_is_set(table->read_set, field_index))' failed upon concurrent DELETE and DDL with virtual blob column
#
CREATE TABLE t1 (b BLOB, vb TEXT AS (b) PERSISTENT, KEY(vb(64))) ENGINE=InnoDB;
INSERT INTO t1 (b) VALUES ('foo');
--connect (con1,localhost,root,,test)
--send CREATE TABLE t2 LIKE t1
--connection default
DELETE FROM t1;
--connection con1
--reap
--disconnect con1
--connection default
DROP TABLE t1, t2;


@ -41,6 +41,8 @@ cleanup_joiner()
kill -9 $RSYNC_REAL_PID >/dev/null 2>&1 || \
:
rm -rf "$RSYNC_CONF"
rm -f "$STUNNEL_CONF"
rm -f "$STUNNEL_PID"
rm -rf "$MAGIC_FILE"
rm -rf "$RSYNC_PID"
wsrep_log_info "Joiner cleanup done."
@ -68,7 +70,7 @@ check_pid_and_port()
local port_info="$(sockstat -46lp ${rsync_port} 2>/dev/null | \
grep ":${rsync_port}")"
local is_rsync="$(echo $port_info | \
grep '[[:space:]]\+rsync[[:space:]]\+'"$rsync_pid" 2>/dev/null)"
grep -E '[[:space:]]+(rsync|stunnel)[[:space:]]+'"$rsync_pid" 2>/dev/null)"
;;
*)
if ! command -v lsof > /dev/null; then
@ -79,7 +81,7 @@ check_pid_and_port()
local port_info="$(lsof -i :$rsync_port -Pn 2>/dev/null | \
grep "(LISTEN)")"
local is_rsync="$(echo $port_info | \
grep -w '^rsync[[:space:]]\+'"$rsync_pid" 2>/dev/null)"
grep -E '^(rsync|stunnel)[[:space:]]+'"$rsync_pid" 2>/dev/null)"
;;
esac
@ -120,6 +122,12 @@ is_local_ip()
$get_addr_bin | grep "$address" > /dev/null
}
STUNNEL_CONF="$WSREP_SST_OPT_DATA/stunnel.conf"
rm -f "$STUNNEL_CONF"
STUNNEL_PID="$WSREP_SST_OPT_DATA/stunnel.pid"
rm -f "$STUNNEL_PID"
MAGIC_FILE="$WSREP_SST_OPT_DATA/rsync_sst_complete"
rm -rf "$MAGIC_FILE"
@ -157,9 +165,28 @@ fi
FILTER="-f '- /lost+found' -f '- /.fseventsd' -f '- /.Trashes'
-f '+ /wsrep_sst_binlog.tar' -f '+ /ib_lru_dump' -f '+ /ibdata*' -f '+ /*/' -f '- /*'"
SSTKEY=$(parse_cnf sst tkey "")
SSTCERT=$(parse_cnf sst tcert "")
STUNNEL=""
if [ -f "$SSTKEY" ] && [ -f "$SSTCERT" ] && wsrep_check_programs stunnel
then
STUNNEL="stunnel ${STUNNEL_CONF}"
fi
if [ "$WSREP_SST_OPT_ROLE" = "donor" ]
then
cat << EOF > "$STUNNEL_CONF"
CApath = ${SSTCERT%/*}
foreground = yes
pid = $STUNNEL_PID
debug = warning
client = yes
connect = ${WSREP_SST_OPT_ADDR%/*}
TIMEOUTclose = 0
verifyPeer = yes
EOF
if [ $WSREP_SST_OPT_BYPASS -eq 0 ]
then
@ -221,7 +248,8 @@ then
# first, the normal directories, so that we can detect incompatible protocol
RC=0
eval rsync --owner --group --perms --links --specials \
eval rsync ${STUNNEL:+--rsh="$STUNNEL"} \
--owner --group --perms --links --specials \
--ignore-times --inplace --dirs --delete --quiet \
$WHOLE_FILE_OPT ${FILTER} "$WSREP_SST_OPT_DATA/" \
rsync://$WSREP_SST_OPT_ADDR >&2 || RC=$?
@ -244,7 +272,8 @@ then
fi
# second, we transfer InnoDB log files
rsync --owner --group --perms --links --specials \
rsync ${STUNNEL:+--rsh="$STUNNEL"} \
--owner --group --perms --links --specials \
--ignore-times --inplace --dirs --delete --quiet \
$WHOLE_FILE_OPT -f '+ /ib_logfile[0-9]*' -f '- **' "$WSREP_LOG_DIR/" \
rsync://$WSREP_SST_OPT_ADDR-log_dir >&2 || RC=$?
@ -264,7 +293,8 @@ then
find . -maxdepth 1 -mindepth 1 -type d -not -name "lost+found" \
-print0 | xargs -I{} -0 -P $count \
rsync --owner --group --perms --links --specials \
rsync ${STUNNEL:+--rsh="$STUNNEL"} \
--owner --group --perms --links --specials \
--ignore-times --inplace --recursive --delete --quiet \
$WHOLE_FILE_OPT --exclude '*/ib_logfile*' "$WSREP_SST_OPT_DATA"/{}/ \
rsync://$WSREP_SST_OPT_ADDR/{} >&2 || RC=$?
@ -287,7 +317,8 @@ then
echo "continue" # now server can resume updating data
echo "$STATE" > "$MAGIC_FILE"
rsync --archive --quiet --checksum "$MAGIC_FILE" rsync://$WSREP_SST_OPT_ADDR
rsync ${STUNNEL:+--rsh="$STUNNEL"} \
--archive --quiet --checksum "$MAGIC_FILE" rsync://$WSREP_SST_OPT_ADDR
echo "done $STATE"
@ -348,14 +379,37 @@ EOF
# If the IP is local listen only in it
if is_local_ip "$RSYNC_ADDR"
then
rsync --daemon --no-detach --address "$RSYNC_ADDR" --port "$RSYNC_PORT" --config "$RSYNC_CONF" &
RSYNC_EXTRA_ARGS="--address $RSYNC_ADDR"
STUNNEL_ACCEPT="$RSYNC_ADDR:$RSYNC_PORT"
else
# Not local, possibly a NAT, listen in all interface
rsync --daemon --no-detach --port "$RSYNC_PORT" --config "$RSYNC_CONF" &
# Not local, possibly a NAT, listen on all interfaces
RSYNC_EXTRA_ARGS=""
STUNNEL_ACCEPT="$RSYNC_PORT"
# Overwrite address with all
RSYNC_ADDR="*"
fi
RSYNC_REAL_PID=$!
if [ -z "$STUNNEL" ]
then
rsync --daemon --no-detach --port "$RSYNC_PORT" --config "$RSYNC_CONF" ${RSYNC_EXTRA_ARGS} &
RSYNC_REAL_PID=$!
else
cat << EOF > "$STUNNEL_CONF"
key = $SSTKEY
cert = $SSTCERT
foreground = yes
pid = $STUNNEL_PID
debug = warning
client = no
[rsync]
accept = $STUNNEL_ACCEPT
exec = $(which rsync)
execargs = rsync --server --daemon --config=$RSYNC_CONF .
EOF
stunnel "$STUNNEL_CONF" &
RSYNC_REAL_PID=$!
RSYNC_PID=$STUNNEL_PID
fi
until check_pid_and_port "$RSYNC_PID" "$RSYNC_REAL_PID" "$RSYNC_ADDR" "$RSYNC_PORT"
do


@ -1494,6 +1494,17 @@ public:
/* Hash value */
virtual void hash(ulong *nr, ulong *nr2);
/**
Get the upper limit of the MySQL integral and floating-point type.
@return maximum allowed value for the field
*/
virtual ulonglong get_max_int_value() const
{
DBUG_ASSERT(false);
return 0ULL;
}
/**
Checks whether a string field is part of write_set.
@ -2115,6 +2126,11 @@ public:
*to= *from;
return from + 1;
}
virtual ulonglong get_max_int_value() const
{
return unsigned_flag ? 0xFFULL : 0x7FULL;
}
};
@ -2159,6 +2175,10 @@ public:
virtual const uchar *unpack(uchar* to, const uchar *from,
const uchar *from_end, uint param_data)
{ return unpack_int16(to, from, from_end); }
virtual ulonglong get_max_int_value() const
{
return unsigned_flag ? 0xFFFFULL : 0x7FFFULL;
}
};
class Field_medium :public Field_int
@ -2194,6 +2214,10 @@ public:
{
return Field::pack(to, from, max_length);
}
virtual ulonglong get_max_int_value() const
{
return unsigned_flag ? 0xFFFFFFULL : 0x7FFFFFULL;
}
};
@ -2243,6 +2267,10 @@ public:
{
return unpack_int32(to, from, from_end);
}
virtual ulonglong get_max_int_value() const
{
return unsigned_flag ? 0xFFFFFFFFULL : 0x7FFFFFFFULL;
}
};
@ -2295,6 +2323,10 @@ public:
{
return unpack_int64(to, from, from_end);
}
virtual ulonglong get_max_int_value() const
{
return unsigned_flag ? 0xFFFFFFFFFFFFFFFFULL : 0x7FFFFFFFFFFFFFFFULL;
}
void set_max();
bool is_max();
@ -2380,6 +2412,13 @@ public:
uint32 pack_length() const { return sizeof(float); }
uint row_pack_length() const { return pack_length(); }
void sql_type(String &str) const;
virtual ulonglong get_max_int_value() const
{
/*
We use the maximum as per IEEE754-2008 standard, 2^24
*/
return 0x1000000ULL;
}
private:
int save_field_metadata(uchar *first_byte);
};
@ -2433,6 +2472,13 @@ public:
uint32 pack_length() const { return sizeof(double); }
uint row_pack_length() const { return pack_length(); }
void sql_type(String &str) const;
virtual ulonglong get_max_int_value() const
{
/*
We use the maximum as per IEEE754-2008 standard, 2^53
*/
return 0x20000000000000ULL;
}
private:
int save_field_metadata(uchar *first_byte);
};
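
Editorial sketch, not part of the patch: a minimal standalone check of why 2^24 and 2^53 are used as the float/double limits above. Past these values, adding 1 is no longer representable, so a larger auto-increment value could not be stored exactly:

#include <cstdio>

int main()
{
  float  f = 16777216.0f;         /* 2^24 */
  double d = 9007199254740992.0;  /* 2^53 */
  /* Both lines print the same number twice: x + 1 rounds back to x. */
  printf("%.1f %.1f\n", (double) f, (double) (f + 1.0f));
  printf("%.1f %.1f\n", d, d + 1.0);
  return 0;
}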

View File

@ -3047,9 +3047,15 @@ compute_next_insert_id(ulonglong nr,struct system_variables *variables)
nr= nr + 1; // optimization of the formula below
else
{
/*
Calculating the number of complete auto_increment_increment extents:
*/
nr= (((nr+ variables->auto_increment_increment -
variables->auto_increment_offset)) /
(ulonglong) variables->auto_increment_increment);
/*
Adding an offset to the auto_increment_increment extent boundary:
*/
nr= (nr* (ulonglong) variables->auto_increment_increment +
variables->auto_increment_offset);
}
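
A worked instance of the two steps above (editorial; the values are illustrative): with auto_increment_increment=10 and auto_increment_offset=5, the smallest sequence value strictly greater than nr=23 is 25:

#include <cassert>

int main()
{
  unsigned long long nr = 23, inc = 10, off = 5;
  nr = (nr + inc - off) / inc;  /* number of complete extents: 28/10 == 2 */
  nr = nr * inc + off;          /* extent boundary plus offset: 25 */
  assert(nr == 25);             /* sequence is 5, 15, 25, ...; 25 > 23 */
  return 0;
}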
@ -3073,6 +3079,51 @@ void handler::adjust_next_insert_id_after_explicit_value(ulonglong nr)
}
/** @brief
Computes the largest number X:
- smaller than or equal to "nr"
- of the form: auto_increment_offset + N * auto_increment_increment
where N>=0.
SYNOPSIS
prev_insert_id
nr Number to "round down"
variables variables struct containing auto_increment_increment and
auto_increment_offset
RETURN
The number X if it exists, "nr" otherwise.
*/
inline ulonglong
prev_insert_id(ulonglong nr, struct system_variables *variables)
{
if (unlikely(nr < variables->auto_increment_offset))
{
/*
There's nothing good we can do here. This is a pathological case where
the offset is larger than the column's maximum possible value, i.e. not
even the first sequence value may be inserted. The user will receive a
warning.
*/
DBUG_PRINT("info",("auto_increment: nr: %lu cannot honour "
"auto_increment_offset: %lu",
(ulong) nr, variables->auto_increment_offset));
return nr;
}
if (variables->auto_increment_increment == 1)
return nr; // optimization of the formula below
/*
Calculating the number of complete auto_increment_increment extents:
*/
nr= (((nr - variables->auto_increment_offset)) /
(ulonglong) variables->auto_increment_increment);
/*
Adding an offset to the auto_increment_increment extent boundary:
*/
return (nr * (ulonglong) variables->auto_increment_increment +
variables->auto_increment_offset);
}
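
And the mirror-image rounding done by prev_insert_id() above (editorial example, same illustrative settings): the largest sequence value smaller than or equal to nr=23 is 15:

#include <cassert>

int main()
{
  unsigned long long nr = 23, inc = 10, off = 5;
  nr = (nr - off) / inc;  /* complete extents below nr: 18/10 == 1 */
  nr = nr * inc + off;    /* rounds 23 down to 15 in 5, 15, 25, ... */
  assert(nr == 15);
  return 0;
}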
/**
Update the auto_increment field if necessary.
@ -3309,10 +3360,23 @@ int handler::update_auto_increment()
if (unlikely(tmp)) // Out of range value in store
{
/*
It's better to return an error here than getting a confusing
'duplicate key error' later.
First, test if the query was aborted due to strict mode constraints.
*/
result= HA_ERR_AUTOINC_ERANGE;
if (thd->killed == KILL_BAD_DATA ||
nr > table->next_number_field->get_max_int_value())
DBUG_RETURN(HA_ERR_AUTOINC_ERANGE);
/*
The field refused this value (overflow) and truncated it, so use the
result of the truncation (which is going to be inserted); however, we
try to decrease it to honour the auto_increment_* variables.
That will shift the left bound of the reserved interval; we don't
bother shifting the right bound (any other value from this
interval would cause a duplicate key anyway).
*/
nr= prev_insert_id(table->next_number_field->val_int(), variables);
if (unlikely(table->next_number_field->store((longlong) nr, TRUE)))
nr= table->next_number_field->val_int();
}
if (append)
{

View File

@ -7794,7 +7794,7 @@ Item_direct_view_ref::derived_grouping_field_transformer_for_where(THD *thd,
void Item_field::print(String *str, enum_query_type query_type)
{
if (field && field->table->const_table &&
!(query_type & QT_NO_DATA_EXPANSION))
!(query_type & (QT_NO_DATA_EXPANSION | QT_VIEW_INTERNAL)))
{
print_value(str);
return;

View File

@ -4982,6 +4982,19 @@ Item *Item_cond::build_clone(THD *thd)
}
bool Item_cond::excl_dep_on_grouping_fields(st_select_lex *sel)
{
List_iterator_fast<Item> li(list);
Item *item;
while ((item= li++))
{
if (!item->excl_dep_on_grouping_fields(sel))
return false;
}
return true;
}
void Item_cond_and::mark_as_condition_AND_part(TABLE_LIST *embedding)
{
List_iterator<Item> li(list);
@ -5118,7 +5131,11 @@ longlong Item_func_isnull::val_int()
void Item_func_isnull::print(String *str, enum_query_type query_type)
{
args[0]->print_parenthesised(str, query_type, precedence());
if (const_item() && !args[0]->maybe_null &&
!(query_type & (QT_NO_DATA_EXPANSION | QT_VIEW_INTERNAL)))
str->append("/*always not null*/ 1");
else
args[0]->print_parenthesised(str, query_type, precedence());
str->append(STRING_WITH_LEN(" is null"));
}

View File

@ -2952,6 +2952,7 @@ public:
Item_transformer transformer, uchar *arg_t);
bool eval_not_null_tables(void *opt_arg);
Item *build_clone(THD *thd);
bool excl_dep_on_grouping_fields(st_select_lex *sel);
};
template <template<class> class LI, class T> class Item_equal_iterator;

View File

@ -1085,6 +1085,8 @@ public:
case Item_sum::DENSE_RANK_FUNC:
case Item_sum::PERCENT_RANK_FUNC:
case Item_sum::CUME_DIST_FUNC:
case Item_sum::LAG_FUNC:
case Item_sum::LEAD_FUNC:
case Item_sum::PERCENTILE_CONT_FUNC:
case Item_sum::PERCENTILE_DISC_FUNC:
return true;

View File

@ -146,7 +146,8 @@ void key_copy(uchar *to_key, const uchar *from_record, KEY *key_info,
{
key_length-= HA_KEY_BLOB_LENGTH;
length= MY_MIN(key_length, key_part->length);
uint bytes= key_part->field->get_key_image(to_key, length, Field::itRAW);
uint bytes= key_part->field->get_key_image(to_key, length,
key_info->flags & HA_SPATIAL ? Field::itMBR : Field::itRAW);
if (with_zerofill && bytes < length)
bzero((char*) to_key + bytes, length - bytes);
to_key+= HA_KEY_BLOB_LENGTH;

View File

@ -18,6 +18,7 @@
#include <my_bit.h>
#include "sql_select.h"
#include "key.h"
#include "sql_statistics.h"
/****************************************************************************
* Default MRR implementation (MRR to non-MRR converter)
@ -64,7 +65,12 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
ha_rows rows, total_rows= 0;
uint n_ranges=0;
THD *thd= table->in_use;
uint limit= thd->variables.eq_range_index_dive_limit;
bool use_statistics_for_eq_range= eq_ranges_exceeds_limit(seq,
seq_init_param,
limit);
/* Default MRR implementation doesn't need buffer */
*bufsz= 0;
@ -88,8 +94,15 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
min_endp= range.start_key.length? &range.start_key : NULL;
max_endp= range.end_key.length? &range.end_key : NULL;
}
int keyparts_used= my_count_bits(range.start_key.keypart_map);
if ((range.range_flag & UNIQUE_RANGE) && !(range.range_flag & NULL_RANGE))
rows= 1; /* there can be at most one row */
else if (use_statistics_for_eq_range &&
!(range.range_flag & NULL_RANGE) &&
(range.range_flag & EQ_RANGE) &&
table->key_info[keyno].actual_rec_per_key(keyparts_used - 1) > 0.5)
rows=
(ha_rows) table->key_info[keyno].actual_rec_per_key(keyparts_used - 1);
else
{
if (HA_POS_ERROR == (rows= this->records_in_range(keyno, min_endp,

View File

@ -1493,9 +1493,9 @@ static NTService Service; ///< Service object for WinNT
#endif /* __WIN__ */
#ifdef _WIN32
#include <sddl.h> /* ConvertStringSecurityDescriptorToSecurityDescriptor */
static char pipe_name[512];
static SECURITY_ATTRIBUTES saPipeSecurity;
static SECURITY_DESCRIPTOR sdPipeDescriptor;
static HANDLE hPipe = INVALID_HANDLE_VALUE;
#endif
@ -2750,21 +2750,20 @@ static void network_init(void)
strxnmov(pipe_name, sizeof(pipe_name)-1, "\\\\.\\pipe\\",
mysqld_unix_port, NullS);
bzero((char*) &saPipeSecurity, sizeof(saPipeSecurity));
bzero((char*) &sdPipeDescriptor, sizeof(sdPipeDescriptor));
if (!InitializeSecurityDescriptor(&sdPipeDescriptor,
SECURITY_DESCRIPTOR_REVISION))
/*
Create a security descriptor for the pipe.
- Use a low integrity level, so that it is possible to connect
from any process.
- Give Everyone read/write access to the pipe.
*/
if (!ConvertStringSecurityDescriptorToSecurityDescriptor(
"S:(ML;; NW;;; LW) D:(A;; FRFW;;; WD)",
SDDL_REVISION_1, &saPipeSecurity.lpSecurityDescriptor, NULL))
{
sql_perror("Can't start server : Initialize security descriptor");
unireg_abort(1);
}
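/* Editorial note, not in the patch: in the SDDL string above,
S:(ML;;NW;;;LW) attaches a Low mandatory integrity label with the
NO_WRITE_UP policy, and D:(A;;FRFW;;;WD) adds a DACL entry allowing
FILE_GENERIC_READ | FILE_GENERIC_WRITE to Everyone (WD). */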
if (!SetSecurityDescriptorDacl(&sdPipeDescriptor, TRUE, NULL, FALSE))
{
sql_perror("Can't start server : Set security descriptor");
unireg_abort(1);
}
saPipeSecurity.nLength = sizeof(SECURITY_ATTRIBUTES);
saPipeSecurity.lpSecurityDescriptor = &sdPipeDescriptor;
saPipeSecurity.bInheritHandle = FALSE;
if ((hPipe= CreateNamedPipe(pipe_name,
PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE,
@ -4390,6 +4389,20 @@ static int init_common_variables()
DBUG_PRINT("info",("%s Ver %s for %s on %s\n",my_progname,
server_version, SYSTEM_TYPE,MACHINE_TYPE));
#ifdef WITH_WSREP
/*
We need to initialize auxiliary variables that will keep the
original values of the auto-increment options as they were set
by the user. These variables are used to restore the
user-defined values of the auto-increment options after
wsrep_auto_increment_control is switched to 'OFF'.
*/
global_system_variables.saved_auto_increment_increment=
global_system_variables.auto_increment_increment;
global_system_variables.saved_auto_increment_offset=
global_system_variables.auto_increment_offset;
#endif /* WITH_WSREP */
#ifdef HAVE_LINUX_LARGE_PAGES
/* Initialize large page size */
if (opt_large_pages)
@ -7007,6 +7020,7 @@ pthread_handler_t handle_connections_namedpipes(void *arg)
connect->host= my_localhost;
create_new_thread(connect);
}
LocalFree(saPipeSecurity.lpSecurityDescriptor);
CloseHandle(connectOverlapped.hEvent);
DBUG_LEAVE;
decrement_handler_count();

View File

@ -14645,6 +14645,32 @@ void QUICK_GROUP_MIN_MAX_SELECT::add_keys_and_lengths(String *key_names,
}
/* Check whether the number of equality ranges exceeds the set threshold */
bool eq_ranges_exceeds_limit(RANGE_SEQ_IF *seq, void *seq_init_param,
uint limit)
{
KEY_MULTI_RANGE range;
range_seq_t seq_it;
uint count = 0;
if (limit == 0)
{
/* 'Statistics instead of index dives' feature is turned off */
return false;
}
seq_it= seq->init(seq_init_param, 0, 0);
while (!seq->next(seq_it, &range))
{
if ((range.range_flag & EQ_RANGE) && !(range.range_flag & NULL_RANGE))
{
if (++count >= limit)
return true;
}
}
return false;
}
#ifndef DBUG_OFF
static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,

View File

@ -242,7 +242,7 @@ public:
Number of children of this element in the RB-tree, plus 1 for this
element itself.
*/
uint16 elements;
uint32 elements;
/*
Valid only for elements which are RB-tree roots: Number of times this
RB-tree is referred to (it is referred by SEL_ARG::next_key_part or by
@ -1724,6 +1724,9 @@ SQL_SELECT *make_select(TABLE *head, table_map const_tables,
bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond);
bool eq_ranges_exceeds_limit(RANGE_SEQ_IF *seq, void *seq_init_param,
uint limit);
#ifdef WITH_PARTITION_STORAGE_ENGINE
bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond);
#endif

View File

@ -72,6 +72,7 @@ typedef struct st_sel_arg_range_seq
range_seq_t sel_arg_range_seq_init(void *init_param, uint n_ranges, uint flags)
{
SEL_ARG_RANGE_SEQ *seq= (SEL_ARG_RANGE_SEQ*)init_param;
seq->param->range_count=0;
seq->at_start= TRUE;
seq->stack[0].key_tree= NULL;
seq->stack[0].min_key= seq->param->min_key;
@ -272,25 +273,44 @@ walk_up_n_right:
key_info= NULL;
else
key_info= &seq->param->table->key_info[seq->real_keyno];
/*
Conditions below:
(1) - range analysis is used for estimating condition selectivity
(2) - This is a unique key, and we have conditions for all its
user-defined key parts.
(3) - The table uses extended keys, this key covers all components,
and we have conditions for all key parts.
This is an equality range (keypart_0=X and ... and keypart_n=Z) if
(1) - There are no flags indicating open range (e.g.,
"keypart_x > y") or GIS.
(2) - The lower bound and the upper bound of the range have the
same value (min_key == max_key).
*/
if (!(cur->min_key_flag & ~NULL_RANGE) && !cur->max_key_flag &&
(!key_info || // (1)
((uint)key_tree->part+1 == key_info->user_defined_key_parts && // (2)
key_info->flags & HA_NOSAME) || // (2)
((key_info->flags & HA_EXT_NOSAME) && // (3)
(uint)key_tree->part+1 == key_info->ext_key_parts) // (3)
) &&
range->start_key.length == range->end_key.length &&
!memcmp(seq->param->min_key,seq->param->max_key,range->start_key.length))
range->range_flag= UNIQUE_RANGE | (cur->min_key_flag & NULL_RANGE);
const uint is_open_range =
(NO_MIN_RANGE | NO_MAX_RANGE | NEAR_MIN | NEAR_MAX | GEOM_FLAG);
const bool is_eq_range_pred =
!(cur->min_key_flag & is_open_range) && // (1)
!(cur->max_key_flag & is_open_range) && // (1)
range->start_key.length == range->end_key.length && // (2)
!memcmp(seq->param->min_key, seq->param->max_key, // (2)
range->start_key.length);
if (is_eq_range_pred)
{
range->range_flag = EQ_RANGE;
/*
Conditions below:
(1) - Range analysis is used for estimating condition selectivity
(2) - This is a unique key, and we have conditions for all its
user-defined key parts.
(3) - The table uses extended keys, this key covers all components,
and we have conditions for all key parts.
*/
if (
!key_info || // (1)
((uint)key_tree->part+1 == key_info->user_defined_key_parts && // (2)
key_info->flags & HA_NOSAME) || // (2)
((key_info->flags & HA_EXT_NOSAME) && // (3)
(uint)key_tree->part+1 == key_info->ext_key_parts) // (3)
)
range->range_flag |= UNIQUE_RANGE | (cur->min_key_flag & NULL_RANGE);
}
if (seq->param->is_ror_scan)
{

View File

@ -711,7 +711,7 @@ uchar *net_store_data(uchar *to, const uchar *from, size_t length)
uchar *net_store_data(uchar *to,int32 from)
{
char buff[20];
char buff[22];
uint length=(uint) (int10_to_str(from,buff,10)-buff);
to=net_store_length_fast(to,length);
memcpy(to,buff,length);
@ -1137,7 +1137,7 @@ bool Protocol_text::store_tiny(longlong from)
DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_TINY);
field_pos++;
#endif
char buff[20];
char buff[22];
return net_store_data((uchar*) buff,
(size_t) (int10_to_str((int) from, buff, -10) - buff));
}
@ -1151,7 +1151,7 @@ bool Protocol_text::store_short(longlong from)
field_types[field_pos] == MYSQL_TYPE_SHORT);
field_pos++;
#endif
char buff[20];
char buff[22];
return net_store_data((uchar*) buff,
(size_t) (int10_to_str((int) from, buff, -10) -
buff));
@ -1166,7 +1166,7 @@ bool Protocol_text::store_long(longlong from)
field_types[field_pos] == MYSQL_TYPE_LONG);
field_pos++;
#endif
char buff[20];
char buff[22];
return net_store_data((uchar*) buff,
(size_t) (int10_to_str((long int)from, buff,
(from <0)?-10:10)-buff));
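/* Editorial note, not in the patch: int10_to_str() can emit up to 20
digits for a 64-bit value, plus a sign and a terminating NUL, so
buff[20] was presumably too small for the worst case; 22 bytes cover
any longlong. */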

View File

@ -581,6 +581,18 @@ typedef struct system_variables
ha_rows max_join_size;
ha_rows expensive_subquery_limit;
ulong auto_increment_increment, auto_increment_offset;
#ifdef WITH_WSREP
/*
Stored values of the auto_increment_increment and
auto_increment_offset options, needed when
wsrep_auto_increment_control is switched back to 'OFF':
setting it to 'ON' overwrites the original values (set by
the user) with calculated values (based on the cluster
size):
*/
ulong saved_auto_increment_increment, saved_auto_increment_offset;
#endif /* WITH_WSREP */
uint eq_range_index_dive_limit;
ulong column_compression_zlib_strategy;
ulong lock_wait_timeout;
ulong join_cache_level;

View File

@ -1682,8 +1682,7 @@ JOIN::optimize_inner()
DBUG_RETURN(1);
}
if (thd->lex->sql_command == SQLCOM_SELECT &&
optimizer_flag(thd, OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED))
if (optimizer_flag(thd, OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED))
{
TABLE_LIST *tbl;
List_iterator_fast<TABLE_LIST> li(select_lex->leaf_tables);
@ -1880,6 +1879,14 @@ JOIN::optimize_inner()
error= 1;
DBUG_RETURN(1);
}
if (!group_list)
{
/* The output has only one row */
order=0;
simple_order=1;
group_optimized_away= 1;
select_distinct=0;
}
}
/* Calculate how to do the join */
@ -6522,7 +6529,7 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
Item_field *cur_item;
key_map possible_keys(0);
if (join->group_list || join->simple_group)
if (join->group_list)
{ /* Collect all query fields referenced in the GROUP clause. */
for (cur_group= join->group_list; cur_group; cur_group= cur_group->next)
(*cur_group->item)->walk(&Item::collect_item_field_processor, 0,

View File

@ -2610,7 +2610,7 @@ static int show_create_view(THD *thd, TABLE_LIST *table, String *buff)
We can't just use table->query, because our SQL_MODE may trigger
a different syntax, like when ANSI_QUOTES is defined.
*/
table->view->unit.print(buff, enum_query_type(QT_ORDINARY |
table->view->unit.print(buff, enum_query_type(QT_VIEW_INTERNAL |
QT_ITEM_ORIGINAL_FUNC_NULLIF));
if (table->with_check != VIEW_CHECK_NONE)

View File

@ -21,7 +21,7 @@ enum enum_use_stat_tables_mode
{
NEVER,
COMPLEMENTARY,
PEFERABLY,
PREFERABLY,
} Use_stat_tables_mode;
typedef

View File

@ -264,7 +264,7 @@ bool get_date_from_daynr(long daynr,uint *ret_year,uint *ret_month,
ulong convert_period_to_month(ulong period)
{
ulong a,b;
if (period == 0)
if (period == 0 || period > 999912)
return 0L;
if ((a=period/100) < YY_PART_YEAR)
a+=2000;
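/* Editorial note, not in the patch: a period is encoded as YYYYMM (or
YYMM), so the largest valid value is 999912 (year 9999, month 12);
anything larger would decode to an impossible month. */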

View File

@ -349,13 +349,56 @@ static Sys_var_long Sys_pfs_connect_attrs_size(
#endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */
#ifdef WITH_WSREP
/*
We need to keep the original values set by the user, as they will
be lost if wsrep_auto_increment_control is set to 'ON':
*/
static bool update_auto_increment_increment (sys_var *self, THD *thd, enum_var_type type)
{
if (type == OPT_GLOBAL)
global_system_variables.saved_auto_increment_increment=
global_system_variables.auto_increment_increment;
else
thd->variables.saved_auto_increment_increment=
thd->variables.auto_increment_increment;
return false;
}
#endif /* WITH_WSREP */
static Sys_var_ulong Sys_auto_increment_increment(
"auto_increment_increment",
"Auto-increment columns are incremented by this",
SESSION_VAR(auto_increment_increment),
CMD_LINE(OPT_ARG),
VALID_RANGE(1, 65535), DEFAULT(1), BLOCK_SIZE(1),
#ifdef WITH_WSREP
NO_MUTEX_GUARD, IN_BINLOG, ON_CHECK(0),
ON_UPDATE(update_auto_increment_increment));
#else
NO_MUTEX_GUARD, IN_BINLOG);
#endif /* WITH_WSREP */
#ifdef WITH_WSREP
/*
We need to keep the original values set by the user, as they will
be lost if wsrep_auto_increment_control is set to 'ON':
*/
static bool update_auto_increment_offset (sys_var *self, THD *thd, enum_var_type type)
{
if (type == OPT_GLOBAL)
global_system_variables.saved_auto_increment_offset=
global_system_variables.auto_increment_offset;
else
thd->variables.saved_auto_increment_offset=
thd->variables.auto_increment_offset;
return false;
}
#endif /* WITH_WSREP */
static Sys_var_ulong Sys_auto_increment_offset(
"auto_increment_offset",
@ -364,7 +407,12 @@ static Sys_var_ulong Sys_auto_increment_offset(
SESSION_VAR(auto_increment_offset),
CMD_LINE(OPT_ARG),
VALID_RANGE(1, 65535), DEFAULT(1), BLOCK_SIZE(1),
#ifdef WITH_WSREP
NO_MUTEX_GUARD, IN_BINLOG, ON_CHECK(0),
ON_UPDATE(update_auto_increment_offset));
#else
NO_MUTEX_GUARD, IN_BINLOG);
#endif /* WITH_WSREP */
static Sys_var_mybool Sys_automatic_sp_privileges(
"automatic_sp_privileges",
@ -2671,6 +2719,16 @@ static Sys_var_ulong Sys_div_precincrement(
SESSION_VAR(div_precincrement), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, DECIMAL_MAX_SCALE), DEFAULT(4), BLOCK_SIZE(1));
static Sys_var_uint Sys_eq_range_index_dive_limit(
"eq_range_index_dive_limit",
"The optimizer will use existing index statistics instead of "
"doing index dives for equality ranges if the number of equality "
"ranges for the index is larger than or equal to this number. "
"If set to 0, index dives are always used.",
SESSION_VAR(eq_range_index_dive_limit), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, UINT_MAX32), DEFAULT(0),
BLOCK_SIZE(1));
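
A condensed, editorial sketch of how this limit is consumed by handler::multi_range_read_info_const() in this patch; everything except the names cited in the comments is invented for illustration:

#include <stdint.h>
typedef uint64_t ha_rows;

/* stats_allowed stands for eq_ranges_exceeds_limit(seq, param, limit),
rec_per_key for actual_rec_per_key(keyparts_used - 1), and dived_rows
for the records_in_range() estimate. */
ha_rows estimate_range_rows(bool unique_range, bool null_range,
                            bool eq_range, bool stats_allowed,
                            double rec_per_key, ha_rows dived_rows)
{
  if (unique_range && !null_range)
    return 1;                      /* at most one matching row */
  if (stats_allowed && eq_range && !null_range && rec_per_key > 0.5)
    return (ha_rows) rec_per_key;  /* statistics, no index dive */
  return dived_rows;               /* fall back to an index dive */
}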
static Sys_var_ulong Sys_range_alloc_block_size(
"range_alloc_block_size",
"Allocation block size for storing ranges during optimization",
@ -5325,11 +5383,54 @@ static Sys_var_ulong Sys_wsrep_retry_autocommit(
SESSION_VAR(wsrep_retry_autocommit), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, 10000), DEFAULT(1), BLOCK_SIZE(1));
static bool update_wsrep_auto_increment_control (sys_var *self, THD *thd, enum_var_type type)
{
if (wsrep_auto_increment_control)
{
/*
The variables that control auto increment are normally calculated
automatically based on the size of the cluster, usually within
the wsrep_view_handler_cb callback. However, if the user manually
sets wsrep_auto_increment_control to 'ON', we should re-calculate
these variables here (their values may be needed before
wsrep_view_handler_cb is invoked again, which happens rarely
while the cluster stays in a stable state):
*/
global_system_variables.auto_increment_increment=
wsrep_cluster_size ? wsrep_cluster_size : 1;
global_system_variables.auto_increment_offset=
wsrep_local_index >= 0 ? wsrep_local_index + 1 : 1;
thd->variables.auto_increment_increment=
global_system_variables.auto_increment_increment;
thd->variables.auto_increment_offset=
global_system_variables.auto_increment_offset;
}
else
{
/*
We must restore the last values of the variables that
were explicitly specified by the user:
*/
global_system_variables.auto_increment_increment=
global_system_variables.saved_auto_increment_increment;
global_system_variables.auto_increment_offset=
global_system_variables.saved_auto_increment_offset;
thd->variables.auto_increment_increment=
thd->variables.saved_auto_increment_increment;
thd->variables.auto_increment_offset=
thd->variables.saved_auto_increment_offset;
}
return false;
}
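
Worked example (editorial): in a three-node cluster where this node has wsrep_local_index == 1, the 'ON' branch above produces:

#include <cassert>

int main()
{
  long cluster_size = 3, local_index = 1;  /* illustrative values */
  long increment = cluster_size ? cluster_size : 1;
  long offset    = local_index >= 0 ? local_index + 1 : 1;
  /* this node would generate the sequence 2, 5, 8, 11, ... */
  assert(increment == 3 && offset == 2);
  return 0;
}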
static Sys_var_mybool Sys_wsrep_auto_increment_control(
"wsrep_auto_increment_control", "To automatically control the "
"assignment of autoincrement variables",
GLOBAL_VAR(wsrep_auto_increment_control),
CMD_LINE(OPT_ARG), DEFAULT(TRUE));
CMD_LINE(OPT_ARG), DEFAULT(TRUE),
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(update_wsrep_auto_increment_control));
static Sys_var_mybool Sys_wsrep_drupal_282555_workaround(
"wsrep_drupal_282555_workaround", "Enable a workaround to handle the "

View File

@ -6900,7 +6900,10 @@ void TABLE::mark_columns_used_by_virtual_fields(void)
for (uint i= 0 ; i < s->fields ; i++)
{
if (bitmap_is_set(&tmp_set, i))
{
s->field[i]->flags|= PART_INDIRECT_KEY_FLAG;
field[i]->flags|= PART_INDIRECT_KEY_FLAG;
}
}
bitmap_clear_all(&tmp_set);
}

View File

@ -160,6 +160,10 @@ extern "C" query_id_t wsrep_thd_query_id(THD *thd);
extern "C" query_id_t wsrep_thd_wsrep_last_query_id(THD *thd);
extern "C" void wsrep_thd_set_wsrep_last_query_id(THD *thd, query_id_t id);
extern "C" void wsrep_thd_auto_increment_variables(THD*,
unsigned long long *offset,
unsigned long long *increment);
extern void wsrep_close_client_connections(my_bool wait_to_end);
extern int wsrep_wait_committing_connections_close(int wait_time);
extern void wsrep_close_applier(THD *thd);

View File

@ -676,3 +676,25 @@ bool wsrep_thd_has_explicit_locks(THD *thd)
assert(thd);
return thd->mdl_context.has_explicit_locks();
}
/*
Get auto increment variables for THD. Use global settings for
applier threads.
*/
extern "C"
void wsrep_thd_auto_increment_variables(THD* thd,
unsigned long long* offset,
unsigned long long* increment)
{
if (thd->wsrep_exec_mode == REPL_RECV &&
thd->wsrep_conflict_state != REPLAYING)
{
*offset= global_system_variables.auto_increment_offset;
*increment= global_system_variables.auto_increment_increment;
}
else
{
*offset= thd->variables.auto_increment_offset;
*increment= thd->variables.auto_increment_increment;
}
}

View File

@ -619,6 +619,12 @@ bool wsrep_desync_check (sys_var *self, THD* thd, set_var* var)
return true;
}
if (thd->global_read_lock.is_acquired())
{
my_message (ER_CANNOT_USER, "Global read lock acquired. Can't set 'wsrep_desync'", MYF(0));
return true;
}
bool new_wsrep_desync= (bool) var->save_result.ulonglong_value;
if (wsrep_desync == new_wsrep_desync) {
if (new_wsrep_desync) {

View File

@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, MariaDB Corporation.
Copyright (c) 2017, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -391,7 +391,7 @@ buf_dump(
/* leave tmp_filename to exist */
return;
}
if ( (j % 1024) == 0) {
if (SHUTTING_DOWN() && !(j % 1024)) {
service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL,
"Dumping buffer pool "
ULINTPF "/" ULINTPF ", "

View File

@ -6110,7 +6110,7 @@ dict_table_get_index_on_name(
while (index != NULL) {
if (index->is_committed() == committed
&& innobase_strcasecmp(index->name, name) == 0) {
&& strcmp(index->name, name) == 0) {
return(index);
}

View File

@ -169,8 +169,6 @@ static int innobase_wsrep_set_checkpoint(handlerton* hton, const XID* xid);
static int innobase_wsrep_get_checkpoint(handlerton* hton, XID* xid);
#endif /* WITH_WSREP */
/** to protect innobase_open_files */
static mysql_mutex_t innobase_share_mutex;
/** to force correct commit order in binlog */
static ulong commit_threads = 0;
static mysql_cond_t commit_cond;
@ -461,8 +459,6 @@ operation, we only do it every INNOBASE_WAKE_INTERVAL'th step. */
#define INNOBASE_WAKE_INTERVAL 32
static ulong innobase_active_counter = 0;
static hash_table_t* innobase_open_tables;
/** Allowed values of innodb_change_buffering */
static const char* innodb_change_buffering_names[] = {
"none", /* IBUF_USE_NONE */
@ -571,7 +567,6 @@ const struct _ft_vft_ext ft_vft_ext_result = {innobase_fts_get_version,
/* Keys to register pthread mutexes/cond in the current file with
performance schema */
static mysql_pfs_key_t innobase_share_mutex_key;
static mysql_pfs_key_t commit_cond_mutex_key;
static mysql_pfs_key_t commit_cond_key;
static mysql_pfs_key_t pending_checkpoint_mutex_key;
@ -580,7 +575,6 @@ static mysql_pfs_key_t thd_destructor_thread_key;
static PSI_mutex_info all_pthread_mutexes[] = {
PSI_KEY(commit_cond_mutex),
PSI_KEY(pending_checkpoint_mutex),
PSI_KEY(innobase_share_mutex)
};
static PSI_cond_info all_innodb_conds[] = {
@ -1197,23 +1191,6 @@ static SHOW_VAR innodb_status_variables[]= {
{NullS, NullS, SHOW_LONG}
};
/************************************************************************//**
Handling the shared INNOBASE_SHARE structure that is needed to provide table
locking. Register the table name if it doesn't exist in the hash table. */
static
INNOBASE_SHARE*
get_share(
/*======*/
const char* table_name); /*!< in: table to lookup */
/************************************************************************//**
Free the shared object that was registered with get_share(). */
static
void
free_share(
/*=======*/
INNOBASE_SHARE* share); /*!< in/own: share to free */
/*****************************************************************//**
Frees a possible InnoDB trx object associated with the current THD.
@return 0 or error number */
@ -4291,10 +4268,6 @@ static int innodb_init(void* p)
ibuf_max_size_update(srv_change_buffer_max_size);
innobase_open_tables = hash_create(200);
mysql_mutex_init(innobase_share_mutex_key,
&innobase_share_mutex,
MY_MUTEX_INIT_FAST);
mysql_mutex_init(commit_cond_mutex_key,
&commit_cond_m, MY_MUTEX_INIT_FAST);
mysql_cond_init(commit_cond_key, &commit_cond, 0);
@ -4363,9 +4336,6 @@ innobase_end(handlerton*, ha_panic_function)
}
}
hash_table_free(innobase_open_tables);
innobase_open_tables = NULL;
st_my_thread_var* running = reinterpret_cast<st_my_thread_var*>(
my_atomic_loadptr_explicit(
reinterpret_cast<void**>(&srv_running),
@ -4383,7 +4353,6 @@ innobase_end(handlerton*, ha_panic_function)
innodb_shutdown();
innobase_space_shutdown();
mysql_mutex_destroy(&innobase_share_mutex);
mysql_mutex_destroy(&commit_cond_m);
mysql_cond_destroy(&commit_cond);
mysql_mutex_destroy(&pending_checkpoint_mutex);
@ -5619,6 +5588,7 @@ is consistent between KEY info from mysql and that from innodb index.
@param[in] key_info Index info from mysql
@param[in] index_info Index info from InnoDB
@return true if all column types match. */
static
bool
innobase_match_index_columns(
const KEY* key_info,
@ -5908,90 +5878,33 @@ innobase_build_v_templ(
s_templ->tb_name = table->s->table_name.str;
}
/*******************************************************************//**
This function builds a translation table in INNOBASE_SHARE
structure for fast index location with mysql array number from its
table->key_info structure. This also provides the necessary translation
between the key order in mysql key_info and InnoDB ib_table->indexes if
they are not fully matched with each other.
Note we do not have any mutex protecting the translation table
building, based on the assumption that there is no concurrent
index creation/drop and no DML that requires index lookup. All table
handles will be closed before the index creation/drop.
@return true if index translation table built successfully */
static
bool
innobase_build_index_translation(
/*=============================*/
const TABLE* table, /*!< in: table in MySQL data
dictionary */
dict_table_t* ib_table,/*!< in: table in InnoDB data
dictionary */
INNOBASE_SHARE* share) /*!< in/out: share structure
where index translation table
will be constructed in. */
/** Check consistency between .frm indexes and InnoDB indexes.
@param[in] table table object formed from .frm
@param[in] ib_table InnoDB table definition
@retval true if no errors were found */
static bool
check_index_consistency(const TABLE* table, const dict_table_t* ib_table)
{
DBUG_ENTER("innobase_build_index_translation");
bool ret = true;
mutex_enter(&dict_sys->mutex);
ulint mysql_num_index = table->s->keys;
ulint ib_num_index = UT_LIST_GET_LEN(ib_table->indexes);
dict_index_t** index_mapping = share->idx_trans_tbl.index_mapping;
ulint mysql_num_index = table->s->keys;
ulint ib_num_index = UT_LIST_GET_LEN(ib_table->indexes);
bool ret = true;
/* If there exists inconsistency between MySQL and InnoDB dictionary
(metadata) information, the number of indexes defined in MySQL
could exceed that in InnoDB, do not build index translation
table in such case */
could exceed that in InnoDB, return error */
if (ib_num_index < mysql_num_index) {
ret = false;
goto func_exit;
}
/* If index entry count is non-zero, nothing has
changed since last update, directly return TRUE */
if (share->idx_trans_tbl.index_count) {
/* Index entry count should still match mysql_num_index */
ut_a(share->idx_trans_tbl.index_count == mysql_num_index);
goto func_exit;
}
/* The number of index increased, rebuild the mapping table */
if (mysql_num_index > share->idx_trans_tbl.array_size) {
index_mapping = reinterpret_cast<dict_index_t**>(
ut_realloc(index_mapping,
mysql_num_index * sizeof(*index_mapping)));
if (index_mapping == NULL) {
/* Report an error if index_mapping continues to be
NULL and mysql_num_index is a non-zero value */
sql_print_error("InnoDB: fail to allocate memory for "
"index translation table. Number of "
"Index: " ULINTPF
", array size:" ULINTPF,
mysql_num_index,
share->idx_trans_tbl.array_size);
ret = false;
goto func_exit;
}
share->idx_trans_tbl.array_size = mysql_num_index;
}
/* For each index in the mysql key_info array, fetch its
corresponding InnoDB index pointer into index_mapping
array. */
for (ulint count = 0; count < mysql_num_index; count++) {
/* Fetch index pointers into index_mapping according to mysql
index sequence */
index_mapping[count] = dict_table_get_index_on_name(
const dict_index_t* index = dict_table_get_index_on_name(
ib_table, table->key_info[count].name.str);
if (index_mapping[count] == 0) {
if (index == NULL) {
sql_print_error("Cannot find index %s in InnoDB"
" index dictionary.",
table->key_info[count].name.str);
@ -6002,7 +5915,7 @@ innobase_build_index_translation(
/* Double check fetched index has the same
column info as those in mysql key_info. */
if (!innobase_match_index_columns(&table->key_info[count],
index_mapping[count])) {
index)) {
sql_print_error("Found index %s whose column info"
" does not match that of MariaDB.",
table->key_info[count].name.str);
@ -6011,51 +5924,10 @@ innobase_build_index_translation(
}
}
/* Successfully built the translation table */
share->idx_trans_tbl.index_count = mysql_num_index;
func_exit:
if (!ret) {
/* Build translation table failed. */
ut_free(index_mapping);
share->idx_trans_tbl.array_size = 0;
share->idx_trans_tbl.index_count = 0;
index_mapping = NULL;
}
share->idx_trans_tbl.index_mapping = index_mapping;
mutex_exit(&dict_sys->mutex);
DBUG_RETURN(ret);
return ret;
}
/*******************************************************************//**
This function uses index translation table to quickly locate the
requested index structure.
Note we do not have mutex protection for the index translation table
access; it is based on the assumption that there is no concurrent
translation table rebuild (after create/drop index) and DMLs that
require index lookup.
@return dict_index_t structure for requested index. NULL if
fail to locate the index structure. */
static
dict_index_t*
innobase_index_lookup(
/*==================*/
INNOBASE_SHARE* share, /*!< in: share structure for index
translation table. */
uint keynr) /*!< in: index number for the requested
index */
{
if (share->idx_trans_tbl.index_mapping == NULL
|| keynr >= share->idx_trans_tbl.index_count) {
return(NULL);
}
return(share->idx_trans_tbl.index_mapping[keynr]);
}
/********************************************************************//**
Get the upper limit of the MySQL integral and floating-point type.
@return maximum allowed value for the field */
@ -6185,11 +6057,6 @@ ha_innobase::open(const char* name, int, uint)
m_user_thd = NULL;
if (!(m_share = get_share(name))) {
DBUG_RETURN(1);
}
/* Will be allocated if it is needed in ::update_row() */
m_upd_buf = NULL;
m_upd_buf_size = 0;
@ -6213,7 +6080,6 @@ ha_innobase::open(const char* name, int, uint)
norm_name);
}
no_such_table:
free_share(m_share);
set_my_errno(ENOENT);
DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
@ -6271,7 +6137,6 @@ no_such_table:
}
if (!thd_tablespace_op(thd)) {
free_share(m_share);
set_my_errno(ENOENT);
int ret_err = HA_ERR_NO_SUCH_TABLE;
@ -6328,9 +6193,10 @@ no_such_table:
mutex_exit(&dict_sys->mutex);
}
if (!innobase_build_index_translation(table, ib_table, m_share)) {
sql_print_error("Build InnoDB index translation table for"
" Table %s failed", name);
if (!check_index_consistency(table, ib_table)) {
sql_print_error("InnoDB indexes are inconsistent with what "
"defined in .frm for table %s",
name);
}
/* Allocate a buffer for a 'row reference'. A row reference is
@ -6426,9 +6292,6 @@ no_such_table:
/* Index block size in InnoDB: used by MySQL in query optimization */
stats.block_size = srv_page_size;
/* Init table lock structure */
thr_lock_data_init(&m_share->lock, &lock, NULL);
if (m_prebuilt->table == NULL
|| m_prebuilt->table->is_temporary()
|| m_prebuilt->table->persistent_autoinc
@ -6621,8 +6484,6 @@ ha_innobase::close()
m_upd_buf_size = 0;
}
free_share(m_share);
MONITOR_INC(MONITOR_TABLE_CLOSE);
/* Tell InnoDB server that there might be work for
@ -8186,7 +8047,7 @@ ha_innobase::write_row(
/* We need the upper limit of the col type to check for
whether we update the table autoinc counter or not. */
col_max_value = innobase_get_int_col_max_value(table->next_number_field);
col_max_value = table->next_number_field->get_max_int_value();
/* Get the value that MySQL attempted to store in the table.*/
auto_inc = table->next_number_field->val_uint();
@ -8261,14 +8122,30 @@ set_max_autoinc:
/* This should filter out the negative
values set explicitly by the user. */
if (auto_inc <= col_max_value) {
ut_a(m_prebuilt->autoinc_increment > 0);
ulonglong offset;
ulonglong increment;
dberr_t err;
offset = m_prebuilt->autoinc_offset;
increment = m_prebuilt->autoinc_increment;
#ifdef WITH_WSREP
/* Applier threads are
processing ROW events and don't go
through server-level autoinc
processing; therefore m_prebuilt
autoinc values don't get
properly assigned. Fetch the values
from the server side. */
if (wsrep_on(m_user_thd) &&
wsrep_thd_exec_mode(m_user_thd) == REPL_RECV) {
wsrep_thd_auto_increment_variables(m_user_thd, &offset, &increment);
} else {
#endif /* WITH_WSREP */
ut_a(m_prebuilt->autoinc_increment > 0);
offset = m_prebuilt->autoinc_offset;
increment = m_prebuilt->autoinc_increment;
#ifdef WITH_WSREP
}
#endif /* WITH_WSREP */
auto_inc = innobase_next_autoinc(
auto_inc,
@ -8966,12 +8843,27 @@ ha_innobase::update_row(
/* A value for an AUTO_INCREMENT column
was specified in the UPDATE statement. */
ulonglong offset, increment;
#ifdef WITH_WSREP
/* Applier threads are processing ROW events and don't
go through server-level autoinc processing; therefore
m_prebuilt autoinc values don't get properly assigned.
Fetch the values from the server side. */
if (wsrep_on(m_user_thd)
&& wsrep_thd_exec_mode(m_user_thd) == REPL_RECV) {
wsrep_thd_auto_increment_variables(
m_user_thd, &offset, &increment);
} else {
#endif /* WITH_WSREP */
offset = m_prebuilt->autoinc_offset;
increment = m_prebuilt->autoinc_increment;
#ifdef WITH_WSREP
}
#endif /* WITH_WSREP */
autoinc = innobase_next_autoinc(
autoinc, 1,
m_prebuilt->autoinc_increment,
m_prebuilt->autoinc_offset,
innobase_get_int_col_max_value(
table->found_next_number_field));
autoinc, 1, increment, offset,
table->found_next_number_field->get_max_int_value());
error = innobase_set_max_autoinc(autoinc);
@ -9489,60 +9381,18 @@ ha_innobase::innobase_get_index(
clustered index, even if it was internally
generated by InnoDB */
{
KEY* key;
KEY* key = NULL;
dict_table_t* ib_table = m_prebuilt->table;
dict_index_t* index;
DBUG_ENTER("innobase_get_index");
if (keynr != MAX_KEY && table->s->keys > 0) {
key = table->key_info + keynr;
index = innobase_index_lookup(m_share, keynr);
if (index != NULL) {
if (!key || ut_strcmp(index->name, key->name.str) != 0) {
ib::error() << " Index for key no " << keynr
<< " mysql name " << (key ? key->name.str : "NULL")
<< " InnoDB name " << index->name()
<< " for table " << m_prebuilt->table->name.m_name;
for(uint i=0; i < table->s->keys; i++) {
index = innobase_index_lookup(m_share, i);
key = table->key_info + keynr;
if (index) {
ib::info() << " Index for key no " << keynr
<< " mysql name " << (key ? key->name.str : "NULL")
<< " InnoDB name " << index->name()
<< " for table " << m_prebuilt->table->name.m_name;
}
}
}
ut_a(ut_strcmp(index->name, key->name.str) == 0);
} else {
/* Can't find index with keynr in the translation
table. Only print message if the index translation
table exists */
if (m_share->idx_trans_tbl.index_mapping != NULL) {
sql_print_warning("InnoDB could not find"
" index %s key no %u for"
" table %s through its"
" index translation table",
key ? key->name.str : "NULL",
keynr,
m_prebuilt->table->name
.m_name);
}
index = dict_table_get_index_on_name(
m_prebuilt->table, key->name.str);
}
key = &table->key_info[keynr];
index = dict_table_get_index_on_name(ib_table, key->name.str);
ut_ad(index);
} else {
key = 0;
index = dict_table_get_first_index(m_prebuilt->table);
index = dict_table_get_first_index(ib_table);
}
if (index == NULL) {
@ -9550,7 +9400,7 @@ ha_innobase::innobase_get_index(
"InnoDB could not find key no %u with name %s"
" from dict cache for table %s",
keynr, key ? key->name.str : "NULL",
m_prebuilt->table->name.m_name);
ib_table->name.m_name);
}
DBUG_RETURN(index);
@ -13703,20 +13553,12 @@ innodb_set_buf_pool_size(ulonglong buf_pool_size)
}
/*********************************************************************//**
Calculates the key number used inside MySQL for an Innobase index. We will
first check the "index translation table" for a match of the index to get
the index number. If there does not exist an "index translation table",
or not able to find the index in the translation table, then we will fall back
to the traditional way of looping through dict_index_t list to find a
match. In this case, we have to take into account if we generated a
default clustered index for the table
Calculates the key number used inside MySQL for an Innobase index.
@return the key number used inside MySQL */
static
unsigned
innobase_get_mysql_key_number_for_index(
/*====================================*/
INNOBASE_SHARE* share, /*!< in: share structure for index
translation table. */
const TABLE* table, /*!< in: table in MySQL data
dictionary */
dict_table_t* ib_table,/*!< in: table in InnoDB data
@ -13746,27 +13588,8 @@ innobase_get_mysql_key_number_for_index(
return(i);
}
/* If index translation table exists, we will first check
the index through index translation table for a match. */
if (share->idx_trans_tbl.index_mapping != NULL) {
for (i = 0; i < share->idx_trans_tbl.index_count; i++) {
if (share->idx_trans_tbl.index_mapping[i] == index) {
return(i);
}
}
/* Print an error message if we cannot find the index
in the "index translation table". */
if (index->is_committed()) {
sql_print_error("Cannot find index %s in InnoDB index"
" translation table.", index->name());
}
}
/* If we do not have an "index translation table", or not able
to find the index in the translation table, we'll directly find
matching index with information from mysql TABLE structure and
InnoDB dict_index_t list */
/* Directly find matching index with information from mysql TABLE
structure and InnoDB dict_index_t list */
for (i = 0; i < table->s->keys; i++) {
ind = dict_table_get_index_on_name(
ib_table, table->key_info[i].name.str);
@ -14123,11 +13946,6 @@ ha_innobase::info_low(
for (i = 0; i < table->s->keys; i++) {
ulong j;
/* We could get index quickly through internal
index mapping with the index translation table.
The identity of index (match up index name with
that of table->key_info[i]) is already verified in
innobase_get_index(). */
dict_index_t* index = innobase_get_index(i);
if (index == NULL) {
@ -14235,7 +14053,7 @@ ha_innobase::info_low(
if (err_index) {
errkey = innobase_get_mysql_key_number_for_index(
m_share, table, ib_table, err_index);
table, ib_table, err_index);
} else {
errkey = (unsigned int) (
(m_prebuilt->trx->error_key_num
@ -16240,104 +16058,6 @@ innobase_show_status(
/* Success */
return(false);
}
/************************************************************************//**
Handling the shared INNOBASE_SHARE structure that is needed to provide table
locking. Register the table name if it doesn't exist in the hash table. */
static
INNOBASE_SHARE*
get_share(
/*======*/
const char* table_name)
{
INNOBASE_SHARE* share;
mysql_mutex_lock(&innobase_share_mutex);
ulint fold = ut_fold_string(table_name);
HASH_SEARCH(table_name_hash, innobase_open_tables, fold,
INNOBASE_SHARE*, share,
ut_ad(share->use_count > 0),
!strcmp(share->table_name, table_name));
if (share == NULL) {
uint length = (uint) strlen(table_name);
/* TODO: invoke HASH_MIGRATE if innobase_open_tables
grows too big */
share = reinterpret_cast<INNOBASE_SHARE*>(
my_malloc(//PSI_INSTRUMENT_ME,
sizeof(*share) + length + 1,
MYF(MY_FAE | MY_ZEROFILL)));
share->table_name = reinterpret_cast<char*>(
memcpy(share + 1, table_name, length + 1));
HASH_INSERT(INNOBASE_SHARE, table_name_hash,
innobase_open_tables, fold, share);
thr_lock_init(&share->lock);
/* Index translation table initialization */
share->idx_trans_tbl.index_mapping = NULL;
share->idx_trans_tbl.index_count = 0;
share->idx_trans_tbl.array_size = 0;
}
++share->use_count;
mysql_mutex_unlock(&innobase_share_mutex);
return(share);
}
/************************************************************************//**
Free the shared object that was registered with get_share(). */
static
void
free_share(
/*=======*/
INNOBASE_SHARE* share) /*!< in/own: table share to free */
{
mysql_mutex_lock(&innobase_share_mutex);
#ifdef UNIV_DEBUG
INNOBASE_SHARE* share2;
ulint fold = ut_fold_string(share->table_name);
HASH_SEARCH(table_name_hash, innobase_open_tables, fold,
INNOBASE_SHARE*, share2,
ut_ad(share->use_count > 0),
!strcmp(share->table_name, share2->table_name));
ut_a(share2 == share);
#endif /* UNIV_DEBUG */
--share->use_count;
if (share->use_count == 0) {
ulint fold = ut_fold_string(share->table_name);
HASH_DELETE(INNOBASE_SHARE, table_name_hash,
innobase_open_tables, fold, share);
thr_lock_delete(&share->lock);
/* Free any memory from index translation table */
ut_free(share->idx_trans_tbl.index_mapping);
my_free(share);
/* TODO: invoke HASH_MIGRATE if innobase_open_tables
shrinks too much */
}
mysql_mutex_unlock(&innobase_share_mutex);
}
/*********************************************************************//**
Returns number of THR_LOCK locks used for one instance of InnoDB table.
InnoDB no longer relies on THR_LOCK locks so 0 value is returned.
@ -16789,16 +16509,16 @@ ha_innobase::get_auto_increment(
"THD: %ld, current: %llu, autoinc: %llu",
m_prebuilt->autoinc_increment,
increment,
thd_get_thread_id(ha_thd()),
thd_get_thread_id(m_user_thd),
current, autoinc);
if (!wsrep_on(ha_thd())) {
current = autoinc - m_prebuilt->autoinc_increment;
if (!wsrep_on(m_user_thd)) {
current = innobase_next_autoinc(
autoinc
- m_prebuilt->autoinc_increment,
1, increment, offset, col_max_value);
}
current = innobase_next_autoinc(
current, 1, increment, offset, col_max_value);
dict_table_autoinc_initialize(
m_prebuilt->table, current);
@ -20722,6 +20442,7 @@ static TABLE* innodb_acquire_mdl(THD* thd, dict_table_t* table)
if (!table_name_parse(table->name, db_buf, tbl_buf,
db_buf_len, tbl_buf_len)) {
table->release();
return NULL;
}
@ -20804,6 +20525,7 @@ static TABLE* innodb_find_table_for_vc(THD* thd, dict_table_t* table)
if (!table_name_parse(table->name, db_buf, tbl_buf,
db_buf_len, tbl_buf_len)) {
ut_ad(!"invalid table name");
return NULL;
}

View File

@ -23,35 +23,6 @@ this program; if not, write to the Free Software Foundation, Inc.,
system clustered index when there is no primary key. */
extern const char innobase_index_reserve_name[];
/* Structure defines translation table between mysql index and InnoDB
index structures */
struct innodb_idx_translate_t {
ulint index_count; /*!< number of valid index entries
in the index_mapping array */
ulint array_size; /*!< array size of index_mapping */
dict_index_t** index_mapping; /*!< index pointer array directly
maps to index in InnoDB from MySQL
array index */
};
/** InnoDB table share */
typedef struct st_innobase_share {
THR_LOCK lock;
const char* table_name; /*!< InnoDB table name */
uint use_count; /*!< reference count,
incremented in get_share()
and decremented in
free_share() */
void* table_name_hash;
/*!< hash table chain node */
innodb_idx_translate_t
idx_trans_tbl; /*!< index translation table between
MySQL and InnoDB */
} INNOBASE_SHARE;
/** Prebuilt structures in an InnoDB table handle used within MySQL */
struct row_prebuilt_t;
@ -496,9 +467,6 @@ protected:
THR_LOCK_DATA lock;
/** information for MySQL table locking */
INNOBASE_SHARE* m_share;
/** buffer used in updates */
uchar* m_upd_buf;
@ -635,17 +603,6 @@ trx_t*
innobase_trx_allocate(
MYSQL_THD thd); /*!< in: user thread handle */
/** Match index columns between MySQL and InnoDB.
This function checks whether the index column information
is consistent between KEY info from mysql and that from innodb index.
@param[in] key_info Index info from mysql
@param[in] index_info Index info from InnoDB
@return true if all column types match. */
bool
innobase_match_index_columns(
const KEY* key_info,
const dict_index_t* index_info);
/*********************************************************************//**
This function checks each index name for a table against reserved
system default primary index name 'GEN_CLUST_INDEX'. If a name

View File

@ -9775,12 +9775,6 @@ foreign_fail:
log_append_on_checkpoint(NULL);
/* Invalidate the index translation table. In partitioned
tables, there is no share. */
if (m_share) {
m_share->idx_trans_tbl.index_count = 0;
}
/* Tell the InnoDB server that there might be work for
utility threads: */

View File

@ -139,10 +139,15 @@ bool recv_parse_log_recs(lsn_t checkpoint_lsn, store_t store, bool apply);
/** Moves the parsing buffer data left to the buffer start. */
void recv_sys_justify_left_parsing_buf();
/** Report optimized DDL operation (without redo log), corresponding to MLOG_INDEX_LOAD.
/** Report optimized DDL operation (without redo log),
corresponding to MLOG_INDEX_LOAD.
@param[in] space_id tablespace identifier
*/
extern void(*log_optimized_ddl_op)(ulint space_id);
extern void (*log_optimized_ddl_op)(ulint space_id);
/** Report backup-unfriendly TRUNCATE operation (with separate log file),
corresponding to MLOG_TRUNCATE. */
extern void (*log_truncate)();
/** Report an operation to create, delete, or rename a file during backup.
@param[in] space_id tablespace identifier

View File

@ -3528,7 +3528,7 @@ lock_table_create(
UT_LIST_ADD_LAST(trx->lock.trx_locks, lock);
#ifdef WITH_WSREP
if (c_lock) {
if (c_lock && wsrep_on_trx(trx)) {
if (wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
ut_list_insert(table->locks, c_lock, lock,
TableLockGetNode());
@ -3758,7 +3758,7 @@ lock_table_enqueue_waiting(
}
#ifdef WITH_WSREP
if (trx->lock.was_chosen_as_deadlock_victim) {
if (trx->lock.was_chosen_as_deadlock_victim && wsrep_on_trx(trx)) {
return(DB_DEADLOCK);
}
#endif /* WITH_WSREP */

View File

@ -959,12 +959,6 @@ log_write_up_to(
return;
}
if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL,
"log write up to: " LSN_PF,
lsn);
}
loop:
ut_ad(++loop_count < 128);
@ -1092,6 +1086,13 @@ loop:
}
}
if (UNIV_UNLIKELY(srv_shutdown_state != SRV_SHUTDOWN_NONE)) {
service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL,
"InnoDB log write: "
LSN_PF "," LSN_PF,
log_sys.write_lsn, lsn);
}
if (log_sys.is_encrypted()) {
log_crypt(write_buf + area_start, log_sys.write_lsn,
area_end - area_start);

View File

@ -169,11 +169,16 @@ typedef std::map<
static recv_spaces_t recv_spaces;
/** Report optimized DDL operation (without redo log), corresponding to MLOG_INDEX_LOAD.
/** Report optimized DDL operation (without redo log),
corresponding to MLOG_INDEX_LOAD.
@param[in] space_id tablespace identifier
*/
void (*log_optimized_ddl_op)(ulint space_id);
/** Report backup-unfriendly TRUNCATE operation (with separate log file),
corresponding to MLOG_TRUNCATE. */
void (*log_truncate)();
/** Report an operation to create, delete, or rename a file during backup.
@param[in] space_id tablespace identifier
@param[in] flags tablespace flags (NULL if not create)
@ -189,11 +194,9 @@ void (*log_file_op)(ulint space_id, const byte* flags,
@param[in,out] name file name
@param[in] len length of the file name
@param[in] space_id the tablespace ID
@param[in] deleted whether this is a MLOG_FILE_DELETE record
@retval true if able to process file successfully.
@retval false if unable to process the file */
@param[in] deleted whether this is a MLOG_FILE_DELETE record */
static
bool
void
fil_name_process(
char* name,
ulint len,
@ -201,15 +204,13 @@ fil_name_process(
bool deleted)
{
if (srv_operation == SRV_OPERATION_BACKUP) {
return true;
return;
}
ut_ad(srv_operation == SRV_OPERATION_NORMAL
|| srv_operation == SRV_OPERATION_RESTORE
|| srv_operation == SRV_OPERATION_RESTORE_EXPORT);
bool processed = true;
/* We will also insert space=NULL into the map, so that
further checks can ensure that a MLOG_FILE_NAME record was
scanned before applying any page records for the space_id. */
@ -256,7 +257,6 @@ fil_name_process(
<< f.name << "' and '" << name << "'."
" You must delete one of them.";
recv_sys->found_corrupt_fs = true;
processed = false;
}
break;
@ -309,7 +309,6 @@ fil_name_process(
" remove the .ibd file, you can set"
" --innodb_force_recovery.";
recv_sys->found_corrupt_fs = true;
processed = false;
break;
}
@ -320,7 +319,6 @@ fil_name_process(
break;
}
}
return(processed);
}
/** Parse or process a MLOG_FILE_* record.
@ -1093,6 +1091,12 @@ recv_parse_or_apply_log_rec_body(
}
return(ptr + 8);
case MLOG_TRUNCATE:
if (log_truncate) {
ut_ad(srv_operation != SRV_OPERATION_NORMAL);
log_truncate();
recv_sys->found_corrupt_fs = true;
return NULL;
}
return(truncate_t::parse_redo_entry(ptr, end_ptr, space_id));
default:

View File

@ -846,7 +846,8 @@ page_cur_insert_rec_write_log(
ulint i;
if (index->table->is_temporary()) {
ut_ad(!mlog_open(mtr, 0));
mtr->set_modified();
ut_ad(mtr->get_log_mode() == MTR_LOG_NO_REDO);
return;
}

View File

@ -3915,7 +3915,7 @@ row_import_for_mysql(
DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
err = DB_TOO_MANY_CONCURRENT_TRXS;);
#ifdef BTR_CUR_HASH_ADAPT
/* On DISCARD TABLESPACE, we did not drop any adaptive hash
index entries. If we replaced the discarded tablespace with a
smaller one here, there could still be some adaptive hash
@ -3932,6 +3932,7 @@ row_import_for_mysql(
break;
}
}
#endif /* BTR_CUR_HASH_ADAPT */
if (err != DB_SUCCESS) {
char table_name[MAX_FULL_NAME_LEN + 1];

View File

@ -3934,17 +3934,6 @@ row_merge_drop_indexes(
ut_ad(prev);
ut_a(table->fts);
fts_drop_index(table, index, trx);
/* Since
INNOBASE_SHARE::idx_trans_tbl
is shared between all open
ha_innobase handles to this
table, no thread should be
accessing this dict_index_t
object. Also, we should be
holding LOCK=SHARED MDL on the
table even after the MDL
upgrade timeout. */
/* We can remove a DICT_FTS
index from the cache, because
we do not allow ADD FULLTEXT INDEX

View File

@ -3536,6 +3536,7 @@ row_drop_table_for_mysql(
if (!table->no_rollback()) {
if (table->space != fil_system.sys_space) {
#ifdef BTR_CUR_HASH_ADAPT
/* On DISCARD TABLESPACE, we would not drop the
adaptive hash index entries. If the tablespace is
missing here, delete-marking the record in SYS_INDEXES
@ -3557,6 +3558,7 @@ row_drop_table_for_mysql(
goto funct_exit;
}
}
#endif /* BTR_CUR_HASH_ADAPT */
/* Delete the link file if used. */
if (DICT_TF_HAS_DATA_DIR(table->flags)) {

View File

@ -452,6 +452,7 @@ row_vers_build_clust_v_col(
byte* record= 0;
ut_ad(dict_index_has_virtual(index));
ut_ad(index->table == clust_index->table);
if (vcol_info != NULL) {
vcol_info->set_used();

View File

@ -2417,7 +2417,22 @@ static bool srv_purge_should_exit()
return(true);
}
/* Slow shutdown was requested. */
return !trx_sys.any_active_transactions() && !trx_sys.history_size();
if (ulint history_size = trx_sys.history_size()) {
#if defined HAVE_SYSTEMD && !defined EMBEDDED_LIBRARY
static ib_time_t progress_time;
ib_time_t time = ut_time();
if (time - progress_time >= 15) {
progress_time = time;
service_manager_extend_timeout(
INNODB_EXTEND_TIMEOUT_INTERVAL,
"InnoDB: to purge " ULINTPF " transactions",
history_size);
}
#endif
return false;
}
return !trx_sys.any_active_transactions();
}
/*********************************************************************//**
@ -2583,14 +2598,6 @@ srv_do_purge(ulint* n_total_purged)
(++count % rseg_truncate_frequency) == 0);
*n_total_purged += n_pages_purged;
if (n_pages_purged) {
service_manager_extend_timeout(
INNODB_EXTEND_TIMEOUT_INTERVAL,
"InnoDB " ULINTPF " pages purged", n_pages_purged);
/* The previous round still did some work. */
continue;
}
} while (n_pages_purged > 0 && !purge_sys.paused()
&& !srv_purge_should_exit());

View File

@ -258,7 +258,12 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
trx_sys.get_max_trx_id(), mtr);
}
/* Before any transaction-generating background threads or the
/* After the purge thread has been given permission to exit,
we may roll back transactions (trx->undo_no==0)
in THD::cleanup() invoked from unlink_thd() in fast shutdown,
or in trx_rollback_resurrected() in slow shutdown.
Before any transaction-generating background threads or the
purge have been started, recv_recovery_rollback_active() can
start transactions in row_merge_drop_temp_indexes() and
fts_drop_orphaned_tables(), and roll back recovered transactions.
@ -268,17 +273,15 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
innodb_force_recovery=2 or innodb_force_recovery=3.
DROP TABLE may be executed at any innodb_force_recovery level.
After the purge thread has been given permission to exit,
in fast shutdown, we may roll back transactions (trx->undo_no==0)
in THD::cleanup() invoked from unlink_thd(), and we may also
continue to execute user transactions. */
During fast shutdown, we may also continue to execute
user transactions. */
ut_ad(srv_undo_sources
|| trx->undo_no == 0
|| (!purge_sys.enabled()
&& (srv_startup_is_before_trx_rollback_phase
|| trx_rollback_is_active
|| srv_force_recovery >= SRV_FORCE_NO_BACKGROUND))
|| ((trx->undo_no == 0 || trx->mysql_thd
|| trx->internal)
|| ((trx->mysql_thd || trx->internal)
&& srv_fast_shutdown));
#ifdef WITH_WSREP