Merge 10.2 into 10.3

Marko Mäkelä 2021-04-13 10:26:01 +03:00
commit 6e6318b29b
40 changed files with 566 additions and 131 deletions

View File

@ -111,6 +111,15 @@ then
sed '/Package: mariadb-plugin-cassandra/,/^$/d' -i debian/control
fi
# From Debian Stretch/Ubuntu Bionic onwards dh-systemd is just an empty
# transitional metapackage and the functionality was merged into debhelper.
# In Ubuntu Hirsute it was completely removed, so it can't be referenced anymore.
# Keep using it only on Debian Jessie and Ubuntu Xenial.
if apt-cache madison dh-systemd | grep 'dh-systemd' >/dev/null 2>&1
then
sed 's/debhelper (>= 9.20160709~),/debhelper (>= 9), dh-systemd,/' -i debian/control
fi
# Mroonga, TokuDB never built on Travis CI anyway, see build flags above
if [[ $TRAVIS ]]
then

debian/control (vendored): 2 changes
View File

@ -5,7 +5,7 @@ Maintainer: MariaDB Developers <maria-developers@lists.launchpad.net>
Build-Depends: bison,
chrpath,
cmake (>= 2.7),
debhelper (>= 9),
debhelper (>= 9.20160709~),
dh-apparmor,
dh-exec,
dh-systemd,

View File

@ -44,6 +44,7 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
#include <mysql.h>
#include <mysqld.h>
#include <my_sys.h>
#include <stdlib.h>
#include <string.h>
#include <limits>
#include "common.h"
@ -108,6 +109,13 @@ xb_mysql_connect()
return(NULL);
}
#if !defined(DONT_USE_MYSQL_PWD)
if (!opt_password)
{
opt_password=getenv("MYSQL_PWD");
}
#endif
if (!opt_secure_auth) {
mysql_options(connection, MYSQL_SECURE_AUTH,
(char *) &opt_secure_auth);
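With the MYSQL_PWD fallback added above, mariabackup can pick up the password from the environment when --password is not given. A minimal usage sketch, assuming a build that includes this change; the password value and target directory are hypothetical:

    # Password comes from the environment instead of the command line
    MYSQL_PWD=secret mariabackup --backup --user=root --target-dir=/var/backups/full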

View File

@ -41,7 +41,6 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
#include <my_global.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mysql.h>
#include <my_dir.h>
@ -905,12 +904,6 @@ ibx_init()
opt_user = opt_ibx_user;
opt_password = opt_ibx_password;
#if !defined(DONT_USE_MYSQL_PWD)
if (!opt_password)
{
opt_password=getenv("MYSQL_PWD");
}
#endif
opt_host = opt_ibx_host;
opt_defaults_group = opt_ibx_defaults_group;
opt_socket = opt_ibx_socket;

View File

@ -1809,33 +1809,33 @@ static int prepare_export()
// Process defaults-file, it can have some --lc-language stuff,
// which is, unfortunately, still necessary to get mysqld up
if (strncmp(orig_argv1,"--defaults-file=",16) == 0)
if (strncmp(orig_argv1,"--defaults-file=", 16) == 0)
{
snprintf(cmdline, sizeof cmdline,
IF_WIN("\"","") "\"%s\" --mysqld \"%s\" "
IF_WIN("\"","") "\"%s\" --mysqld \"%s\""
" --defaults-extra-file=./backup-my.cnf --defaults-group-suffix=%s --datadir=."
" --innodb --innodb-fast-shutdown=0 --loose-partition"
" --innodb_purge_rseg_truncate_frequency=1 --innodb-buffer-pool-size=%llu"
" --console --skip-log-error --skip-log-bin --bootstrap %s < "
" --console --skip-log-error --skip-log-bin --bootstrap %s< "
BOOTSTRAP_FILENAME IF_WIN("\"",""),
mariabackup_exe,
orig_argv1, (my_defaults_group_suffix?my_defaults_group_suffix:""),
xtrabackup_use_memory,
(srv_force_recovery ? "--innodb-force-recovery=1" : ""));
(srv_force_recovery ? "--innodb-force-recovery=1 " : ""));
}
else
{
snprintf(cmdline, sizeof cmdline,
IF_WIN("\"","") "\"%s\" --mysqld"
IF_WIN("\"","") "\"%s\" --mysqld"
" --defaults-file=./backup-my.cnf --defaults-group-suffix=%s --datadir=."
" --innodb --innodb-fast-shutdown=0 --loose-partition"
" --innodb_purge_rseg_truncate_frequency=1 --innodb-buffer-pool-size=%llu"
" --console --log-error= --skip-log-bin --bootstrap %s < "
" --console --log-error= --skip-log-bin --bootstrap %s< "
BOOTSTRAP_FILENAME IF_WIN("\"",""),
mariabackup_exe,
(my_defaults_group_suffix?my_defaults_group_suffix:""),
xtrabackup_use_memory,
(srv_force_recovery ? "--innodb-force-recovery=1" : ""));
(srv_force_recovery ? "--innodb-force-recovery=1 " : ""));
}
msg("Prepare export : executing %s\n", cmdline);
@ -6559,6 +6559,8 @@ int main(int argc, char **argv)
{
char **client_defaults, **server_defaults;
my_getopt_prefix_matching= 0;
if (get_exepath(mariabackup_exe,FN_REFLEN, argv[0]))
strncpy(mariabackup_exe,argv[0], FN_REFLEN-1);

View File

@ -497,23 +497,21 @@ sub mtr_report_stats ($$$$) {
$test_time = sprintf("%.3f", $test->{timer} / 1000);
$test->{'name'} =~ s/$current_suite\.//;
my $test_result;
# if a test case has to be retried it should have the result MTR_RES_FAILED in jUnit XML
if ($test->{'retries'} > 0) {
$test_result = "MTR_RES_FAILED";
my $combinations;
if (defined($test->{combinations})){
$combinations = join ',', sort @{$test->{combinations}};
} else {
$test_result = $test->{'result'};
$combinations = "";
}
$xml_report .= qq(\t\t<testcase assertions="" classname="$current_suite" name="$test->{'name'}" status="$test_result" time="$test_time");
$xml_report .= qq(\t\t<testcase assertions="" classname="$current_suite" name="$test->{'name'}" ).
qq(status="$test->{'result'}" time="$test_time" combinations="$combinations");
my $comment = $test->{'comment'};
$comment =~ s/[\"]//g;
my $comment= replace_special_symbols($test->{'comment'});
# if a test case has to be retried it should have the result MTR_RES_FAILED in jUnit XML
if ($test->{'result'} eq "MTR_RES_FAILED" || $test->{'retries'} > 0) {
if ($test->{'result'} eq "MTR_RES_FAILED") {
my $logcontents = $test->{'logfile-failed'} || $test->{'logfile'};
$logcontents= $logcontents.$test->{'warnings'}."\n";
# remove any double ] that would end the cdata
$logcontents =~ s/]]/\x{fffd}/g;
# replace wide characters that aren't allowed in XML 1.0
@ -576,6 +574,16 @@ sub mtr_print_line () {
print '-' x 74 . "\n";
}
sub replace_special_symbols($) {
my $text= shift;
$text =~ s/&/&#38;/g;
$text =~ s/'/&#39;/g;
$text =~ s/"/&#34;/g;
$text =~ s/</&lt;/g;
$text =~ s/>/&gt;/g;
return $text;
}
sub mtr_print_thick_line {
my $char= shift || '=';

View File

@ -540,4 +540,31 @@ id select_type table type possible_keys key key_len ref rows Extra
set join_cache_level=default;
set optimizer_switch= @save_optimizer_switch;
DROP TABLE t1,t2;
set @save_optimizer_switch= @@optimizer_switch;
set optimizer_switch="derived_merge=on";
CREATE TABLE t1 (id int, d2 datetime, id1 int) ;
insert into t1 values (1,'2020-01-01 10:10:10',1),(2,'2020-01-01 10:10:10',2),(3,'2020-01-01 10:10:10',3);
CREATE TABLE t2 (id int, d1 datetime, id1 int) ;
insert into t2 values (1,'2020-01-01 10:10:10',1),(2,'2020-01-01 10:10:10',2),(3,'2020-01-01 10:10:10',2);
prepare stmt from "
SELECT * from
(SELECT min(d2) AS d2, min(d1) AS d1 FROM
(SELECT t1.d2 AS d2, (SELECT t2.d1
FROM t2 WHERE t1.id1 = t2.id1
ORDER BY t2.id DESC LIMIT 1) AS d1
FROM t1
) dt2
) ca
ORDER BY ca.d2;";
execute stmt;
d2 d1
2020-01-01 10:10:10 2020-01-01 10:10:10
execute stmt;
d2 d1
2020-01-01 10:10:10 2020-01-01 10:10:10
set optimizer_switch= @save_optimizer_switch;
DROP TABLE t1, t2;
#
# End of 10.3 tests
#
set optimizer_switch=@exit_optimizer_switch;

View File

@ -406,5 +406,38 @@ set optimizer_switch= @save_optimizer_switch;
DROP TABLE t1,t2;
#
# MDEV-25182: Complex query in Store procedure corrupts results
#
set @save_optimizer_switch= @@optimizer_switch;
set optimizer_switch="derived_merge=on";
CREATE TABLE t1 (id int, d2 datetime, id1 int) ;
insert into t1 values (1,'2020-01-01 10:10:10',1),(2,'2020-01-01 10:10:10',2),(3,'2020-01-01 10:10:10',3);
CREATE TABLE t2 (id int, d1 datetime, id1 int) ;
insert into t2 values (1,'2020-01-01 10:10:10',1),(2,'2020-01-01 10:10:10',2),(3,'2020-01-01 10:10:10',2);
prepare stmt from "
SELECT * from
(SELECT min(d2) AS d2, min(d1) AS d1 FROM
(SELECT t1.d2 AS d2, (SELECT t2.d1
FROM t2 WHERE t1.id1 = t2.id1
ORDER BY t2.id DESC LIMIT 1) AS d1
FROM t1
) dt2
) ca
ORDER BY ca.d2;";
execute stmt;
execute stmt;
set optimizer_switch= @save_optimizer_switch;
DROP TABLE t1, t2;
--echo #
--echo # End of 10.3 tests
--echo #
# The following command must be the last one in the file
set optimizer_switch=@exit_optimizer_switch;

View File

@ -26,10 +26,10 @@ create procedure sp() select * from `v1
flush tables;
use test;
exec $MYSQL_DUMP --compact --comment --routines --add-drop-database --databases 'mysqltest1
exec $MYSQL_DUMP --compact --comments --routines --add-drop-database --databases 'mysqltest1
1tsetlqsym';
exec $MYSQL_DUMP --compact --comment --routines --add-drop-database --databases 'mysqltest1
exec $MYSQL_DUMP --compact --comments --routines --add-drop-database --databases 'mysqltest1
1tsetlqsym' | $MYSQL;
show tables from `mysqltest1
@ -45,11 +45,11 @@ create database `test\``
show databases like 'test%';
exec $MYSQL_DUMP --compact --comment --add-drop-database --databases 'test`' 'test\`
exec $MYSQL_DUMP --compact --comments --add-drop-database --databases 'test`' 'test\`
\! ls
#';
exec $MYSQL_DUMP --compact --comment --add-drop-database --databases 'test`' 'test\`
exec $MYSQL_DUMP --compact --comments --add-drop-database --databases 'test`' 'test\`
\! ls
#' | $MYSQL;

View File

@ -21,7 +21,7 @@ select 7 as expected, /*!01000 1 + /*!01000 8 + /*!01000 error */ 16 + */ 2 + */
select 4 as expected, /* 1 + /*!01000 8 + */ 2 + */ 4;
EOF
--exec $MYSQL --comment --force --table test <$MYSQLTEST_VARDIR/tmp/bug39559.sql
--exec $MYSQL --comments --force --table test <$MYSQLTEST_VARDIR/tmp/bug39559.sql
--remove_file $MYSQLTEST_VARDIR/tmp/bug39559.sql
--echo # Bug#46527 "COMMIT AND CHAIN RELEASE does not make sense"

View File

@ -5496,5 +5496,42 @@ id select_type table type possible_keys key key_len ref rows Extra
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
#
# MDEV-25108: Running of the EXPLAIN EXTENDED statement produces extra warning
# in case it is executed in PS (prepared statement) mode
#
CREATE TABLE t1 (c int);
CREATE TABLE t2 (d int);
# EXPLAIN EXTENDED in regular way (not PS mode)
EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 0.00 Const row not found
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
Warnings:
Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where 0) AS `(SELECT 1 FROM t2 WHERE d = c)` from `test`.`t1`
SHOW WARNINGS;
Level Code Message
Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where 0) AS `(SELECT 1 FROM t2 WHERE d = c)` from `test`.`t1`
# Now run the same EXPLAIN EXTENDED in PS mode. The number of warnings
# and their content must be the same as when the statement is run
# in the regular way
PREPARE stmt FROM "EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1";
Warnings:
Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 0.00 Const row not found
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
Warnings:
Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where 0) AS `(SELECT 1 FROM t2 WHERE d = c)` from `test`.`t1`
SHOW WARNINGS;
Level Code Message
Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where 0) AS `(SELECT 1 FROM t2 WHERE d = c)` from `test`.`t1`
DEALLOCATE PREPARE stmt;
DROP TABLE t1, t2;
#
# End of 10.2 tests
#

View File

@ -4963,6 +4963,26 @@ EXECUTE stmt;
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
--echo #
--echo # MDEV-25108: Running of the EXPLAIN EXTENDED statement produces extra warning
--echo # in case it is executed in PS (prepared statement) mode
--echo #
CREATE TABLE t1 (c int);
CREATE TABLE t2 (d int);
--echo # EXPLAIN EXTENDED in regular way (not PS mode)
EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1;
SHOW WARNINGS;
--echo # Now run the same EXPLAIN EXTENDED in PS mode. The number of warnings
--echo # and their content must be the same as when the statement is run
--echo # in the regular way
PREPARE stmt FROM "EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1";
EXECUTE stmt;
SHOW WARNINGS;
DEALLOCATE PREPARE stmt;
DROP TABLE t1, t2;
--echo #
--echo # End of 10.2 tests
--echo #

View File

@ -894,9 +894,13 @@ sub run_test_server ($$$) {
rename $log_file_name, $log_file_name.".failed";
}
delete($result->{result});
$result->{retries}= $retries+1;
$result->write_test($sock, 'TESTCASE');
{
local @$result{'retries', 'result'};
delete $result->{result};
$result->{retries}= $retries+1;
$result->write_test($sock, 'TESTCASE');
}
push(@$completed, $result);
next;
}
}

View File

@ -14,14 +14,12 @@ MW-328A : MDEV-22666 galera.MW-328A MTR failed: "Semaphore wait has lasted > 600
MW-328B : MDEV-22666 galera.MW-328A MTR failed: "Semaphore wait has lasted > 600 seconds" and do not release port 16002
MW-329 : MDEV-19962 Galera test failure on MW-329
galera_as_slave_replication_bundle : MDEV-15785 OPTION_GTID_BEGIN is set in Gtid_log_event::do_apply_event()
galera_binlog_stmt_autoinc : MDEV-19959 Galera test failure on galera_binlog_stmt_autoinc
galera_concurrent_ctas : MDEV-24842 Galera test failure on galera_concurrent_ctas
galera_gcache_recover_manytrx : MDEV-18834 Galera test failure
galera_mdl_race : MDEV-21524: galera.galera_mdl_race MTR failed: query 'reap' succeeded - should have failed with errno 1213
galera_parallel_simple : MDEV-20318 galera.galera_parallel_simple fails
galera_partition : MDEV-21806: galera.galera_partition MTR failed: failed to recover from DONOR state
galera_shutdown_nonprim : MDEV-21493 galera.galera_shutdown_nonprim
galera_sst_mariabackup_encrypt_with_key : MDEV-21484 galera_sst_mariabackup_encrypt_with_key
galera_var_node_address : MDEV-20485 Galera test failure
galera_wan : MDEV-17259 Test failure on galera.galera_wan
partition : MDEV-19958 Galera test failure on galera.partition

View File

@ -1,3 +1,5 @@
connection node_1;
connection node_2;
SELECT @@global.wsrep_sst_auth;
@@global.wsrep_sst_auth
********

View File

@ -0,0 +1,7 @@
!include ../galera_2nodes.cnf
[mysqld.1]
auto_increment_offset=1
[mysqld.2]
auto_increment_offset=2

View File

@ -5,8 +5,3 @@ wsrep_sst_auth=root:
[mysqld.2]
wsrep_sst_auth=root:

View File

@ -1,6 +1,11 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
--source include/auto_increment_offset_save.inc
#
# MDEV-10492: Assertion failure on shutdown when wsrep_sst_auth set in config
#
@ -30,4 +35,5 @@ SELECT @@global.wsrep_sst_auth;
--source include/wait_condition.inc
SELECT @@global.wsrep_sst_auth;
# Restore original auto_increment_offset values.
--source include/auto_increment_offset_restore.inc

View File

@ -0,0 +1,69 @@
#
# MDEV-24971 InnoDB access freed virtual column
# after rollback of secondary index
#
CREATE TABLE t1(f1 INT, f2 INT AS (f1 + 2) VIRTUAL)ENGINE=InnoDB;
INSERT INTO t1(f1) VALUES(1), (1);
ALTER TABLE t1 ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE, LOCK=EXCLUSIVE;
ERROR 23000: Duplicate entry '3' for key 'f2'
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int(11) DEFAULT NULL,
`f2` int(11) GENERATED ALWAYS AS (`f1` + 2) VIRTUAL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1(f1 INT, f2 INT AS (f1 + 2) VIRTUAL)ENGINE=InnoDB;
INSERT INTO t1(f1) VALUES(1), (1);
ALTER TABLE t1 ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE, LOCK=SHARED;
ERROR 23000: Duplicate entry '3' for key 'f2'
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int(11) DEFAULT NULL,
`f2` int(11) GENERATED ALWAYS AS (`f1` + 2) VIRTUAL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1(f1 INT, f2 INT AS (f1) VIRTUAL)ENGINE=InnoDB;
SET DEBUG_DBUG="+d,create_index_fail";
SET DEBUG_SYNC="innodb_inplace_alter_table_enter SIGNAL con1_go WAIT_FOR alter_signal";
ALTER TABLE t1 ADD COLUMN f3 INT AS (f1) VIRTUAL, ADD INDEX(f2, f3);
connect con1,localhost,root,,,;
SET DEBUG_SYNC="now WAIT_FOR con1_go";
BEGIN;
SELECT * FROM t1;
f1 f2
SET DEBUG_SYNC="now SIGNAL alter_signal";
connection default;
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
connection con1;
rollback;
connection default;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int(11) DEFAULT NULL,
`f2` int(11) GENERATED ALWAYS AS (`f1`) VIRTUAL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1(f1 INT, f2 INT AS (f1) VIRTUAL)ENGINE=InnoDB;
SET DEBUG_DBUG="+d,create_index_fail";
SET DEBUG_SYNC="innodb_inplace_alter_table_enter SIGNAL con1_go WAIT_FOR alter_signal";
ALTER TABLE t1 ADD INDEX(f2);
connection con1;
SET DEBUG_SYNC="now WAIT_FOR con1_go";
BEGIN;
INSERT INTO t1(f1) VALUES(1);
SET DEBUG_SYNC="now SIGNAL alter_signal";
connection default;
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
connection con1;
rollback;
connection default;
disconnect con1;
DROP TABLE t1;
CREATE TABLE t1(f1 CHAR(100), f2 CHAR(100) as (f1) VIRTUAL)ENGINE=InnoDB;
ALTER TABLE t1 ADD COLUMN f3 CHAR(100) AS (f2) VIRTUAL, ADD INDEX(f3(10), f1, f3(12));
ERROR 42S21: Duplicate column name 'f3'
DROP TABLE t1;
SET DEBUG_SYNC=RESET;

View File

@ -0,0 +1,71 @@
--source include/have_innodb.inc
--source include/have_debug.inc
--echo #
--echo # MDEV-24971 InnoDB access freed virtual column
--echo # after rollback of secondary index
--echo #
# Exclusive lock must not defer the index removal
CREATE TABLE t1(f1 INT, f2 INT AS (f1 + 2) VIRTUAL)ENGINE=InnoDB;
INSERT INTO t1(f1) VALUES(1), (1);
--error ER_DUP_ENTRY
ALTER TABLE t1 ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE, LOCK=EXCLUSIVE;
SHOW CREATE TABLE t1;
DROP TABLE t1;
# If a shared lock is used and the table doesn't have any other open handle,
# then InnoDB must not defer the index removal
CREATE TABLE t1(f1 INT, f2 INT AS (f1 + 2) VIRTUAL)ENGINE=InnoDB;
INSERT INTO t1(f1) VALUES(1), (1);
--error ER_DUP_ENTRY
ALTER TABLE t1 ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE, LOCK=SHARED;
SHOW CREATE TABLE t1;
DROP TABLE t1;
# InnoDB should store the newly dropped virtual column in the index's
# new_vcol_info when the rollback of the ALTER happens
CREATE TABLE t1(f1 INT, f2 INT AS (f1) VIRTUAL)ENGINE=InnoDB;
SET DEBUG_DBUG="+d,create_index_fail";
SET DEBUG_SYNC="innodb_inplace_alter_table_enter SIGNAL con1_go WAIT_FOR alter_signal";
SEND ALTER TABLE t1 ADD COLUMN f3 INT AS (f1) VIRTUAL, ADD INDEX(f2, f3);
connect(con1,localhost,root,,,);
SET DEBUG_SYNC="now WAIT_FOR con1_go";
BEGIN;
SELECT * FROM t1;
SET DEBUG_SYNC="now SIGNAL alter_signal";
connection default;
--error ER_DUP_ENTRY
reap;
connection con1;
rollback;
connection default;
SHOW CREATE TABLE t1;
DROP TABLE t1;
CREATE TABLE t1(f1 INT, f2 INT AS (f1) VIRTUAL)ENGINE=InnoDB;
SET DEBUG_DBUG="+d,create_index_fail";
SET DEBUG_SYNC="innodb_inplace_alter_table_enter SIGNAL con1_go WAIT_FOR alter_signal";
send ALTER TABLE t1 ADD INDEX(f2);
connection con1;
SET DEBUG_SYNC="now WAIT_FOR con1_go";
BEGIN;
INSERT INTO t1(f1) VALUES(1);
SET DEBUG_SYNC="now SIGNAL alter_signal";
connection default;
--error ER_DUP_ENTRY
reap;
connection con1;
rollback;
connection default;
disconnect con1;
DROP TABLE t1;
CREATE TABLE t1(f1 CHAR(100), f2 CHAR(100) as (f1) VIRTUAL)ENGINE=InnoDB;
--error ER_DUP_FIELDNAME
ALTER TABLE t1 ADD COLUMN f3 CHAR(100) AS (f2) VIRTUAL, ADD INDEX(f3(10), f1, f3(12));
DROP TABLE t1;
SET DEBUG_SYNC=RESET;

View File

@ -329,7 +329,7 @@ while($ntables)
-- echo ### detect failure. Before the patch mysqlbinlog would find
-- echo ### a corrupted event, thence would fail.
-- let $MYSQLD_DATADIR= `SELECT @@datadir`
-- exec $MYSQL_BINLOG -v --hex $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_bug50018.binlog
-- exec $MYSQL_BINLOG -v --hexdump $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_bug50018.binlog
## clean up
## For debugging purposes you might want not to remove these

View File

@ -765,9 +765,9 @@ if [[ $ssyslog -eq 1 ]];then
logger -p daemon.info -t ${ssystag}wsrep-sst-$WSREP_SST_OPT_ROLE "$@"
}
INNOAPPLY="${INNOBACKUPEX_BIN} --innobackupex $disver $iapts \$INNOEXTRA --apply-log \$rebuildcmd \${DATA} 2>&1 | logger -p daemon.err -t ${ssystag}innobackupex-apply"
INNOMOVE="${INNOBACKUPEX_BIN} --innobackupex ${WSREP_SST_OPT_CONF} $disver $impts --move-back --force-non-empty-directories \${DATA} 2>&1 | logger -p daemon.err -t ${ssystag}innobackupex-move"
INNOBACKUP="${INNOBACKUPEX_BIN} --innobackupex ${WSREP_SST_OPT_CONF} $disver $iopts \$tmpopts \$INNOEXTRA --galera-info --stream=\$sfmt \$itmpdir 2> >(logger -p daemon.err -t ${ssystag}innobackupex-backup)"
INNOAPPLY="${INNOBACKUPEX_BIN} --prepare $disver $iapts \$INNOEXTRA $rebuildcmd --target-dir=\${DATA} 2>&1 | logger -p daemon.err -t ${ssystag}innobackupex-apply"
INNOMOVE="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_CONF} --move-back $disver $impts --force-non-empty-directories --target-dir=\${DATA} 2>&1 | logger -p daemon.err -t ${ssystag}innobackupex-move"
INNOBACKUP="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_CONF} --backup $disver $iopts \$tmpopts \$INNOEXTRA --galera-info --stream=\$sfmt --target-dir=\$itmpdir 2> >(logger -p daemon.err -t ${ssystag}innobackupex-backup)"
fi
else
@ -829,9 +829,9 @@ then
fi
INNOAPPLY="${INNOBACKUPEX_BIN} --innobackupex $disver $iapts \$INNOEXTRA --apply-log \$rebuildcmd \${DATA} &> ${INNOAPPLYLOG}"
INNOMOVE="${INNOBACKUPEX_BIN} --innobackupex ${WSREP_SST_OPT_CONF} $disver $impts --move-back --force-non-empty-directories \${DATA} &> ${INNOMOVELOG}"
INNOBACKUP="${INNOBACKUPEX_BIN} --innobackupex ${WSREP_SST_OPT_CONF} $disver $iopts \$tmpopts \$INNOEXTRA --galera-info --stream=\$sfmt \$itmpdir 2> ${INNOBACKUPLOG}"
INNOAPPLY="${INNOBACKUPEX_BIN} --prepare $disver $iapts \$INNOEXTRA $rebuildcmd --target-dir=\${DATA} &> ${INNOAPPLYLOG}"
INNOMOVE="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_CONF} --move-back $disver $impts --force-non-empty-directories --target-dir=\${DATA} &> ${INNOMOVELOG}"
INNOBACKUP="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_CONF} --backup $disver $iopts \$tmpopts \$INNOEXTRA --galera-info --stream=\$sfmt --target-dir=\$itmpdir 2> ${INNOBACKUPLOG}"
fi
get_stream
@ -854,7 +854,7 @@ then
-z $(parse_cnf --mysqld tmpdir "") && \
-z $(parse_cnf xtrabackup tmpdir "") ]]; then
xtmpdir=$(mktemp -d)
tmpopts=" --tmpdir=$xtmpdir"
tmpopts="--tmpdir=$xtmpdir"
wsrep_log_info "Using $xtmpdir as xtrabackup temporary directory"
fi
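For reference, after this change the INNOAPPLY, INNOMOVE and INNOBACKUP strings expand to native mariabackup option syntax instead of innobackupex emulation. A hand-written sketch of the three phases, assuming INNOBACKUPEX_BIN resolves to mariabackup; the paths are placeholders:

    # Backup phase on the donor (the SST script streams this to the joiner)
    mariabackup --backup --galera-info --stream=xbstream --target-dir=/tmp/sst
    # Prepare phase on the joiner, applying the redo log to the received files
    mariabackup --prepare --target-dir=/var/lib/mysql/.sst
    # Move the prepared files into the datadir
    mariabackup --move-back --force-non-empty-directories --target-dir=/var/lib/mysql/.sst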

View File

@ -5276,13 +5276,19 @@ bool Item_ref_null_helper::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
@param resolved_item item which was resolved in outer SELECT(for warning)
@param mark_item item which should be marked (can differ in case of
substitution)
@param suppress_warning_output flag specifying whether to suppress output of
a warning message
*/
static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
Item_ident *resolved_item,
Item_ident *mark_item)
Item_ident *mark_item,
bool suppress_warning_output)
{
DBUG_ENTER("mark_as_dependent");
DBUG_PRINT("info", ("current select: %d (%p) last: %d (%p)",
current->select_number, current,
(last ? last->select_number : 0), last));
/* store pointer on SELECT_LEX from which item is dependent */
if (mark_item && mark_item->can_be_depended)
@ -5293,7 +5299,7 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
if (current->mark_as_dependent(thd, last,
/** resolved_item psergey-thu **/ mark_item))
DBUG_RETURN(TRUE);
if (thd->lex->describe & DESCRIBE_EXTENDED)
if ((thd->lex->describe & DESCRIBE_EXTENDED) && !suppress_warning_output)
{
const char *db_name= (resolved_item->db_name ?
resolved_item->db_name : "");
@ -5322,6 +5328,8 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
@param found_item Item which was found during resolving (if resolved
identifier belongs to VIEW)
@param resolved_item Identifier which was resolved
@param suppress_warning_output flag specifying whether to suppress output of
a warning message
@note
We have to mark all items between current_sel (including) and
@ -5335,7 +5343,8 @@ void mark_select_range_as_dependent(THD *thd,
SELECT_LEX *last_select,
SELECT_LEX *current_sel,
Field *found_field, Item *found_item,
Item_ident *resolved_item)
Item_ident *resolved_item,
bool suppress_warning_output)
{
/*
Go from current SELECT to SELECT where field was resolved (it
@ -5370,7 +5379,7 @@ void mark_select_range_as_dependent(THD *thd,
found_field->table->map;
prev_subselect_item->const_item_cache= 0;
mark_as_dependent(thd, last_select, current_sel, resolved_item,
dependent);
dependent, suppress_warning_output);
}
}
@ -5836,7 +5845,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
context->select_lex, this,
((ref_type == REF_ITEM ||
ref_type == FIELD_ITEM) ?
(Item_ident*) (*reference) : 0));
(Item_ident*) (*reference) : 0), false);
return 0;
}
}
@ -5848,7 +5857,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
context->select_lex, this,
((ref_type == REF_ITEM || ref_type == FIELD_ITEM) ?
(Item_ident*) (*reference) :
0));
0), false);
if (thd->lex->in_sum_func &&
thd->lex->in_sum_func->nest_level >= select->nest_level)
{
@ -5962,7 +5971,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
set_max_sum_func_level(thd, select);
mark_as_dependent(thd, last_checked_context->select_lex,
context->select_lex, rf,
rf);
rf, false);
return 0;
}
@ -5975,7 +5984,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
set_max_sum_func_level(thd, select);
mark_as_dependent(thd, last_checked_context->select_lex,
context->select_lex,
this, (Item_ident*)*reference);
this, (Item_ident*)*reference, false);
if (last_checked_context->select_lex->having_fix_field)
{
Item_ref *rf;
@ -7800,7 +7809,7 @@ public:
if (tbl->table == item->field->table)
{
if (sel != current_select)
mark_as_dependent(thd, sel, current_select, item, item);
mark_as_dependent(thd, sel, current_select, item, item, false);
return;
}
}
@ -7996,7 +8005,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
((refer_type == REF_ITEM ||
refer_type == FIELD_ITEM) ?
(Item_ident*) (*reference) :
0));
0), false);
/*
view reference found, we substituted it instead of this
Item, so can quit
@ -8046,7 +8055,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
goto error;
thd->change_item_tree(reference, fld);
mark_as_dependent(thd, last_checked_context->select_lex,
current_sel, fld, fld);
current_sel, fld, fld, false);
/*
A reference is resolved to a nest level that's outer or the same as
the nest level of the enclosing set function : adjust the value of
@ -8069,7 +8078,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
/* Should be checked in resolve_ref_in_select_and_group(). */
DBUG_ASSERT(*ref && (*ref)->fixed);
mark_as_dependent(thd, last_checked_context->select_lex,
context->select_lex, this, this);
context->select_lex, this, this, false);
/*
A reference is resolved to a nest level that's outer or the same as
the nest level of the enclosing set function : adjust the value of

View File

@ -6703,7 +6703,8 @@ void mark_select_range_as_dependent(THD *thd,
st_select_lex *last_select,
st_select_lex *current_sel,
Field *found_field, Item *found_item,
Item_ident *resolved_item);
Item_ident *resolved_item,
bool suppress_warning_output);
extern Cached_item *new_Cached_item(THD *thd, Item *item,
bool pass_through_ref);

View File

@ -285,7 +285,8 @@ public:
friend bool Item_ref::fix_fields(THD *, Item **);
friend void mark_select_range_as_dependent(THD*,
st_select_lex*, st_select_lex*,
Field*, Item*, Item_ident*);
Field*, Item*, Item_ident*,
bool);
friend bool convert_join_subqueries_to_semijoins(JOIN *join);
};

View File

@ -6318,7 +6318,7 @@ find_field_in_tables(THD *thd, Item_ident *item,
if (!all_merged && current_sel != last_select)
{
mark_select_range_as_dependent(thd, last_select, current_sel,
found, *ref, item);
found, *ref, item, true);
}
}
return found;

View File

@ -2723,7 +2723,7 @@ void st_select_lex_unit::exclude_tree()
*/
bool st_select_lex::mark_as_dependent(THD *thd, st_select_lex *last,
Item *dependency)
Item_ident *dependency)
{
DBUG_ASSERT(this != last);
@ -2731,10 +2731,14 @@ bool st_select_lex::mark_as_dependent(THD *thd, st_select_lex *last,
/*
Mark all selects, from the one being resolved up to the one just before
the select where the table was found, as dependent (on the select where
the table was found).
We move by name resolution context, because during a merge some selects
can be excluded from the SELECT tree
*/
SELECT_LEX *s= this;
Name_resolution_context *c= &this->context;
do
{
SELECT_LEX *s= c->select_lex;
if (!(s->uncacheable & UNCACHEABLE_DEPENDENT_GENERATED))
{
// Select is dependent of outer select
@ -2756,7 +2760,7 @@ bool st_select_lex::mark_as_dependent(THD *thd, st_select_lex *last,
if (subquery_expr && subquery_expr->mark_as_dependent(thd, last,
dependency))
return TRUE;
} while ((s= s->outer_select()) != last && s != 0);
} while ((c= c->outer_context) != NULL && (c->select_lex != last));
is_correlated= TRUE;
this->master_unit()->item->is_correlated= TRUE;
return FALSE;

View File

@ -1211,7 +1211,8 @@ public:
}
inline bool is_subquery_function() { return master_unit()->item != 0; }
bool mark_as_dependent(THD *thd, st_select_lex *last, Item *dependency);
bool mark_as_dependent(THD *thd, st_select_lex *last,
Item_ident *dependency);
void set_braces(bool value)
{

View File

@ -12840,10 +12840,12 @@ ha_rows JOIN_TAB::get_examined_rows()
bool JOIN_TAB::preread_init()
{
TABLE_LIST *derived= table->pos_in_table_list;
DBUG_ENTER("JOIN_TAB::preread_init");
if (!derived || !derived->is_materialized_derived())
{
preread_init_done= TRUE;
return FALSE;
DBUG_RETURN(FALSE);
}
/* Materialize derived table/view. */
@ -12852,7 +12854,7 @@ bool JOIN_TAB::preread_init()
derived->get_unit()->uncacheable) &&
mysql_handle_single_derived(join->thd->lex,
derived, DT_CREATE | DT_FILL))
return TRUE;
DBUG_RETURN(TRUE);
if (!(derived->get_unit()->uncacheable & UNCACHEABLE_DEPENDENT) ||
derived->is_nonrecursive_derived_with_rec_ref())
@ -12872,7 +12874,7 @@ bool JOIN_TAB::preread_init()
if (init_ftfuncs(join->thd, join->select_lex, MY_TEST(join->order)))
return TRUE;
return FALSE;
DBUG_RETURN(FALSE);
}

View File

@ -292,7 +292,7 @@ dict_table_try_drop_aborted(
&& !UT_LIST_GET_FIRST(table->locks)) {
/* Silence a debug assertion in row_merge_drop_indexes(). */
ut_d(table->acquire());
row_merge_drop_indexes(trx, table, TRUE);
row_merge_drop_indexes(trx, table, true);
ut_d(table->release());
ut_ad(table->get_ref_count() == ref_count);
trx_commit_for_mysql(trx);

View File

@ -931,7 +931,7 @@ dict_mem_fill_vcol_from_v_indexes(
Later virtual column set will be
refreshed during loading of table. */
if (!dict_index_has_virtual(index)
|| index->has_new_v_col) {
|| index->has_new_v_col()) {
continue;
}

View File

@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2020, MariaDB Corporation.
Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -156,9 +156,24 @@ schedule new estimates for table and index statistics to be calculated.
void dict_stats_update_if_needed_func(dict_table_t *table)
#endif
{
ut_ad(table->stat_initialized);
ut_ad(!mutex_own(&dict_sys->mutex));
if (UNIV_UNLIKELY(!table->stat_initialized)) {
/* The table may have been evicted from dict_sys
and reloaded internally by InnoDB for FOREIGN KEY
processing, but not reloaded by the SQL layer.
We can (re)compute the transient statistics when the
table is actually loaded by the SQL layer.
Note: If InnoDB persistent statistics are enabled,
we will skip the updates. We must do this, because
dict_table_get_n_rows() below assumes that the
statistics have been initialized. The DBA may have
to execute ANALYZE TABLE. */
return;
}
ulonglong counter = table->stat_modified_counter++;
ulonglong n_rows = dict_table_get_n_rows(table);
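As the new comment notes, when persistent statistics are enabled the update is skipped for such an internally reloaded table, and the DBA may have to refresh the statistics manually. A minimal sketch from the command-line client; the table name is a hypothetical example:

    # Recompute statistics for a table whose stat_initialized flag was never set
    mysql -e 'ANALYZE TABLE test.t1;'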

View File

@ -322,13 +322,6 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
@return whether the table will be rebuilt */
bool need_rebuild () const { return(old_table != new_table); }
/** Clear uncommitted added indexes after a failed operation. */
void clear_added_indexes()
{
for (ulint i= 0; i < num_to_add_index; i++)
add_index[i]->detach_columns(true);
}
/** Convert table-rebuilding ALTER to instant ALTER. */
void prepare_instant()
{
@ -376,6 +369,42 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
}
}
/** @return whether the given column is being added */
bool is_new_vcol(const dict_v_col_t &v_col) const
{
for (ulint i= 0; i < num_to_add_vcol; i++)
if (&add_vcol[i] == &v_col)
return true;
return false;
}
/** During rollback, make newly added indexes point to
newly added virtual columns. */
void clean_new_vcol_index()
{
ut_ad(old_table == new_table);
const dict_index_t *index= dict_table_get_first_index(old_table);
while ((index= dict_table_get_next_index(index)) != NULL)
{
if (!index->has_virtual() || index->is_committed())
continue;
ulint n_drop_new_vcol= index->get_new_n_vcol();
for (ulint i= 0; n_drop_new_vcol && i < index->n_fields; i++)
{
dict_col_t *col= index->fields[i].col;
/* Skip the non-virtual and old virtual columns */
if (!col->is_virtual())
continue;
dict_v_col_t *vcol= reinterpret_cast<dict_v_col_t*>(col);
if (!is_new_vcol(*vcol))
continue;
index->fields[i].col= &index->new_vcol_info->
add_drop_v_col(index->heap, vcol, --n_drop_new_vcol)->m_col;
}
}
}
private:
// Disable copying
ha_innobase_inplace_ctx(const ha_innobase_inplace_ctx&);
@ -3074,7 +3103,7 @@ online_retry_drop_indexes_low(
ut_ad(table->get_ref_count() >= 1);
if (table->drop_aborted) {
row_merge_drop_indexes(trx, table, TRUE);
row_merge_drop_indexes(trx, table, true);
}
}
@ -5660,7 +5689,7 @@ new_table_failed:
for (ulint a = 0; a < ctx->num_to_add_index; a++) {
dict_index_t* index = ctx->add_index[a];
const bool has_new_v_col = index->has_new_v_col;
const ulint n_v_col = index->get_new_n_vcol();
index = create_index_dict(ctx->trx, index, add_v);
error = ctx->trx->error_state;
if (error != DB_SUCCESS) {
@ -5690,7 +5719,9 @@ error_handling_drop_uncached_1:
goto error_handling_drop_uncached_1;
}
index->parser = index_defs[a].parser;
index->has_new_v_col = has_new_v_col;
if (n_v_col) {
index->assign_new_v_col(n_v_col);
}
/* Note the id of the transaction that created this
index, we use it to restrict readers from accessing
this index, to ensure read consistency. */
@ -5761,7 +5792,7 @@ error_handling_drop_uncached_1:
for (ulint a = 0; a < ctx->num_to_add_index; a++) {
dict_index_t* index = ctx->add_index[a];
const bool has_new_v_col = index->has_new_v_col;
const ulint n_v_col = index->get_new_n_vcol();
DBUG_EXECUTE_IF(
"create_index_metadata_fail",
if (a + 1 == ctx->num_to_add_index) {
@ -5793,7 +5824,9 @@ error_handling_drop_uncached:
}
index->parser = index_defs[a].parser;
index->has_new_v_col = has_new_v_col;
if (n_v_col) {
index->assign_new_v_col(n_v_col);
}
/* Note the id of the transaction that created this
index, we use it to restrict readers from accessing
this index, to ensure read consistency. */
@ -6019,7 +6052,7 @@ error_handled:
online_retry_drop_indexes_with_trx(user_table, ctx->trx);
} else {
ut_ad(!ctx->need_rebuild());
row_merge_drop_indexes(ctx->trx, user_table, TRUE);
row_merge_drop_indexes(ctx->trx, user_table, true);
trx_commit_for_mysql(ctx->trx);
}
@ -7269,7 +7302,6 @@ oom:
that we hold at most a shared lock on the table. */
m_prebuilt->trx->error_info = NULL;
ctx->trx->error_state = DB_SUCCESS;
ctx->clear_added_indexes();
DBUG_RETURN(true);
}
@ -7364,17 +7396,18 @@ temporary index prefix
@param table the TABLE
@param locked TRUE=table locked, FALSE=may need to do a lazy drop
@param trx the transaction
*/
static MY_ATTRIBUTE((nonnull))
@param alter_trx transaction which takes S-lock on the table
while creating the index */
static
void
innobase_rollback_sec_index(
/*========================*/
dict_table_t* user_table,
const TABLE* table,
ibool locked,
trx_t* trx)
dict_table_t* user_table,
const TABLE* table,
bool locked,
trx_t* trx,
const trx_t* alter_trx=NULL)
{
row_merge_drop_indexes(trx, user_table, locked);
row_merge_drop_indexes(trx, user_table, locked, alter_trx);
/* Free the table->fts only if there is no FTS_DOC_ID
in the table */
@ -7469,7 +7502,12 @@ rollback_inplace_alter_table(
DBUG_ASSERT(ctx->new_table == prebuilt->table);
innobase_rollback_sec_index(
prebuilt->table, table, FALSE, ctx->trx);
prebuilt->table, table,
(ha_alter_info->alter_info->requested_lock
== Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE),
ctx->trx, prebuilt->trx);
ctx->clean_new_vcol_index();
}
trx_commit_for_mysql(ctx->trx);

View File

@ -328,7 +328,7 @@ public:
/** Re-latch all latches */
void latch();
dict_index_t* index() { return m_index; }
table_name_t table_name() { return m_index->table->name; }
private:
/** Insert a tuple to a page in a level

View File

@ -681,6 +681,35 @@ struct dict_v_col_t{
};
/** Data structure for a newly added virtual column in an index.
It is used only during rollback_inplace_alter_table() of an index
that was being added on newly added virtual columns, and it uses
the index heap. It should be freed when the index is removed
from the cache. */
struct dict_add_v_col_info
{
ulint n_v_col;
dict_v_col_t *v_col;
/** Add the newly added virtual column while rolling back
the index which contains new virtual columns
@param heap index heap from which to allocate the copy
@param col virtual column to be duplicated
@param offset offset at which to duplicate the virtual column */
dict_v_col_t* add_drop_v_col(mem_heap_t *heap, dict_v_col_t *col,
ulint offset)
{
ut_ad(n_v_col);
ut_ad(offset < n_v_col);
if (!v_col)
v_col= static_cast<dict_v_col_t*>
(mem_heap_alloc(heap, n_v_col * sizeof *v_col));
new (&v_col[offset]) dict_v_col_t();
v_col[offset].m_col= col->m_col;
v_col[offset].v_pos= col->v_pos;
return &v_col[offset];
}
};
/** Data structure for newly added virtual column in a table */
struct dict_add_v_col_t{
/** number of new virtual column */
@ -919,9 +948,13 @@ struct dict_index_t{
dict_field_t* fields; /*!< array of field descriptions */
st_mysql_ftparser*
parser; /*!< fulltext parser plugin */
bool has_new_v_col;
/*!< whether it has a newly added virtual
column in ALTER */
/** Information about newly added virtual columns in this index
during an ALTER; non-NULL means such columns exist. It stores the
columns in case the ALTER fails, uses the heap of dict_index_t, and
should be freed when the index is removed from the table. */
dict_add_v_col_info* new_vcol_info;
bool index_fts_syncing;/*!< Whether the fts index is
still syncing in the background;
FIXME: remove this and use MDL */
@ -1068,9 +1101,8 @@ struct dict_index_t{
/** @return whether the index is corrupted */
inline bool is_corrupted() const;
/** Detach the virtual columns from the index that is to be removed.
@param whether to reset fields[].col */
void detach_columns(bool clear= false)
/** Detach the virtual columns from the index that is to be removed. */
void detach_columns()
{
if (!has_virtual())
return;
@ -1080,8 +1112,6 @@ struct dict_index_t{
if (!col || !col->is_virtual())
continue;
col->detach(*this);
if (clear)
fields[i].col= NULL;
}
}
@ -1148,6 +1178,30 @@ struct dict_index_t{
bool
vers_history_row(const rec_t* rec, bool &history_row);
/** Assign the number of new virtual columns to be added as part
of the index
@param n_vcol number of virtual columns to be added */
void assign_new_v_col(ulint n_vcol)
{
new_vcol_info= static_cast<dict_add_v_col_info*>(
mem_heap_zalloc(heap, sizeof *new_vcol_info));
new_vcol_info->n_v_col= n_vcol;
}
/* @return whether the index has newly added virtual columns */
bool has_new_v_col() const
{
return new_vcol_info != NULL;
}
/* @return number of newly added virtual columns */
ulint get_new_n_vcol() const
{
if (new_vcol_info)
return new_vcol_info->n_v_col;
return 0;
}
#ifdef BTR_CUR_HASH_ADAPT
/** @return a clone of this */
dict_index_t* clone() const;
@ -2041,6 +2095,17 @@ public:
/** mysql_row_templ_t for base columns used for compute the virtual
columns */
dict_vcol_templ_t* vc_templ;
/* @return whether the table is locked by any transaction
other than the given one */
bool has_lock_other_than(const trx_t *trx) const
{
for (lock_t *lock= UT_LIST_GET_FIRST(locks); lock;
lock= UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock))
if (lock->trx != trx)
return true;
return false;
}
};
inline void dict_index_t::set_modified(mtr_t& mtr) const

View File

@ -167,18 +167,20 @@ row_merge_drop_indexes_dict(
table_id_t table_id)/*!< in: table identifier */
MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Drop those indexes which were created before an error occurred.
/** Drop indexes that were created before an error occurred.
The data dictionary must have been locked exclusively by the caller,
because the transaction will not be committed. */
because the transaction will not be committed.
@param trx dictionary transaction
@param table table containing the indexes
@param locked true if the table is locked,
false if the drop may need to be deferred (lazy drop)
@param alter_trx Alter table transaction */
void
row_merge_drop_indexes(
/*===================*/
trx_t* trx, /*!< in/out: transaction */
dict_table_t* table, /*!< in/out: table containing the indexes */
ibool locked) /*!< in: TRUE=table locked,
FALSE=may need to do a lazy drop */
MY_ATTRIBUTE((nonnull));
trx_t* trx,
dict_table_t* table,
bool locked,
const trx_t* alter_trx=NULL);
/*********************************************************************//**
Drop all partially created indexes during crash recovery. */

View File

@ -1222,7 +1222,7 @@ row_merge_write_fts_word(
if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
ib::error() << "Failed to write word to FTS auxiliary"
" index table "
<< ins_ctx->btr_bulk->index()->table->name
<< ins_ctx->btr_bulk->table_name()
<< ", error " << error;
ret = error;
}

View File

@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2005, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2014, 2020, MariaDB Corporation.
Copyright (c) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -3828,17 +3828,20 @@ row_merge_drop_indexes_dict(
trx->op_info = "";
}
/*********************************************************************//**
Drop indexes that were created before an error occurred.
/** Drop indexes that were created before an error occurred.
The data dictionary must have been locked exclusively by the caller,
because the transaction will not be committed. */
because the transaction will not be committed.
@param trx dictionary transaction
@param table table containing the indexes
@param locked true if the table is locked,
false if the drop may need to be deferred (lazy drop)
@param alter_trx Alter table transaction */
void
row_merge_drop_indexes(
/*===================*/
trx_t* trx, /*!< in/out: dictionary transaction */
dict_table_t* table, /*!< in/out: table containing the indexes */
ibool locked) /*!< in: TRUE=table locked,
FALSE=may need to do a lazy drop */
trx_t* trx,
dict_table_t* table,
bool locked,
const trx_t* alter_trx)
{
dict_index_t* index;
dict_index_t* next_index;
@ -3864,7 +3867,7 @@ row_merge_drop_indexes(
A concurrent purge will be prevented by dict_operation_lock. */
if (!locked && (table->get_ref_count() > 1
|| UT_LIST_GET_FIRST(table->locks))) {
|| table->has_lock_other_than(alter_trx))) {
/* We will have to drop the indexes later, when the
table is guaranteed to be no longer in use. Mark the
indexes as incomplete and corrupted, so that other
@ -4414,6 +4417,7 @@ row_merge_create_index(
dict_index_t* index;
ulint n_fields = index_def->n_fields;
ulint i;
ulint n_add_vcol = 0;
DBUG_ENTER("row_merge_create_index");
@ -4438,7 +4442,7 @@ row_merge_create_index(
ut_ad(ifield->col_no >= table->n_v_def);
name = add_v->v_col_name[
ifield->col_no - table->n_v_def];
index->has_new_v_col = true;
n_add_vcol++;
} else {
name = dict_table_get_v_col_name(
table, ifield->col_no);
@ -4450,6 +4454,10 @@ row_merge_create_index(
dict_mem_index_add_field(index, name, ifield->prefix_len);
}
if (n_add_vcol) {
index->assign_new_v_col(n_add_vcol);
}
DBUG_RETURN(index);
}

View File

@ -729,7 +729,7 @@ row_purge_skip_uncommitted_virtual_index(
not support LOCK=NONE when adding an index on newly
added virtual column.*/
while (index != NULL && dict_index_has_virtual(index)
&& !index->is_committed() && index->has_new_v_col) {
&& !index->is_committed() && index->has_new_v_col()) {
index = dict_table_get_next_index(index);
}
}

View File

@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2020, MariaDB Corporation.
Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software