Merge 10.3 into 10.4

Marko Mäkelä 2019-11-19 01:32:50 +02:00
commit 589a1235b6
22 changed files with 439 additions and 46 deletions


@@ -0,0 +1,46 @@
--source include/have_partition.inc
--disable_warnings
drop table if exists t1,t3;
--enable_warnings
--echo #
--echo # MDEV-20611: MRR scan over partitioned InnoDB table produces "Out of memory" error
--echo #
create table t1(a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
set @tmp=@@storage_engine;
eval set storage_engine=$engine_type;
create table t3 (
ID bigint(20) NOT NULL AUTO_INCREMENT,
part_id int,
key_col int,
col2 int,
key(key_col),
PRIMARY KEY (ID,part_id)
) PARTITION BY RANGE (part_id)
(PARTITION p1 VALUES LESS THAN (3),
PARTITION p2 VALUES LESS THAN (7),
PARTITION p3 VALUES LESS THAN (10)
);
show create table t3;
set storage_engine= @tmp;
insert into t3 select
A.a+10*B.a,
A.a,
B.a,
123456
from t1 A, t1 B;
set optimizer_switch='mrr=on';
--replace_column 9 #
explain
select * from t3 force index (key_col) where key_col < 3;
select * from t3 force index (key_col) where key_col < 3;
drop table t1,t3;


@@ -0,0 +1,79 @@
drop table if exists t1,t3;
#
# MDEV-20611: MRR scan over partitioned InnoDB table produces "Out of memory" error
#
create table t1(a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
set @tmp=@@storage_engine;
set storage_engine=Aria;
create table t3 (
ID bigint(20) NOT NULL AUTO_INCREMENT,
part_id int,
key_col int,
col2 int,
key(key_col),
PRIMARY KEY (ID,part_id)
) PARTITION BY RANGE (part_id)
(PARTITION p1 VALUES LESS THAN (3),
PARTITION p2 VALUES LESS THAN (7),
PARTITION p3 VALUES LESS THAN (10)
);
show create table t3;
Table Create Table
t3 CREATE TABLE `t3` (
`ID` bigint(20) NOT NULL AUTO_INCREMENT,
`part_id` int(11) NOT NULL,
`key_col` int(11) DEFAULT NULL,
`col2` int(11) DEFAULT NULL,
PRIMARY KEY (`ID`,`part_id`),
KEY `key_col` (`key_col`)
) ENGINE=Aria DEFAULT CHARSET=latin1
PARTITION BY RANGE (`part_id`)
(PARTITION `p1` VALUES LESS THAN (3) ENGINE = Aria,
PARTITION `p2` VALUES LESS THAN (7) ENGINE = Aria,
PARTITION `p3` VALUES LESS THAN (10) ENGINE = Aria)
set storage_engine= @tmp;
insert into t3 select
A.a+10*B.a,
A.a,
B.a,
123456
from t1 A, t1 B;
set optimizer_switch='mrr=on';
explain
select * from t3 force index (key_col) where key_col < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 range key_col key_col 5 NULL # Using where; Rowid-ordered scan
select * from t3 force index (key_col) where key_col < 3;
ID part_id key_col col2
1 0 0 123456
1 1 0 123456
2 2 0 123456
10 0 1 123456
11 1 1 123456
12 2 1 123456
20 0 2 123456
21 1 2 123456
22 2 2 123456
3 3 0 123456
4 4 0 123456
5 5 0 123456
6 6 0 123456
13 3 1 123456
14 4 1 123456
15 5 1 123456
16 6 1 123456
23 3 2 123456
24 4 2 123456
25 5 2 123456
26 6 2 123456
7 7 0 123456
8 8 0 123456
9 9 0 123456
17 7 1 123456
18 8 1 123456
19 9 1 123456
27 7 2 123456
28 8 2 123456
29 9 2 123456
drop table t1,t3;


@@ -0,0 +1,2 @@
let $engine_type= Aria;
--source include/partition_mrr.inc


@@ -0,0 +1,79 @@
drop table if exists t1,t3;
#
# MDEV-20611: MRR scan over partitioned InnoDB table produces "Out of memory" error
#
create table t1(a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
set @tmp=@@storage_engine;
set storage_engine=InnoDB;
create table t3 (
ID bigint(20) NOT NULL AUTO_INCREMENT,
part_id int,
key_col int,
col2 int,
key(key_col),
PRIMARY KEY (ID,part_id)
) PARTITION BY RANGE (part_id)
(PARTITION p1 VALUES LESS THAN (3),
PARTITION p2 VALUES LESS THAN (7),
PARTITION p3 VALUES LESS THAN (10)
);
show create table t3;
Table Create Table
t3 CREATE TABLE `t3` (
`ID` bigint(20) NOT NULL AUTO_INCREMENT,
`part_id` int(11) NOT NULL,
`key_col` int(11) DEFAULT NULL,
`col2` int(11) DEFAULT NULL,
PRIMARY KEY (`ID`,`part_id`),
KEY `key_col` (`key_col`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
PARTITION BY RANGE (`part_id`)
(PARTITION `p1` VALUES LESS THAN (3) ENGINE = InnoDB,
PARTITION `p2` VALUES LESS THAN (7) ENGINE = InnoDB,
PARTITION `p3` VALUES LESS THAN (10) ENGINE = InnoDB)
set storage_engine= @tmp;
insert into t3 select
A.a+10*B.a,
A.a,
B.a,
123456
from t1 A, t1 B;
set optimizer_switch='mrr=on';
explain
select * from t3 force index (key_col) where key_col < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 range key_col key_col 5 NULL # Using where; Rowid-ordered scan
select * from t3 force index (key_col) where key_col < 3;
ID part_id key_col col2
1 0 0 123456
1 1 0 123456
2 2 0 123456
10 0 1 123456
11 1 1 123456
12 2 1 123456
20 0 2 123456
21 1 2 123456
22 2 2 123456
3 3 0 123456
4 4 0 123456
5 5 0 123456
6 6 0 123456
13 3 1 123456
14 4 1 123456
15 5 1 123456
16 6 1 123456
23 3 2 123456
24 4 2 123456
25 5 2 123456
26 6 2 123456
7 7 0 123456
8 8 0 123456
9 9 0 123456
17 7 1 123456
18 8 1 123456
19 9 1 123456
27 7 2 123456
28 8 2 123456
29 9 2 123456
drop table t1,t3;


@@ -0,0 +1,4 @@
--source include/have_innodb.inc
let $engine_type= InnoDB;
--source include/partition_mrr.inc


@@ -0,0 +1,79 @@
drop table if exists t1,t3;
#
# MDEV-20611: MRR scan over partitioned InnoDB table produces "Out of memory" error
#
create table t1(a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
set @tmp=@@storage_engine;
set storage_engine=myisam;
create table t3 (
ID bigint(20) NOT NULL AUTO_INCREMENT,
part_id int,
key_col int,
col2 int,
key(key_col),
PRIMARY KEY (ID,part_id)
) PARTITION BY RANGE (part_id)
(PARTITION p1 VALUES LESS THAN (3),
PARTITION p2 VALUES LESS THAN (7),
PARTITION p3 VALUES LESS THAN (10)
);
show create table t3;
Table Create Table
t3 CREATE TABLE `t3` (
`ID` bigint(20) NOT NULL AUTO_INCREMENT,
`part_id` int(11) NOT NULL,
`key_col` int(11) DEFAULT NULL,
`col2` int(11) DEFAULT NULL,
PRIMARY KEY (`ID`,`part_id`),
KEY `key_col` (`key_col`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1
PARTITION BY RANGE (`part_id`)
(PARTITION `p1` VALUES LESS THAN (3) ENGINE = MyISAM,
PARTITION `p2` VALUES LESS THAN (7) ENGINE = MyISAM,
PARTITION `p3` VALUES LESS THAN (10) ENGINE = MyISAM)
set storage_engine= @tmp;
insert into t3 select
A.a+10*B.a,
A.a,
B.a,
123456
from t1 A, t1 B;
set optimizer_switch='mrr=on';
explain
select * from t3 force index (key_col) where key_col < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 range key_col key_col 5 NULL # Using where; Rowid-ordered scan
select * from t3 force index (key_col) where key_col < 3;
ID part_id key_col col2
1 0 0 123456
1 1 0 123456
2 2 0 123456
10 0 1 123456
11 1 1 123456
12 2 1 123456
20 0 2 123456
21 1 2 123456
22 2 2 123456
3 3 0 123456
4 4 0 123456
5 5 0 123456
6 6 0 123456
13 3 1 123456
14 4 1 123456
15 5 1 123456
16 6 1 123456
23 3 2 123456
24 4 2 123456
25 5 2 123456
26 6 2 123456
7 7 0 123456
8 8 0 123456
9 9 0 123456
17 7 1 123456
18 8 1 123456
19 9 1 123456
27 7 2 123456
28 8 2 123456
29 9 2 123456
drop table t1,t3;


@@ -0,0 +1,3 @@
let $engine_type= myisam;
--source include/partition_mrr.inc


@@ -1,3 +1,4 @@
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
CREATE TABLE t1(
a INT AUTO_INCREMENT PRIMARY KEY,
b CHAR(1),
@@ -5,6 +6,7 @@ c INT,
INDEX(b))
ENGINE=InnoDB STATS_PERSISTENT=0;
SET GLOBAL innodb_change_buffering_debug = 1;
BEGIN;
INSERT INTO t1 VALUES(0,'x',1);
INSERT INTO t1 SELECT 0,b,c FROM t1;
INSERT INTO t1 SELECT 0,b,c FROM t1;
@@ -18,14 +20,14 @@ INSERT INTO t1 SELECT 0,b,c FROM t1;
INSERT INTO t1 SELECT 0,b,c FROM t1;
INSERT INTO t1 SELECT 0,b,c FROM t1;
INSERT INTO t1 SELECT 0,b,c FROM t1;
COMMIT;
InnoDB 0 transactions not purged
# restart: --innodb-force-recovery=6 --innodb-change-buffer-dump
check table t1;
Table Op Msg_type Msg_text
test.t1 check Warning InnoDB: Index 'b' contains #### entries, should be 4096.
test.t1 check error Corrupt
# restart
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
InnoDB 0 transactions not purged
SET GLOBAL innodb_fast_shutdown=0;
# restart
DROP TABLE t1;


@@ -6,6 +6,7 @@
# The test is not big enough to use change buffering with larger page size.
--source include/have_innodb_max_16k.inc
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
--disable_query_log
call mtr.add_suppression("InnoDB: Failed to find tablespace for table `test`\\.`t1` in the cache\\. Attempting to load the tablespace with space id");
call mtr.add_suppression("InnoDB: Allocated tablespace ID \\d+ for test.t1, old maximum was");
@@ -29,6 +30,7 @@ SET GLOBAL innodb_change_buffering_debug = 1;
# Create enough rows for the table, so that the change buffer will be
# used for modifying the secondary index page. There must be multiple
# index pages, because changes to the root page are never buffered.
BEGIN;
INSERT INTO t1 VALUES(0,'x',1);
INSERT INTO t1 SELECT 0,b,c FROM t1;
INSERT INTO t1 SELECT 0,b,c FROM t1;
@@ -42,9 +44,13 @@ INSERT INTO t1 SELECT 0,b,c FROM t1;
INSERT INTO t1 SELECT 0,b,c FROM t1;
INSERT INTO t1 SELECT 0,b,c FROM t1;
INSERT INTO t1 SELECT 0,b,c FROM t1;
COMMIT;
let MYSQLD_DATADIR=`select @@datadir`;
let PAGE_SIZE=`select @@innodb_page_size`;
# Ensure that purge will not access the truncated .ibd file
--source include/wait_all_purged.inc
--source include/shutdown_mysqld.inc
# Corrupt the change buffer bitmap, to claim that pages are clean
@@ -84,14 +90,41 @@ EOF
--replace_regex /contains \d+ entries/contains #### entries/
check table t1;
--let $restart_parameters=
--source include/restart_mysqld.inc
--source include/shutdown_mysqld.inc
# Ensure that the slow shutdown will not time out due to running purge.
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
--source include/wait_all_purged.inc
# The change buffer merge for the injected corruption must complete
# without exceeding the 60-second shutdown_server timeout.
# Truncate the file to 5 pages, as if it were empty
perl;
do "$ENV{MTR_SUITE_DIR}/include/crc32.pl";
my $file = "$ENV{MYSQLD_DATADIR}/test/t1.ibd";
open(FILE, "+<$file") || die "Unable to open $file";
binmode FILE;
my $ps= $ENV{PAGE_SIZE};
my $pages=5;
my $page;
die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps;
my $full_crc32 = unpack("N",substr($page,54,4)) & 0x10; # FIL_SPACE_FLAGS
substr($page,46,4)=pack("N", $pages);
my $polynomial = 0x82f63b78; # CRC-32C
if ($full_crc32)
{
my $ck = mycrc32(substr($page, 0, $ps-4), 0, $polynomial);
substr($page, $ps-4, 4) = pack("N", $ck);
}
else
{
my $ck= pack("N",mycrc32(substr($page, 4, 22), 0, $polynomial) ^
mycrc32(substr($page, 38, $ps - 38 - 8), 0, $polynomial));
substr($page,0,4)=$ck;
substr($page,$ps-8,4)=$ck;
}
sysseek(FILE, 0, 0) || die "Unable to rewind $file\n";
syswrite(FILE, $page, $ps)==$ps || die "Unable to write $file\n";
truncate(FILE, $ps * $pages);
close(FILE) || die "Unable to close $file";
EOF
--let $restart_parameters=
--source include/start_mysqld.inc
SET GLOBAL innodb_fast_shutdown=0;
--source include/restart_mysqld.inc
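
The Perl block above shrinks t1.ibd to five pages and patches page 0 so the server accepts the truncated file: it rewrites the 4-byte page count (FSP_SIZE, at byte 46 of page 0) and then recomputes the page checksum, choosing the full_crc32 layout or the legacy two-range layout depending on a bit in the tablespace flags. The C++ sketch below mirrors only the full_crc32 case; the page size and the CRC conditioning (zero initial value, no final inversion) are assumptions of this sketch, and crc32.pl's mycrc32 remains the authoritative definition for the test.

// Minimal sketch (assumptions noted above) of the page-0 fix-up done in Perl:
// set FSP_SIZE and restore a full_crc32-style checksum before truncating.
#include <cstdint>
#include <cstdio>
#include <vector>

// Plain reflected CRC-32C (polynomial 0x82f63b78); conditioning assumed.
static uint32_t crc32c(const uint8_t *p, size_t n, uint32_t crc = 0) {
  while (n--) {
    crc ^= *p++;
    for (int k = 0; k < 8; k++)
      crc = (crc >> 1) ^ (0x82f63b78U & (0U - (crc & 1U)));
  }
  return crc;
}

static void put_be32(uint8_t *p, uint32_t v) {
  p[0] = uint8_t(v >> 24); p[1] = uint8_t(v >> 16);
  p[2] = uint8_t(v >> 8);  p[3] = uint8_t(v);
}

int main() {
  const size_t page_size = 16384;          // assumed innodb_page_size
  const uint32_t new_pages = 5;            // keep only 5 pages, as in the test
  std::vector<uint8_t> page(page_size, 0); // stand-in for page 0 read from t1.ibd

  put_be32(&page[38 + 8], new_pages);      // FSP_SIZE = FIL_PAGE_DATA(38) + 8
  // full_crc32 format: CRC over everything except the last 4 bytes,
  // stored big-endian in those last 4 bytes (as in the Perl above).
  put_be32(&page[page_size - 4], crc32c(page.data(), page_size - 4));

  std::printf("checksum=%08x\n", crc32c(page.data(), page_size - 4));
  // A real fix-up would now write page 0 back and truncate the file to
  // new_pages * page_size, as the Perl block does.
  return 0;
}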


@@ -5480,6 +5480,13 @@ int ha_partition::index_end()
if ((tmp= (*file)->ha_index_end()))
error= tmp;
}
else if ((*file)->inited == RND)
{
// Possible due to MRR
int tmp;
if ((tmp= (*file)->ha_rnd_end()))
error= tmp;
}
} while (*(++file));
destroy_record_priority_queue();
DBUG_RETURN(error);
@@ -6514,8 +6521,11 @@ int ha_partition::multi_range_read_next(range_id_t *range_info)
else if (unlikely((error= handle_unordered_next(table->record[0], FALSE))))
DBUG_RETURN(error);
*range_info=
((PARTITION_KEY_MULTI_RANGE *) m_range_info[m_last_part])->ptr;
if (!(m_mrr_mode & HA_MRR_NO_ASSOCIATION))
{
*range_info=
((PARTITION_KEY_MULTI_RANGE *) m_range_info[m_last_part])->ptr;
}
}
DBUG_RETURN(0);
}
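
Both ha_partition hunks above belong to the MDEV-20611 fix. The first closes a per-partition cursor that an MRR scan may have left in RND mode; the second writes *range_info back only when the caller asked for range association, apparently because with HA_MRR_NO_ASSOCIATION no association pointers are stored in m_range_info. The toy model below (hypothetical types, not the real handler API) shows the shape of that second guard.

#include <cstdio>
#include <vector>

// Toy model: a scan that can optionally report which range produced the
// current row. When the caller opts out of association -- the analogue of
// HA_MRR_NO_ASSOCIATION -- the per-range tags are never recorded, so they
// must not be read back.
struct ToyMrrScan {
  bool want_association;          // did the caller request range association?
  std::vector<int> rows;          // rows the scan will return
  std::vector<int> range_of_row;  // only meaningful when want_association
  size_t pos;

  bool next(int *row, int *range_id) {
    if (pos >= rows.size())
      return false;
    *row = rows[pos];
    if (want_association)         // same guard as in multi_range_read_next
      *range_id = range_of_row[pos];
    ++pos;
    return true;
  }
};

int main() {
  ToyMrrScan scan{false, {10, 20, 30}, {}, 0};  // association disabled
  int row, range_id = -1;
  while (scan.next(&row, &range_id))
    std::printf("row=%d\n", row);               // range_id deliberately unused
  return 0;
}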


@@ -1703,11 +1703,10 @@ bool DsMrr_impl::choose_mrr_impl(uint keyno, ha_rows rows, uint *flags,
}
uint add_len= share->key_info[keyno].key_length + primary_file->ref_length;
*bufsz -= add_len;
if (get_disk_sweep_mrr_cost(keyno, rows, *flags, bufsz, &dsmrr_cost))
if (get_disk_sweep_mrr_cost(keyno, rows, *flags, bufsz, add_len,
&dsmrr_cost))
return TRUE;
*bufsz += add_len;
bool force_dsmrr;
/*
If mrr_cost_based flag is not set, then set cost of DS-MRR to be minimum of
@@ -1796,6 +1795,11 @@ static void get_sort_and_sweep_cost(TABLE *table, ha_rows nrows, Cost_estimate *
@param rows E(Number of rows to be scanned)
@param flags Scan parameters (HA_MRR_* flags)
@param buffer_size INOUT Buffer size
IN: Buffer of size 0 means the function
will determine the best size and return it.
@param extra_mem_overhead Extra memory overhead of the MRR implementation
(the function assumes this many bytes of buffer
space will not be usable by DS-MRR)
@param cost OUT The cost
@retval FALSE OK
@@ -1804,7 +1808,9 @@ static void get_sort_and_sweep_cost(TABLE *table, ha_rows nrows, Cost_estimate *
*/
bool DsMrr_impl::get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
uint *buffer_size, Cost_estimate *cost)
uint *buffer_size,
uint extra_mem_overhead,
Cost_estimate *cost)
{
ulong max_buff_entries, elem_size;
ha_rows rows_in_full_step;
@@ -1814,11 +1820,24 @@ bool DsMrr_impl::get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
elem_size= primary_file->ref_length +
sizeof(void*) * (!MY_TEST(flags & HA_MRR_NO_ASSOCIATION));
max_buff_entries = *buffer_size / elem_size;
if (!max_buff_entries)
if (!*buffer_size)
{
/*
We are requested to determine how much memory we need.
Request memory to finish the scan in one pass but do not request
more than @@mrr_buff_size.
*/
*buffer_size= (uint) MY_MIN(extra_mem_overhead + elem_size*(ulong)rows,
MY_MAX(table->in_use->variables.mrr_buff_size,
extra_mem_overhead));
}
if (elem_size + extra_mem_overhead > *buffer_size)
return TRUE; /* Buffer has not enough space for even 1 rowid */
max_buff_entries = (*buffer_size - extra_mem_overhead) / elem_size;
/* Number of iterations we'll make with full buffer */
n_full_steps= (uint)floor(rows2double(rows) / max_buff_entries);
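
The new branch above answers a *buffer_size of 0 by computing a size itself: enough to hold every expected rowid in a single pass, capped by the session's MRR buffer size (mrr_buff_size in the code), and never below the fixed overhead. Below is a standalone sketch of that arithmetic with illustrative numbers; 262144 is commonly the server default for the MRR buffer, the other values are made up.

#include <algorithm>
#include <cstdio>

// Standalone sketch of the sizing rule added above, with illustrative numbers.
int main() {
  const unsigned long elem_size = 6 + sizeof(void*); // rowid + association ptr
  const unsigned long rows = 100000;                 // E(rows) for the range
  const unsigned long extra_mem_overhead = 1024;     // assumed bookkeeping cost
  const unsigned long mrr_buff_size = 262144;        // session MRR buffer limit

  // Enough for one pass, but capped; never below the fixed overhead.
  const unsigned long buffer_size =
      std::min(extra_mem_overhead + elem_size * rows,
               std::max(mrr_buff_size, extra_mem_overhead));

  if (elem_size + extra_mem_overhead > buffer_size) {
    std::puts("not even one rowid fits");            // the error path above
    return 1;
  }
  const unsigned long max_buff_entries =
      (buffer_size - extra_mem_overhead) / elem_size;
  std::printf("buffer_size=%lu, entries per pass=%lu\n",
              buffer_size, max_buff_entries);
  return 0;
}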


@@ -631,8 +631,9 @@ private:
bool choose_mrr_impl(uint keyno, ha_rows rows, uint *flags, uint *bufsz,
Cost_estimate *cost);
bool get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
uint *buffer_size, Cost_estimate *cost);
bool get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
uint *buffer_size, uint extra_mem_overhead,
Cost_estimate *cost);
bool check_cpk_scan(THD *thd, TABLE_SHARE *share, uint keyno, uint mrr_flags);
bool setup_buffer_sharing(uint key_size_in_keybuf, key_part_map key_tuple_map);


@@ -785,8 +785,8 @@ buf_read_ibuf_merge_pages(
#endif
for (ulint i = 0; i < n_stored; i++) {
fil_space_t* s = fil_space_acquire_for_io(space_ids[i]);
if (!s) {
fil_space_t* space = fil_space_acquire_silent(space_ids[i]);
if (!space) {
tablespace_deleted:
/* The tablespace was not found: remove all
entries for it */
@@ -798,8 +798,18 @@ tablespace_deleted:
continue;
}
const ulint zip_size = s->zip_size();
s->release_for_io();
if (UNIV_UNLIKELY(page_nos[i] >= space->size)) {
do {
ibuf_delete_recs(page_id_t(space_ids[i],
page_nos[i]));
} while (++i < n_stored
&& space_ids[i - 1] == space_ids[i]
&& page_nos[i] >= space->size);
i--;
next:
space->release();
continue;
}
const page_id_t page_id(space_ids[i], page_nos[i]);
@@ -815,7 +825,8 @@ tablespace_deleted:
buf_read_page_low(&err,
sync && (i + 1 == n_stored),
0,
BUF_READ_ANY_PAGE, page_id, zip_size,
BUF_READ_ANY_PAGE, page_id,
space->zip_size(),
true, true /* ignore_missing_space */);
switch(err) {
@@ -823,15 +834,20 @@ tablespace_deleted:
case DB_ERROR:
break;
case DB_TABLESPACE_DELETED:
space->release();
goto tablespace_deleted;
case DB_PAGE_CORRUPTED:
case DB_DECRYPTION_FAILED:
ib::error() << "Failed to read or decrypt " << page_id
<< " for change buffer merge";
ib::error() << "Failed to read or decrypt page "
<< page_nos[i]
<< " of '" << space->chain.start->name
<< "' for change buffer merge";
break;
default:
ut_error;
}
goto next;
}
os_aio_simulated_wake_handler_threads();
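
The reworked loop above no longer schedules reads of change-buffered pages that lie beyond the end of their tablespace (the situation the truncated .ibd in the new test produces); instead it deletes the buffered records for the whole run of such pages and releases the tablespace once per batch. A toy rendition of that control flow, with hypothetical structures and made-up data:

#include <cstdio>
#include <map>
#include <vector>

// Toy rendition of the skip added above: buffered (space_id, page_no) entries
// arrive grouped by tablespace, and any entry whose page lies beyond the
// tablespace's current size is discarded in one inner loop instead of being
// scheduled for a read that cannot succeed.
struct Entry { unsigned space_id, page_no; };

static void discard(const Entry &e) {
  std::printf("delete buffered changes for (%u,%u)\n", e.space_id, e.page_no);
}

int main() {
  std::map<unsigned, unsigned> space_size = {{5, 4}, {6, 10}}; // pages per space
  std::vector<Entry> entries = {{5, 1}, {5, 7}, {5, 8}, {6, 2}};

  for (size_t i = 0; i < entries.size(); i++) {
    const unsigned size = space_size[entries[i].space_id];
    if (entries[i].page_no >= size) {
      // Same shape as the patch: consume the whole run of out-of-range pages
      // for this tablespace, then step back one so the outer loop continues.
      do {
        discard(entries[i]);
      } while (++i < entries.size()
               && entries[i].space_id == entries[i - 1].space_id
               && entries[i].page_no >= size);
      i--;
      continue;
    }
    std::printf("schedule read of (%u,%u)\n",
                entries[i].space_id, entries[i].page_no);
  }
  return 0;
}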


@@ -901,13 +901,12 @@ dict_create_index_tree_in_mem(
/** Drop the index tree associated with a row in SYS_INDEXES table.
@param[in,out] rec SYS_INDEXES record
@param[in,out] pcur persistent cursor on rec
@param[in,out] trx dictionary transaction
@param[in,out] mtr mini-transaction */
void dict_drop_index_tree(rec_t* rec, btr_pcur_t* pcur, mtr_t* mtr)
void dict_drop_index_tree(rec_t* rec, btr_pcur_t* pcur, trx_t* trx, mtr_t* mtr)
{
byte* ptr;
ulint len;
ulint space;
ulint root_page_no;
ut_ad(mutex_own(&dict_sys.mutex));
ut_a(!dict_table_is_comp(dict_sys.sys_indexes));
@@ -918,7 +917,7 @@ void dict_drop_index_tree(rec_t* rec, btr_pcur_t* pcur, mtr_t* mtr)
btr_pcur_store_position(pcur, mtr);
root_page_no = mach_read_from_4(ptr);
const uint32_t root_page_no = mach_read_from_4(ptr);
if (root_page_no == FIL_NULL) {
/* The tree has already been freed */
@@ -933,18 +932,25 @@ void dict_drop_index_tree(rec_t* rec, btr_pcur_t* pcur, mtr_t* mtr)
ut_ad(len == 4);
space = mach_read_from_4(ptr);
const uint32_t space_id = mach_read_from_4(ptr);
ut_ad(space_id < SRV_TMP_SPACE_ID);
if (space_id != TRX_SYS_SPACE
&& trx_get_dict_operation(trx) == TRX_DICT_OP_TABLE) {
/* We are about to delete the entire .ibd file;
do not bother to free pages inside it. */
return;
}
ptr = rec_get_nth_field_old(
rec, DICT_FLD__SYS_INDEXES__ID, &len);
ut_ad(len == 8);
if (fil_space_t* s = fil_space_acquire_silent(space)) {
if (fil_space_t* s = fil_space_acquire_silent(space_id)) {
/* Ensure that the tablespace file exists
in order to avoid a crash in buf_page_get_gen(). */
if (s->size || fil_space_get_size(space)) {
btr_free_if_exists(page_id_t(space, root_page_no),
if (s->size || fil_space_get_size(space_id)) {
btr_free_if_exists(page_id_t(space_id, root_page_no),
s->zip_size(),
mach_read_from_8(ptr), mtr);
}
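
The extra trx parameter lets dict_drop_index_tree() see that it is running inside a table-dropping dictionary transaction: when the index lives in its own .ibd file that is about to be deleted anyway, freeing its B-tree page by page would be wasted work, so the function returns early for any tablespace other than the system tablespace. A small sketch of that decision; the enum and helper are hypothetical stand-ins, not InnoDB names.

#include <cstdio>

enum class DictOp { INDEX, TABLE };   // stand-in for trx_get_dict_operation()

// True when the B-tree pages should be freed individually; false when the
// whole file-per-table tablespace is being dropped and page-level freeing
// can be skipped, as in the hunk above.
static bool must_free_pages(unsigned space_id, DictOp op) {
  const unsigned SYSTEM_SPACE = 0;    // system-tablespace pages are never
                                      // dropped wholesale, so always free them
  return space_id == SYSTEM_SPACE || op != DictOp::TABLE;
}

int main() {
  std::printf("%d\n", must_free_pages(0, DictOp::TABLE));   // 1: system tablespace
  std::printf("%d\n", must_free_pages(23, DictOp::TABLE));  // 0: whole file dropped
  std::printf("%d\n", must_free_pages(23, DictOp::INDEX));  // 1: only an index dropped
  return 0;
}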


@@ -6466,7 +6466,7 @@ ha_innobase::clone(
DBUG_ENTER("ha_innobase::clone");
ha_innobase* new_handler = static_cast<ha_innobase*>(
handler::clone(name, mem_root));
handler::clone(m_prebuilt->table->name.m_name, mem_root));
if (new_handler != NULL) {
DBUG_ASSERT(new_handler->m_prebuilt != NULL);


@@ -4283,7 +4283,7 @@ This prevents an infinite loop on slow shutdown
in the case where the change buffer bitmap claims that no buffered
changes exist, while entries exist in the change buffer tree.
@param page_id page number for which there should be no unbuffered changes */
ATTRIBUTE_COLD static void ibuf_delete_recs(const page_id_t page_id)
ATTRIBUTE_COLD void ibuf_delete_recs(const page_id_t page_id)
{
ulint dops[IBUF_OP_COUNT];
mtr_t mtr;


@@ -99,8 +99,10 @@ dict_create_index_tree(
/** Drop the index tree associated with a row in SYS_INDEXES table.
@param[in,out] rec SYS_INDEXES record
@param[in,out] pcur persistent cursor on rec
@param[in,out] trx dictionary transaction
@param[in,out] mtr mini-transaction */
void dict_drop_index_tree(rec_t* rec, btr_pcur_t* pcur, mtr_t* mtr);
void dict_drop_index_tree(rec_t* rec, btr_pcur_t* pcur, trx_t* trx, mtr_t* mtr)
MY_ATTRIBUTE((nonnull));
/***************************************************************//**
Creates an index tree for the index if it is not a member of a cluster.


@@ -317,6 +317,14 @@ ibuf_insert(
ulint zip_size,
que_thr_t* thr);
/**
Delete any buffered entries for a page.
This prevents an infinite loop on slow shutdown
in the case where the change buffer bitmap claims that no buffered
changes exist, while entries exist in the change buffer tree.
@param page_id page number for which there should be no unbuffered changes */
ATTRIBUTE_COLD void ibuf_delete_recs(const page_id_t page_id);
/** When an index page is read from a disk to the buffer pool, this function
applies any buffered operations to the page and deletes the entries from the
insert buffer. If the page is not read, but created in the buffer pool, this


@@ -130,7 +130,8 @@ row_undo_ins_remove_clust_rec(
== RW_X_LATCH);
ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
dict_drop_index_tree(rec, &node->pcur, &mtr);
dict_drop_index_tree(rec, &node->pcur, node->trx,
&mtr);
mtr.commit();
mtr.start();


@@ -3125,7 +3125,7 @@ row_upd_clust_step(
ut_ad(!dict_index_is_online_ddl(index));
dict_drop_index_tree(
btr_pcur_get_rec(pcur), pcur, &mtr);
btr_pcur_get_rec(pcur), pcur, trx, &mtr);
mtr.commit();


@@ -1024,10 +1024,12 @@ can_enable_indexes(1), bulk_insert_single_undo(BULK_INSERT_NONE)
{}
handler *ha_maria::clone(const char *name, MEM_ROOT *mem_root)
handler *ha_maria::clone(const char *name __attribute__((unused)),
MEM_ROOT *mem_root)
{
ha_maria *new_handler= static_cast <ha_maria *>(handler::clone(name,
mem_root));
ha_maria *new_handler=
static_cast <ha_maria *>(handler::clone(file->s->open_file_name.str,
mem_root));
if (new_handler)
{
new_handler->file->state= file->state;


@@ -725,10 +725,11 @@ ha_myisam::ha_myisam(handlerton *hton, TABLE_SHARE *table_arg)
can_enable_indexes(1)
{}
handler *ha_myisam::clone(const char *name, MEM_ROOT *mem_root)
handler *ha_myisam::clone(const char *name __attribute__((unused)),
MEM_ROOT *mem_root)
{
ha_myisam *new_handler= static_cast <ha_myisam *>(handler::clone(name,
mem_root));
ha_myisam *new_handler=
static_cast <ha_myisam *>(handler::clone(file->filename, mem_root));
if (new_handler)
new_handler->file->state= file->state;
return new_handler;
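
The clone() changes in ha_innobase, ha_maria and ha_myisam above all follow one pattern: instead of trusting the table name passed in by the caller, the clone re-opens whatever this handler already has open (the InnoDB dictionary name, or the underlying Aria/MyISAM data file). Presumably this matters for partitioned tables, where the caller-supplied path does not name the partition's own file, so cloning by it would not attach to the right partition. A toy illustration of the pattern; the classes and the "#P#p1" suffix shown are hypothetical examples, not the server's API.

#include <cstdio>
#include <string>

// Toy illustration: the clone re-opens the file this handler already has
// open, ignoring the caller-supplied name.
struct ToyHandler {
  std::string open_file_name;                  // recorded when the handler was opened

  ToyHandler *clone(const char * /*name: ignored, as in the hunks above*/) const {
    std::printf("cloning by reopening %s\n", open_file_name.c_str());
    return new ToyHandler{open_file_name};
  }
};

int main() {
  ToyHandler h{"./test/t3#P#p1"};              // handler bound to one partition
  ToyHandler *c = h.clone("./test/t3");        // caller passes the bare table name
  delete c;
  return 0;
}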