Merge 10.1 into 10.2

Marko Mäkelä 2017-10-02 11:12:19 +03:00
commit 3c4cff3357
28 changed files with 128 additions and 140 deletions

View File

@ -5883,7 +5883,7 @@ where c1 = c2-0 and c2 <= (select max(c3) from t3 where c3 = 2 and @counter:=@co
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
2 UNCACHEABLE SUBQUERY t3 system NULL NULL NULL NULL 1
2 UNCACHEABLE SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
set @counter=0;
select count(*) from t1 straight_join t2
where c1 = c2-0 and c2 <= (select max(c3) from t3 where c3 = 2 and @counter:=@counter+1);

View File

@ -1,4 +1,3 @@
drop table if exists t1,t2;
set @a := foo;
ERROR 42S22: Unknown column 'foo' in 'field list'
set @a := connection_id() + 3;
@ -126,14 +125,14 @@ select @a+0, @a:=@a+0+count(*), count(*), @a+0 from t1 group by i;
set @a=0;
select @a,@a:="hello",@a,@a:=3,@a,@a:="hello again" from t1 group by i;
@a @a:="hello" @a @a:=3 @a @a:="hello again"
0 hello 0 3 3 hello again
0 hello 0 3 3 hello again
0 hello 0 3 3 hello again
0 hello 0 3 0 hello again
0 hello 0 3 0 hello again
0 hello 0 3 0 hello again
select @a,@a:="hello",@a,@a:=3,@a,@a:="hello again" from t1 group by i;
@a @a:="hello" @a @a:=3 @a @a:="hello again"
hello again hello hello 3 3 hello again
hello again hello hello 3 3 hello again
hello again hello hello 3 3 hello again
hello again hello hello again 3 hello again hello again
hello again hello hello again 3 hello again hello again
hello again hello hello again 3 hello again hello again
drop table t1;
set @a=_latin2'test';
select charset(@a),collation(@a),coercibility(@a);
@ -570,3 +569,6 @@ End of 5.5 tests
#
set @var= repeat('a',20000);
1
explain select @a:=max(seq) from seq_1_to_1000000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away

View File

@ -1548,7 +1548,7 @@ one
1
explain SELECT 1 as 'one' FROM t1 GROUP BY @a:= (SELECT ROUND(f1) FROM t1 WHERE f1 = 0);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
2 SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using where
SELECT 1 as 'one' FROM t1 GROUP BY @a:= (SELECT ROUND(f1) FROM t1 WHERE f1 = 0);
one
@ -1559,7 +1559,7 @@ one
set sql_buffer_result=1;
explain SELECT 1 as 'one' FROM t1 GROUP BY @a:= (SELECT ROUND(f1) FROM t1 WHERE f1 = 0);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using temporary
2 SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using where
SELECT 1 as 'one' FROM t1 GROUP BY @a:= (SELECT ROUND(f1) FROM t1 WHERE f1 = 0);
one

View File

@ -0,0 +1,4 @@
connection node_1;
create table t(a int);
insert into t select 1;
DROP TABLE t;

View File

@ -0,0 +1 @@
--innodb-stats-persistent=1

View File

@ -0,0 +1,6 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
--connection node_1
create table t(a int);
insert into t select 1;
DROP TABLE t;

View File

@ -1222,7 +1222,7 @@
VARIABLE_NAME INNODB_VERSION
SESSION_VALUE NULL
-GLOBAL_VALUE 5.6.37
+GLOBAL_VALUE 5.6.36-82.1
+GLOBAL_VALUE 5.6.36-82.2
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE NULL
VARIABLE_SCOPE GLOBAL

View File

@ -1,7 +1,5 @@
# Initialise
--disable_warnings
drop table if exists t1,t2;
--enable_warnings
source include/have_sequence.inc;
--error 1054
set @a := foo;
@ -501,3 +499,9 @@ eval select $tmp < $tmp2;
--enable_column_names
--enable_query_log
#
# MDEV-13897 SELECT @a := MAX(col) FROM t requires full index scan
#
explain select @a:=max(seq) from seq_1_to_1000000;
# End of 10.1 tests

View File

@ -1954,12 +1954,6 @@ public:
create_tmp_field(false, table, MY_INT32_NUM_DECIMAL_DIGITS) :
tmp_table_field_from_field_type(table, false, true);
}
table_map used_tables() const
{
return used_tables_cache | RAND_TABLE_BIT;
}
bool const_item() const { return 0; }
bool is_expensive() { return 1; }
void print(String *str, enum_query_type query_type);
enum precedence precedence() const { return ASSIGN_PRECEDENCE; }
void print_as_stmt(String *str, enum_query_type query_type);

View File

@ -5055,7 +5055,6 @@ btr_cur_pessimistic_delete(
ulint n_reserved = 0;
bool success;
ibool ret = FALSE;
ulint level;
mem_heap_t* heap;
ulint* offsets;
#ifdef UNIV_DEBUG
@ -5113,6 +5112,10 @@ btr_cur_pessimistic_delete(
#endif /* UNIV_ZIP_DEBUG */
}
if (flags == 0) {
lock_update_delete(block, rec);
}
if (UNIV_UNLIKELY(page_get_n_recs(page) < 2)
&& UNIV_UNLIKELY(dict_index_get_page(index)
!= block->page.id.page_no())) {
@ -5127,13 +5130,7 @@ btr_cur_pessimistic_delete(
goto return_after_reservations;
}
if (flags == 0) {
lock_update_delete(block, rec);
}
level = btr_page_get_level(page, mtr);
if (level == 0) {
if (page_is_leaf(page)) {
btr_search_update_hash_on_delete(cursor);
} else if (UNIV_UNLIKELY(page_rec_is_first(rec, page))) {
rec_t* next_rec = page_rec_get_next(rec);
@ -5188,6 +5185,7 @@ btr_cur_pessimistic_delete(
on a page, we have to change the parent node pointer
so that it is equal to the new leftmost node pointer
on the page */
ulint level = btr_page_get_level(page, mtr);
btr_node_ptr_delete(index, block, mtr);

View File

@ -100,6 +100,9 @@ fil_compress_page(
int comp_level = int(level);
ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE;
ulint write_size = 0;
#if HAVE_LZO
lzo_uint write_size_lzo = write_size;
#endif
/* Cache to avoid change during function execution */
ulint comp_method = innodb_compression_algorithm;
bool allocated = false;
@ -182,7 +185,9 @@ fil_compress_page(
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM:
err = lzo1x_1_15_compress(
buf, len, out_buf+header_len, &write_size, out_buf+UNIV_PAGE_SIZE);
buf, len, out_buf+header_len, &write_size_lzo, out_buf+UNIV_PAGE_SIZE);
write_size = write_size_lzo;
if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) {
goto err_exit;
@ -523,8 +528,11 @@ fil_decompress_page(
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM: {
ulint olen = 0;
lzo_uint olen_lzo = olen;
err = lzo1x_decompress((const unsigned char *)buf+header_len,
actual_size,(unsigned char *)in_buf, &olen, NULL);
actual_size,(unsigned char *)in_buf, &olen_lzo, NULL);
olen = olen_lzo;
if (err != LZO_E_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) {
len = olen;
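Note: the write_size_lzo/olen_lzo temporaries introduced above avoid passing a pointer to a differently sized integer into the LZO calls. Below is a stand-alone sketch of that out-parameter pattern; the only thing taken from liblzo2 is that lzo1x_1_15_compress() reports its output length through an lzo_uint*, everything else (types, fake_compress()) is made up for illustration.

/* Illustrative only: ulint/lzo_uint stand-ins, fake_compress() is hypothetical. */
#include <stdio.h>

typedef unsigned long long ulint;       /* caller's native length type              */
typedef unsigned int       my_lzo_uint; /* library's length type, possibly narrower */

/* Hypothetical library routine that reports the output length through an
   out-parameter of its own integer type, like lzo1x_1_15_compress() does. */
static int fake_compress(const char *in, my_lzo_uint in_len,
                         char *out, my_lzo_uint *out_len)
{
    (void) in; (void) out;
    *out_len = in_len / 2;              /* pretend the data halved in size */
    return 0;
}

int main(void)
{
    char src[64] = {0}, dst[64];
    ulint       write_size     = 0;     /* what the caller keeps working with  */
    my_lzo_uint write_size_lzo = 0;     /* temporary matching the library type */

    /* Passing (my_lzo_uint *)&write_size would read/write the wrong width
       whenever the two types differ; use the matching temporary instead. */
    int err = fake_compress(src, sizeof src, dst, &write_size_lzo);
    write_size = write_size_lzo;        /* copy the result back afterwards */

    printf("err=%d write_size=%llu\n", err, (unsigned long long) write_size);
    return 0;
}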

View File

@ -4572,7 +4572,7 @@ innobase_commit_low(
}
trx->will_lock = 0;
#ifdef WITH_WSREP
if (wsrep_on(thd)) { thd_proc_info(thd, tmp); }
if (thd && wsrep_on(thd)) { thd_proc_info(thd, tmp); }
#endif /* WITH_WSREP */
}

View File

@ -190,7 +190,7 @@ lock_update_merge_left(
const buf_block_t* right_block); /*!< in: merged index page
which will be discarded */
/*************************************************************//**
Updates the lock table when a page is splited and merged to
Updates the lock table when a page is split and merged to
two pages. */
UNIV_INTERN
void
@ -1073,16 +1073,9 @@ std::string
lock_get_info(
const lock_t*);
/*************************************************************//**
Updates the lock table when a page is split and merged to
two pages. */
UNIV_INTERN
void
lock_update_split_and_merge(
const buf_block_t* left_block, /*!< in: left page to which merged */
const rec_t* orig_pred, /*!< in: original predecessor of
supremum on the left page before merge*/
const buf_block_t* right_block);/*!< in: right page from which merged */
/*******************************************************************//**
@return whether wsrep_on is true on trx->mysql_thd*/
#define wsrep_on_trx(trx) ((trx)->mysql_thd && wsrep_on((trx)->mysql_thd))
#endif /* WITH_WSREP */
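Note: the wsrep_on_trx() macro added above exists because trx->mysql_thd can be NULL for background transactions, so wsrep_on() must not be reached in that case. A self-contained sketch of that null guard follows; the struct definitions and wsrep_on() body are stand-ins, only the macro body mirrors the header change.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct thd_t { bool wsrep_enabled; };           /* stand-in for THD   */
struct trx_t { struct thd_t *mysql_thd; };      /* stand-in for trx_t */

static bool wsrep_on(const struct thd_t *thd)   /* stand-in; dereferences thd */
{
    return thd->wsrep_enabled;
}

/* Mirrors the macro added above: check the THD pointer first. */
#define wsrep_on_trx(trx) ((trx)->mysql_thd && wsrep_on((trx)->mysql_thd))

int main(void)
{
    struct thd_t thd = { true };
    struct trx_t user_trx       = { &thd };
    struct trx_t background_trx = { NULL };     /* no THD attached */

    printf("%d\n", (int) wsrep_on_trx(&user_trx));        /* 1 */
    /* Calling wsrep_on(background_trx.mysql_thd) directly would crash;
       the macro short-circuits and safely yields 0. */
    printf("%d\n", (int) wsrep_on_trx(&background_trx));  /* 0 */
    return 0;
}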

View File

@ -1455,7 +1455,7 @@ lock_rec_other_has_conflicting(
if (lock_rec_has_to_wait(true, trx, mode, lock, is_supremum)) {
#ifdef WITH_WSREP
if (wsrep_on(trx->mysql_thd)) {
if (wsrep_on_trx(trx)) {
trx_mutex_enter(lock->trx);
wsrep_kill_victim((trx_t *)trx, (lock_t *)lock);
trx_mutex_exit(lock->trx);
@ -1985,8 +1985,7 @@ RecLock::create(
}
#ifdef WITH_WSREP
if (c_lock &&
wsrep_on(trx->mysql_thd) &&
if (c_lock && wsrep_on_trx(trx) &&
wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
lock_t *hash = (lock_t *)c_lock->hash;
lock_t *prev = NULL;

View File

@ -189,8 +189,7 @@ wsrep_is_BF_lock_timeout(
/*====================*/
trx_t* trx) /* in: trx to check for lock priority */
{
if (wsrep_on(trx->mysql_thd) &&
wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
if (wsrep_on_trx(trx) && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
fprintf(stderr, "WSREP: BF lock wait long\n");
srv_print_innodb_monitor = TRUE;
srv_print_innodb_lock_monitor = TRUE;
@ -399,7 +398,7 @@ lock_wait_suspend_thread(
if (lock_wait_timeout < 100000000
&& wait_time > (double) lock_wait_timeout
#ifdef WITH_WSREP
&& (!wsrep_on(trx->mysql_thd) ||
&& (!wsrep_on_trx(trx) ||
(!wsrep_is_BF_lock_timeout(trx) && trx->error_state != DB_DEADLOCK))
#endif /* WITH_WSREP */
&& !trx_is_high_priority(trx)) {

View File

@ -1090,8 +1090,8 @@ row_merge_read(
const bool success = os_file_read_no_error_handling_int_fd(
request, fd, buf, ofs, srv_sort_buf_size);
/* For encrypted tables, decrypt data after reading and copy data */
if (log_tmp_is_encrypted()) {
/* If encryption is enabled decrypt buffer */
if (success && log_tmp_is_encrypted()) {
if (!log_tmp_block_decrypt(buf, srv_sort_buf_size,
crypt_buf, ofs, space)) {
return (FALSE);
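Note: the `success &&` guard added here enforces an ordering: only run the post-read decryption when the read itself succeeded, since a failed read leaves the buffer contents undefined. A compact sketch of that pattern, with made-up names standing in for the read and for log_tmp_block_decrypt():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum { BLOCK_LEN = 16 };

static bool read_block(char *buf)        /* stand-in for the file read         */
{
    memset(buf, 'x', BLOCK_LEN);
    return true;
}

static bool decrypt_block(char *buf)     /* stand-in for log_tmp_block_decrypt */
{
    for (int i = 0; i < BLOCK_LEN; i++)
        buf[i] ^= 0x5a;                  /* toy "decryption"                   */
    return true;
}

static bool read_and_decrypt(char *buf, bool encrypted)
{
    const bool success = read_block(buf);

    /* As in the patched row_merge_read(): decrypt only if the read
       succeeded and encryption is enabled. */
    if (success && encrypted) {
        if (!decrypt_block(buf))
            return false;
    }
    return success;
}

int main(void)
{
    char buf[BLOCK_LEN];
    printf("%d\n", (int) read_and_decrypt(buf, true));
    return 0;
}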

View File

@ -464,8 +464,8 @@ inline
bool
wsrep_must_process_fk(const upd_node_t* node, const trx_t* trx)
{
if (que_node_get_type(node->common.parent) != QUE_NODE_UPDATE ||
!wsrep_on(trx->mysql_thd)) {
if (que_node_get_type(node->common.parent) != QUE_NODE_UPDATE
|| !wsrep_on_trx(trx)) {
return false;
}

View File

@ -33,6 +33,27 @@
static int compare_columns(MARIA_COLUMNDEF **a, MARIA_COLUMNDEF **b);
static ulonglong update_tot_length(ulonglong tot_length, ulonglong max_rows, uint length)
{
ulonglong tot_length_part;
if (tot_length == ULONGLONG_MAX)
return ULONGLONG_MAX;
tot_length_part= (max_rows/(ulong) ((maria_block_size -
MAX_KEYPAGE_HEADER_SIZE - KEYPAGE_CHECKSUM_SIZE)/
(length*2)));
if (tot_length_part >= ULONGLONG_MAX / maria_block_size)
return ULONGLONG_MAX;
if (tot_length > ULONGLONG_MAX - tot_length_part * maria_block_size)
return ULONGLONG_MAX;
return tot_length + tot_length_part * maria_block_size;
}
/*
Old options is used when recreating database, from maria_chk
*/
@ -57,7 +78,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
char kfilename[FN_REFLEN], klinkname[FN_REFLEN], *klinkname_ptr;
char dfilename[FN_REFLEN], dlinkname[FN_REFLEN], *dlinkname_ptr;
ulong pack_reclength;
ulonglong tot_length,max_rows, tmp, tot_length_part;
ulonglong tot_length,max_rows, tmp;
enum en_fieldtype type;
enum data_file_type org_datafile_type= datafile_type;
MARIA_SHARE share;
@ -661,23 +682,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
if (length > max_key_length)
max_key_length= length;
if (tot_length == ULLONG_MAX)
continue;
tot_length_part= (max_rows/(ulong) (((uint) maria_block_size -
MAX_KEYPAGE_HEADER_SIZE -
KEYPAGE_CHECKSUM_SIZE)/
(length*2)));
if (tot_length_part >= (ULLONG_MAX / maria_block_size +
ULLONG_MAX % maria_block_size))
tot_length= ULLONG_MAX;
else
{
if (tot_length > ULLONG_MAX - tot_length_part * maria_block_size)
tot_length= ULLONG_MAX;
else
tot_length+= tot_length_part * maria_block_size;
}
tot_length= update_tot_length(tot_length, max_rows, length);
}
unique_key_parts=0;
@ -687,23 +692,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
unique_key_parts+=uniquedef->keysegs;
share.state.key_root[keys+i]= HA_OFFSET_ERROR;
if (tot_length == ULLONG_MAX)
continue;
ulonglong tot_length_part= (max_rows/(ulong) (((uint) maria_block_size -
MAX_KEYPAGE_HEADER_SIZE -
KEYPAGE_CHECKSUM_SIZE) /
((MARIA_UNIQUE_HASH_LENGTH + pointer)*2)));
if (tot_length_part >= (ULLONG_MAX / maria_block_size +
ULLONG_MAX % maria_block_size))
tot_length= ULLONG_MAX;
else
{
if (tot_length > ULLONG_MAX - tot_length_part * maria_block_size)
tot_length= ULLONG_MAX;
else
tot_length+= tot_length_part * maria_block_size;
}
tot_length= update_tot_length(tot_length, max_rows, MARIA_UNIQUE_HASH_LENGTH + pointer);
}
keys+=uniques; /* Each unique has 1 key */
key_segs+=uniques; /* Each unique has 1 key seg */
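Note: the update_tot_length() helper factored out above clamps the running size estimate to ULONGLONG_MAX instead of letting it overflow, and replaces the earlier `ULLONG_MAX / maria_block_size + ULLONG_MAX % maria_block_size` bound with a plain `ULONGLONG_MAX / maria_block_size` check. A stand-alone sketch of that saturating pattern; the 8 KiB block size and all names below are illustrative, not the Aria values.

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 8192ULL              /* illustrative, not maria_block_size */

/* Add blocks * BLOCK_SIZE to total, clamping to UINT64_MAX on overflow,
   in the same spirit as update_tot_length(). */
static uint64_t add_blocks_saturating(uint64_t total, uint64_t blocks)
{
    if (total == UINT64_MAX)
        return UINT64_MAX;              /* already clamped                 */
    if (blocks >= UINT64_MAX / BLOCK_SIZE)
        return UINT64_MAX;              /* multiplication would overflow   */
    if (total > UINT64_MAX - blocks * BLOCK_SIZE)
        return UINT64_MAX;              /* addition would overflow         */
    return total + blocks * BLOCK_SIZE;
}

int main(void)
{
    printf("%llu\n", (unsigned long long) add_blocks_saturating(0, 3));
    printf("%llu\n", (unsigned long long) add_blocks_saturating(UINT64_MAX, 1));
    return 0;
}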

View File

@ -3624,7 +3624,6 @@ btr_cur_pessimistic_delete(
ulint n_reserved = 0;
ibool success;
ibool ret = FALSE;
ulint level;
mem_heap_t* heap;
ulint* offsets;
@ -3677,6 +3676,10 @@ btr_cur_pessimistic_delete(
#endif /* UNIV_ZIP_DEBUG */
}
if (flags == 0) {
lock_update_delete(block, rec);
}
if (UNIV_UNLIKELY(page_get_n_recs(page) < 2)
&& UNIV_UNLIKELY(dict_index_get_page(index)
!= buf_block_get_page_no(block))) {
@ -3691,13 +3694,7 @@ btr_cur_pessimistic_delete(
goto return_after_reservations;
}
if (flags == 0) {
lock_update_delete(block, rec);
}
level = btr_page_get_level(page, mtr);
if (level > 0
if (!page_is_leaf(page)
&& UNIV_UNLIKELY(rec == page_rec_get_next(
page_get_infimum_rec(page)))) {
@ -3720,6 +3717,7 @@ btr_cur_pessimistic_delete(
on a page, we have to change the father node pointer
so that it is equal to the new leftmost node pointer
on the page */
ulint level = btr_page_get_level(page, mtr);
btr_node_ptr_delete(index, block, mtr);

View File

@ -106,6 +106,9 @@ fil_compress_page(
int comp_level = level;
ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE;
ulint write_size = 0;
#if HAVE_LZO
lzo_uint write_size_lzo = write_size;
#endif
/* Cache to avoid change during function execution */
ulint comp_method = innodb_compression_algorithm;
bool allocated = false;
@ -207,7 +210,9 @@ fil_compress_page(
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM:
err = lzo1x_1_15_compress(
buf, len, out_buf+header_len, &write_size, out_buf+UNIV_PAGE_SIZE);
buf, len, out_buf+header_len, &write_size_lzo, out_buf+UNIV_PAGE_SIZE);
write_size = write_size_lzo;
if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) {
if (space && !space->printed_compression_failure) {
@ -604,8 +609,11 @@ fil_decompress_page(
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM: {
ulint olen = 0;
lzo_uint olen_lzo = olen;
err = lzo1x_decompress((const unsigned char *)buf+header_len,
actual_size,(unsigned char *)in_buf, &olen, NULL);
actual_size,(unsigned char *)in_buf, &olen_lzo, NULL);
olen = olen_lzo;
if (err != LZO_E_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) {
ib_logf(IB_LOG_LEVEL_ERROR,

View File

@ -4602,7 +4602,7 @@ innobase_commit_low(
trx_commit_for_mysql(trx);
}
#ifdef WITH_WSREP
if (wsrep_on(thd)) { thd_proc_info(thd, tmp); }
if (thd && wsrep_on(thd)) { thd_proc_info(thd, tmp); }
#endif /* WITH_WSREP */
}

View File

@ -1029,6 +1029,8 @@ std::string
lock_get_info(
const lock_t*);
#define wsrep_on_trx(trx) ((trx)->mysql_thd && wsrep_on((trx)->mysql_thd))
#ifndef UNIV_NONINL
#include "lock0lock.ic"
#endif

View File

@ -1835,7 +1835,7 @@ lock_rec_other_has_conflicting(
#ifdef WITH_WSREP
if (lock_rec_has_to_wait(TRUE, trx, mode, lock, is_supremum)) {
if (wsrep_on(trx->mysql_thd)) {
if (wsrep_on_trx(trx)) {
trx_mutex_enter(lock->trx);
wsrep_kill_victim(trx, lock);
trx_mutex_exit(lock->trx);
@ -2290,7 +2290,7 @@ lock_rec_create(
#ifdef WITH_WSREP
if (c_lock &&
wsrep_on(trx->mysql_thd) &&
wsrep_on_trx(trx) &&
wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
lock_t *hash = (lock_t *)c_lock->hash;
lock_t *prev = NULL;

View File

@ -197,7 +197,7 @@ wsrep_is_BF_lock_timeout(
/*====================*/
trx_t* trx) /* in: trx to check for lock priority */
{
if (wsrep_on(trx->mysql_thd) &&
if (wsrep_on_trx(trx) &&
wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
fprintf(stderr, "WSREP: BF lock wait long\n");
srv_print_innodb_monitor = TRUE;
@ -402,7 +402,7 @@ lock_wait_suspend_thread(
if (lock_wait_timeout < 100000000
&& wait_time > (double) lock_wait_timeout) {
#ifdef WITH_WSREP
if (!wsrep_on(trx->mysql_thd) ||
if (!wsrep_on_trx(trx) ||
(!wsrep_is_BF_lock_timeout(trx) &&
trx->error_state != DB_DEADLOCK)) {
#endif /* WITH_WSREP */

View File

@ -245,7 +245,7 @@ next:
ENCRYPTION_FLAG_DECRYPT
@param[in] offs offset to block
@param[in] space_id tablespace id
@return true if successfull, false in case of failure
@return true if successful, false in case of failure
*/
static
bool

View File

@ -182,7 +182,6 @@ struct row_log_t {
dict_table_t* table; /*!< table that is being rebuilt,
or NULL when this is a secondary
index that is being created online */
dict_index_t* index; /*!< index to be build */
bool same_pk;/*!< whether the definition of the PRIMARY KEY
has remained the same */
const dtuple_t* add_cols;
@ -385,7 +384,7 @@ row_log_online_op(
byte_offset,
index->table->space)) {
log->error = DB_DECRYPTION_FAILED;
goto err_exit;
goto write_failed;
}
srv_stats.n_rowlog_blocks_encrypted.inc();
@ -479,13 +478,15 @@ static MY_ATTRIBUTE((nonnull))
void
row_log_table_close_func(
/*=====================*/
row_log_t* log, /*!< in/out: online rebuild log */
dict_index_t* index, /*!< in/out: online rebuilt index */
#ifdef UNIV_DEBUG
const byte* b, /*!< in: end of log record */
#endif /* UNIV_DEBUG */
ulint size, /*!< in: size of log record */
ulint avail) /*!< in: available size for log record */
{
row_log_t* log = index->online_log;
ut_ad(mutex_own(&log->mutex));
if (size >= avail) {
@ -520,7 +521,7 @@ row_log_table_close_func(
srv_sort_buf_size,
log->crypt_tail,
byte_offset,
log->index->table->space)) {
index->table->space)) {
log->error = DB_DECRYPTION_FAILED;
goto err_exit;
}
@ -559,11 +560,11 @@ err_exit:
}
#ifdef UNIV_DEBUG
# define row_log_table_close(log, b, size, avail) \
row_log_table_close_func(log, b, size, avail)
# define row_log_table_close(index, b, size, avail) \
row_log_table_close_func(index, b, size, avail)
#else /* UNIV_DEBUG */
# define row_log_table_close(log, b, size, avail) \
row_log_table_close_func(log, size, avail)
row_log_table_close_func(index, size, avail)
#endif /* UNIV_DEBUG */
/******************************************************//**
@ -735,8 +736,7 @@ row_log_table_delete(
b += ext_size;
}
row_log_table_close(
index->online_log, b, mrec_size, avail_size);
row_log_table_close(index, b, mrec_size, avail_size);
}
func_exit:
@ -859,8 +859,7 @@ row_log_table_low_redundant(
b + extra_size, index, tuple->fields, tuple->n_fields);
b += size;
row_log_table_close(
index->online_log, b, mrec_size, avail_size);
row_log_table_close(index, b, mrec_size, avail_size);
}
mem_heap_free(heap);
@ -969,8 +968,7 @@ row_log_table_low(
memcpy(b, rec, rec_offs_data_size(offsets));
b += rec_offs_data_size(offsets);
row_log_table_close(
index->online_log, b, mrec_size, avail_size);
row_log_table_close(index, b, mrec_size, avail_size);
}
}
@ -2675,7 +2673,7 @@ all_done:
/* If encryption is enabled decrypt buffer after reading it
from file system. */
if (log_tmp_is_encrypted()) {
if (success && log_tmp_is_encrypted()) {
if (!log_tmp_block_decrypt(buf,
srv_sort_buf_size,
index->online_log->crypt_head,
@ -2996,7 +2994,6 @@ row_log_allocate(
log->head.total = 0;
log->path = path;
log->crypt_tail = log->crypt_head = NULL;
log->index = index;
dict_index_set_online_status(index, ONLINE_INDEX_CREATION);
index->online_log = log;
@ -3542,7 +3539,7 @@ all_done:
/* If encryption is enabled decrypt buffer after reading it
from file system. */
if (log_tmp_is_encrypted()) {
if (success && log_tmp_is_encrypted()) {
if (!log_tmp_block_decrypt(buf,
srv_sort_buf_size,
index->online_log->crypt_head,

View File

@ -887,8 +887,8 @@ row_merge_read(
success = os_file_read_no_error_handling_int_fd(fd, buf,
ofs, srv_sort_buf_size);
/* For encrypted tables, decrypt data after reading and copy data */
if (log_tmp_is_encrypted()) {
/* If encryption is enabled decrypt buffer */
if (success && log_tmp_is_encrypted()) {
if (!log_tmp_block_decrypt(buf, srv_sort_buf_size,
crypt_buf, ofs, space)) {
return (FALSE);
@ -3918,22 +3918,13 @@ row_merge_build_indexes(
DBUG_RETURN(DB_OUT_OF_MEMORY);
}
/* Get crypt data from tablespace if present. We should be protected
from concurrent DDL (e.g. drop table) by MDL-locks. */
fil_space_t* space = fil_space_acquire(new_table->space);
if (!space) {
DBUG_RETURN(DB_TABLESPACE_NOT_FOUND);
}
/* If temporal log file is encrypted allocate memory for
/* If temporary log file is encrypted allocate memory for
encryption/decryption. */
if (log_tmp_is_encrypted()) {
crypt_block = static_cast<row_merge_block_t*>(
os_mem_alloc_large(&block_size));
if (crypt_block == NULL) {
fil_space_release(space);
DBUG_RETURN(DB_OUT_OF_MEMORY);
}
}
@ -4313,9 +4304,5 @@ func_exit:
}
}
if (space) {
fil_space_release(space);
}
DBUG_RETURN(error);
}

View File

@ -1989,7 +1989,7 @@ row_upd_sec_index_entry(
}
#ifdef WITH_WSREP
if (err == DB_SUCCESS && !referenced &&
wsrep_on(trx->mysql_thd) &&
wsrep_on_trx(trx) &&
!wsrep_thd_is_BF(trx->mysql_thd, FALSE) &&
!(parent && que_node_get_type(parent) ==
QUE_NODE_UPDATE &&
@ -2279,7 +2279,7 @@ err_exit:
}
}
#ifdef WITH_WSREP
if (!referenced && wsrep_on(trx->mysql_thd) &&
if (!referenced && wsrep_on_trx(trx) &&
!(parent && que_node_get_type(parent) == QUE_NODE_UPDATE &&
((upd_node_t*)parent)->cascade_node == node) &&
foreign
@ -2548,8 +2548,7 @@ row_upd_del_mark_clust_rec(
}
#ifdef WITH_WSREP
trx_t* trx = thr_get_trx(thr) ;
if (err == DB_SUCCESS && !referenced && trx && wsrep_on(trx->mysql_thd) &&
if (err == DB_SUCCESS && !referenced && wsrep_on_trx(trx) &&
!(parent && que_node_get_type(parent) == QUE_NODE_UPDATE &&
((upd_node_t*)parent)->cascade_node == node) &&
foreign