Improve update handler (long unique keys on blobs)

MDEV-21606 Improve update handler (long unique keys on blobs)
MDEV-21470 MyISAM and Aria start_bulk_insert doesn't work with long unique
MDEV-21606 Bug fix for previous version of this code
MDEV-21819 Assertion `inited == NONE || update_handler != this'

- Move update_handler from TABLE to handler
- Move out initialization of update handler from ha_write_row() to
  prepare_for_insert()
- Fixed that INSERT DELAYED works with update handler
- Give an error if using long unique with an autoincrement column
- Added handler function to check if table has long unique hash indexes
- Disable write cache in MyISAM and Aria when using update_handler, as if
  the cache is used, the row will not be inserted until end of statement
  and update_handler would not find conflicting rows
- Removed not used handler argument from check_duplicate_long_entries_update()
- Syntax cleanups:
  - Indentation fixes
  - Don't use single-character identifiers for arguments
This commit is contained in: parent 736998cb75, commit 4ef437558a
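Before the diffs, a minimal standalone C++ sketch of the update-handler lifecycle this commit introduces (toy types and names mirroring the patch, not the real MariaDB handler API): a handler owns one cursor, and while a scan is active (inited == RND) that cursor cannot also probe the unique-hash index for duplicates, so a second handler is cloned on demand and torn down at reset time.

    // Hedged sketch, assuming simplified toy types.
    #include <cassert>

    struct Handler {
      enum Init { NONE, INDEX, RND } inited = NONE;
      Handler *update_handler = this;     // == this means "no clone yet"

      Handler *clone() { return new Handler(); }

      bool clone_handler_for_update() {   // cf. handler::clone_handler_for_update()
        if (update_handler != this)
          return false;                   // already cloned
        update_handler = clone();
        return update_handler == nullptr; // true on failure
      }
      void delete_update_handler() {      // cf. handler::delete_update_handler()
        if (update_handler != this)
          delete update_handler;
        update_handler = this;
      }
      int prepare_for_insert(bool force) {
        if (inited == RND || force)       // only clone when a scan is active
          return clone_handler_for_update() ? 1 : 0;
        return 0;
      }
      void write_row() {
        // The invariant from MDEV-21819: never probe with the busy cursor.
        assert(inited == NONE || update_handler != this);
      }
    };

    int main() {
      Handler h;
      h.inited = Handler::RND;            // a scan is in progress
      h.prepare_for_insert(false);        // clones the probe handler
      h.write_row();                      // assertion holds
      h.delete_update_handler();          // done at ha_reset() time
    }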
@@ -1477,4 +1477,28 @@ id select_type table type possible_keys key key_len ref rows Extra
 SELECT t2.b FROM t1 JOIN t2 ON t1.d = t2.f WHERE t2.pk >= 20;
 b
 drop table t1,t2;
+#
+# MDEV-21470 MyISAM start_bulk_insert doesn't work with long unique
+#
+CREATE TABLE t1 (a INT, b BLOB) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+CREATE TABLE t2 (c BIT, d BLOB, UNIQUE(d)) ENGINE=MyISAM;
+INSERT INTO t2 SELECT * FROM t1;
+Warnings:
+Warning 1264 Out of range value for column 'c' at row 2
+DROP TABLE t1, t2;
+#
+# MDEV-19338 Using AUTO_INCREMENT with long unique
+#
+CREATE TABLE t1 (pk INT, a TEXT NOT NULL DEFAULT '', PRIMARY KEY (pk), b INT AUTO_INCREMENT, UNIQUE(b), UNIQUE (a,b)) ENGINE=myisam;
+ERROR HY000: AUTO_INCREMENT column `b` cannot be used in the UNIQUE index `a`
+#
+# MDEV-21819 Assertion `inited == NONE || update_handler != this'
+# failed in handler::ha_write_row
+#
+CREATE OR REPLACE TABLE t1 (a INT, b BLOB, s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(b)) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
+INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01');
+DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01';
+ERROR 23000: Duplicate entry 'foo' for key 'b'
+DROP TABLE t1;
 set @@GLOBAL.max_allowed_packet= @allowed_packet;
@@ -556,4 +556,33 @@ SELECT t2.b FROM t1 JOIN t2 ON t1.d = t2.f WHERE t2.pk >= 20;
 SELECT t2.b FROM t1 JOIN t2 ON t1.d = t2.f WHERE t2.pk >= 20;
 drop table t1,t2;
 
+--echo #
+--echo # MDEV-21470 MyISAM start_bulk_insert doesn't work with long unique
+--echo #
+
+CREATE TABLE t1 (a INT, b BLOB) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+CREATE TABLE t2 (c BIT, d BLOB, UNIQUE(d)) ENGINE=MyISAM;
+INSERT INTO t2 SELECT * FROM t1;
+DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-19338 Using AUTO_INCREMENT with long unique
+--echo #
+
+--error ER_NO_AUTOINCREMENT_WITH_UNIQUE
+CREATE TABLE t1 (pk INT, a TEXT NOT NULL DEFAULT '', PRIMARY KEY (pk), b INT AUTO_INCREMENT, UNIQUE(b), UNIQUE (a,b)) ENGINE=myisam;
+
+--echo #
+--echo # MDEV-21819 Assertion `inited == NONE || update_handler != this'
+--echo # failed in handler::ha_write_row
+--echo #
+
+CREATE OR REPLACE TABLE t1 (a INT, b BLOB, s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(b)) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
+INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01');
+--error ER_DUP_ENTRY
+DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01';
+DROP TABLE t1;
+
+# Cleanup
 set @@GLOBAL.max_allowed_packet= @allowed_packet;
mysql-test/main/long_unique_delayed.result (new file, 19 lines)
@@ -0,0 +1,19 @@
+#
+# Test insert delayed with long unique keys
+#
+create table t1(a blob unique) engine=myisam;
+insert delayed into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890),('maria');
+insert delayed into t1 values(1),(9);
+flush tables t1;
+select count(*) from t1;
+count(*)
+11
+drop table t1;
+#
+# MDEV-19848
+# Server crashes in check_vcol_forward_refs upon INSERT DELAYED into
+# table with long blob key
+#
+CREATE TABLE t1 (a BLOB, UNIQUE(a)) ENGINE=MyISAM;
+INSERT DELAYED t1 () VALUES ();
+DROP TABLE t1;
mysql-test/main/long_unique_delayed.test (new file, 22 lines)
@@ -0,0 +1,22 @@
+--source include/not_embedded.inc
+
+--echo #
+--echo # Test insert delayed with long unique keys
+--echo #
+
+create table t1(a blob unique) engine=myisam;
+insert delayed into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890),('maria');
+insert delayed into t1 values(1),(9);
+flush tables t1;
+select count(*) from t1;
+drop table t1;
+
+--echo #
+--echo # MDEV-19848
+--echo # Server crashes in check_vcol_forward_refs upon INSERT DELAYED into
+--echo # table with long blob key
+--echo #
+
+CREATE TABLE t1 (a BLOB, UNIQUE(a)) ENGINE=MyISAM;
+INSERT DELAYED t1 () VALUES ();
+DROP TABLE t1;
mysql-test/suite/versioning/r/long_unique.result (new file, 8 lines)
@@ -0,0 +1,8 @@
+#
+# Assertion `inited == NONE || update_handler != this' failed in
+# handler::ha_write_row
+#
+CREATE TABLE t1 (f VARCHAR(4096), s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(f)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES ('foo', '2023-08-30', '2025-07-09'),('bar', '2021-01-01', '2021-12-31');
+DELETE FROM t1 FOR PORTION OF app FROM '2023-08-29' TO '2025-07-01';
+DROP TABLE t1;
mysql-test/suite/versioning/t/long_unique.test (new file, 9 lines)
@@ -0,0 +1,9 @@
+--echo #
+--echo # Assertion `inited == NONE || update_handler != this' failed in
+--echo # handler::ha_write_row
+--echo #
+
+CREATE TABLE t1 (f VARCHAR(4096), s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(f)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES ('foo', '2023-08-30', '2025-07-09'),('bar', '2021-01-01', '2021-12-31');
+DELETE FROM t1 FOR PORTION OF app FROM '2023-08-29' TO '2025-07-01';
+DROP TABLE t1;
@@ -4344,7 +4344,6 @@ int ha_partition::write_row(const uchar * buf)
       thd->variables.sql_mode|= MODE_NO_AUTO_VALUE_ON_ZERO;
     }
   }
-
   old_map= dbug_tmp_use_all_columns(table, table->read_set);
   error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
   dbug_tmp_restore_column_map(table->read_set, old_map);
@@ -4362,6 +4361,17 @@ int ha_partition::write_row(const uchar * buf)
   }
   m_last_part= part_id;
   DBUG_PRINT("info", ("Insert in partition %u", part_id));
+  /*
+    We have to call prepare_for_insert() if we have an update handler
+    in the underlying table (to clone the handler). This is because for
+    INSERT, prepare_for_insert() is only called for the main table,
+    not for all partitions. This is to reduce the huge overhead of cloning
+    a possibly not needed handler if there are many partitions.
+  */
+  if (table->s->long_unique_table &&
+      m_file[part_id]->update_handler == m_file[part_id] && inited == RND)
+    m_file[part_id]->prepare_for_insert(0);
+
   start_part_bulk_insert(thd, part_id);
 
   tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
sql/handler.cc (132 lines changed)
@@ -2753,6 +2753,7 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root)
                              HA_OPEN_IGNORE_IF_LOCKED, mem_root))
     goto err;
 
+  new_handler->update_handler= new_handler;
   return new_handler;
 
 err:
@@ -2760,6 +2761,40 @@ err:
   return NULL;
 }
 
+
+/**
+  Creates a clone of handler used in update for unique hash key.
+*/
+
+bool handler::clone_handler_for_update()
+{
+  handler *tmp;
+  DBUG_ASSERT(table->s->long_unique_table);
+
+  if (update_handler != this)
+    return 0;                                   // Already done
+  if (!(tmp= clone(table->s->normalized_path.str, table->in_use->mem_root)))
+    return 1;
+  update_handler= tmp;
+  /* The update handler is only used to check if a row exists */
+  update_handler->ha_external_lock(table->in_use, F_RDLCK);
+  return 0;
+}
+
+
+/**
+  Delete update handler object if it exists
+*/
+
+void handler::delete_update_handler()
+{
+  if (update_handler != this)
+  {
+    update_handler->ha_external_lock(table->in_use, F_UNLCK);
+    update_handler->ha_close();
+    delete update_handler;
+  }
+  update_handler= this;
+}
+
+
 LEX_CSTRING *handler::engine_name()
 {
   return hton_name(ht);
@@ -2917,7 +2952,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
   }
   reset_statistics();
   internal_tmp_table= MY_TEST(test_if_locked & HA_OPEN_INTERNAL_TABLE);
-
+  update_handler= this;
   DBUG_RETURN(error);
 }
 
@@ -6622,6 +6657,8 @@ int handler::ha_reset()
   DBUG_ASSERT(inited == NONE);
   /* reset the bitmaps to point to defaults */
   table->default_column_bitmaps();
+  if (update_handler != this)
+    delete_update_handler();
   pushed_cond= NULL;
   tracker= NULL;
   mark_trx_read_write_done= 0;
@@ -6656,7 +6693,12 @@ static int wsrep_after_row(THD *thd)
 }
 #endif /* WITH_WSREP */
 
-static int check_duplicate_long_entry_key(TABLE *table, handler *h,
+
+/**
+  Check if there is a conflicting unique hash key
+*/
+
+static int check_duplicate_long_entry_key(TABLE *table, handler *handler,
                                           const uchar *new_rec, uint key_no)
 {
   Field *hash_field;
@@ -6664,13 +6706,14 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h,
   KEY *key_info= table->key_info + key_no;
   hash_field= key_info->key_part->field;
   uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL];
+  DBUG_ENTER("check_duplicate_long_entry_key");
 
   DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY &&
-               key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL)
-              || key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL);
+               key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL) ||
+              key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL);
 
   if (hash_field->is_real_null())
-    return 0;
+    DBUG_RETURN(0);
 
   key_copy(ptr, new_rec, key_info, key_info->key_length, false);
 
@@ -6678,11 +6721,11 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h,
     table->check_unique_buf= (uchar *)alloc_root(&table->mem_root,
                                                  table->s->reclength);
 
-  result= h->ha_index_init(key_no, 0);
+  result= handler->ha_index_init(key_no, 0);
   if (result)
-    return result;
+    DBUG_RETURN(result);
   store_record(table, check_unique_buf);
-  result= h->ha_index_read_map(table->record[0],
+  result= handler->ha_index_read_map(table->record[0],
                                ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT);
   if (!result)
   {
@@ -6718,7 +6761,7 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h,
       }
     }
   }
-  while (!is_same && !(result= h->ha_index_next_same(table->record[0],
+  while (!is_same && !(result= handler->ha_index_next_same(table->record[0],
                        ptr, key_info->key_length)));
   if (is_same)
     error= HA_ERR_FOUND_DUPP_KEY;
@@ -6730,15 +6773,15 @@ exit:
   if (error == HA_ERR_FOUND_DUPP_KEY)
   {
     table->file->errkey= key_no;
-    if (h->ha_table_flags() & HA_DUPLICATE_POS)
+    if (handler->ha_table_flags() & HA_DUPLICATE_POS)
     {
-      h->position(table->record[0]);
-      memcpy(table->file->dup_ref, h->ref, h->ref_length);
+      handler->position(table->record[0]);
+      memcpy(table->file->dup_ref, handler->ref, handler->ref_length);
     }
   }
   restore_record(table, check_unique_buf);
-  h->ha_index_end();
-  return error;
+  handler->ha_index_end();
+  DBUG_RETURN(error);
 }
 
 /** @brief
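For readers unfamiliar with long unique keys, the probe above reduces to: index a fixed-size hash of the long value, look up by that hash, then compare full values to resolve collisions, which is what ha_index_read_map() followed by ha_index_next_same() implements. A hedged, self-contained C++ model of that check (std::hash stands in for MariaDB's hidden hash column; toy containers, not the storage-engine API):

    #include <functional>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    static std::unordered_multimap<std::size_t, std::string> hash_index;

    static bool is_duplicate(const std::string &blob) {
      std::size_t h = std::hash<std::string>{}(blob); // the indexed hash, not the blob
      auto range = hash_index.equal_range(h);         // index probe on the hash
      for (auto it = range.first; it != range.second; ++it)
        if (it->second == blob)                       // full compare: collisions differ
          return true;
      return false;
    }

    int main() {
      hash_index.emplace(std::hash<std::string>{}("foo"), "foo");
      std::cout << is_duplicate("foo") << ' ' << is_duplicate("bar") << '\n'; // 1 0
    }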
@@ -6746,20 +6789,22 @@ exit:
     unique constraint on long columns.
     @returns 0 if no duplicate else returns error
 */
-static int check_duplicate_long_entries(TABLE *table, handler *h,
+
+static int check_duplicate_long_entries(TABLE *table, handler *handler,
                                         const uchar *new_rec)
 {
   table->file->errkey= -1;
-  int result;
   for (uint i= 0; i < table->s->keys; i++)
   {
+    int result;
     if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH &&
-        (result= check_duplicate_long_entry_key(table, h, new_rec, i)))
+        (result= check_duplicate_long_entry_key(table, handler, new_rec, i)))
       return result;
   }
   return 0;
 }
 
+
 /** @brief
     check whether updated records breaks the
     unique constraint on long columns.
@@ -6774,11 +6819,11 @@ static int check_duplicate_long_entries(TABLE *table, handler *h,
     key as a parameter in normal insert key should be -1
     @returns 0 if no duplicate else returns error
 */
-static int check_duplicate_long_entries_update(TABLE *table, handler *h, uchar *new_rec)
+
+static int check_duplicate_long_entries_update(TABLE *table, uchar *new_rec)
 {
   Field *field;
   uint key_parts;
-  int error= 0;
   KEY *keyinfo;
   KEY_PART_INFO *keypart;
   /*
@@ -6786,7 +6831,7 @@ static int check_duplicate_long_entries_update(TABLE *table, uchar *new_rec)
     with respect to fields in hash_str
   */
   uint reclength= (uint) (table->record[1] - table->record[0]);
-  table->clone_handler_for_update();
+  table->file->clone_handler_for_update();
   for (uint i= 0; i < table->s->keys; i++)
   {
     keyinfo= table->key_info + i;
@@ -6796,13 +6841,15 @@ static int check_duplicate_long_entries_update(TABLE *table, uchar *new_rec)
       keypart= keyinfo->key_part - key_parts;
       for (uint j= 0; j < key_parts; j++, keypart++)
       {
+        int error;
         field= keypart->field;
         /* Compare fields if they are different then check for duplicates */
        if (field->cmp_binary_offset(reclength))
         {
-          if((error= check_duplicate_long_entry_key(table, table->update_handler,
-                                                    new_rec, i)))
-            goto exit;
+          if ((error= (check_duplicate_long_entry_key(table,
+                                                      table->file->update_handler,
+                                                      new_rec, i))))
+            return error;
           /*
             break because check_duplicate_long_entries_key will
             take care of remaining fields
@@ -6812,10 +6859,35 @@ static int check_duplicate_long_entries_update(TABLE *table, uchar *new_rec)
       }
     }
   }
-exit:
-  return error;
+  return 0;
 }
 
+
+/**
+  Do all initialization needed for insert
+
+  @param force_update_handler  Set to TRUE if we should always create an
+                               update handler. Needed if we don't know if we
+                               are going to do inserts while a scan is in
+                               progress.
+*/
+
+int handler::prepare_for_insert(bool force_update_handler)
+{
+  /* Preparation for unique of blob's */
+  if (table->s->long_unique_table && (inited == RND || force_update_handler))
+  {
+    /*
+      When doing a scan we can't use the same handler to check
+      duplicate rows. Create a new temporary one
+    */
+    if (clone_handler_for_update())
+      return 1;
+  }
+  return 0;
+}
+
+
 int handler::ha_write_row(const uchar *buf)
 {
   int error;
@@ -6831,10 +6903,8 @@ int handler::ha_write_row(const uchar *buf)
 
   if (table->s->long_unique_table && this == table->file)
   {
-    if (inited == RND)
-      table->clone_handler_for_update();
-    handler *h= table->update_handler ? table->update_handler : table->file;
-    if ((error= check_duplicate_long_entries(table, h, buf)))
+    DBUG_ASSERT(inited == NONE || update_handler != this);
+    if ((error= check_duplicate_long_entries(table, update_handler, buf)))
       DBUG_RETURN(error);
   }
   TABLE_IO_WAIT(tracker, PSI_TABLE_WRITE_ROW, MAX_KEY, error,
@@ -6877,10 +6947,8 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
   mark_trx_read_write();
   increment_statistics(&SSV::ha_update_count);
   if (table->s->long_unique_table &&
-      (error= check_duplicate_long_entries_update(table, table->file, (uchar *)new_data)))
-  {
+      (error= check_duplicate_long_entries_update(table, (uchar*) new_data)))
     return error;
-  }
 
   TABLE_IO_WAIT(tracker, PSI_TABLE_UPDATE_ROW, active_index, error,
       { error= update_row(old_data, new_data);})
@@ -3065,6 +3065,7 @@ public:
   /** Length of ref (1-8 or the clustered key length) */
   uint ref_length;
   FT_INFO *ft_handler;
+  handler *update_handler;              /* Handler used in case of update */
   enum init_stat { NONE=0, INDEX, RND };
   init_stat inited, pre_inited;
 
@@ -3254,6 +3255,8 @@ public:
     DBUG_ASSERT(inited == NONE);
   }
   virtual handler *clone(const char *name, MEM_ROOT *mem_root);
+  bool clone_handler_for_update();
+  void delete_update_handler();
   /** This is called after create to allow us to set up cached variables */
   void init()
   {
@@ -4596,6 +4599,7 @@ protected:
 
 public:
   bool check_table_binlog_row_based(bool binlog_row);
+  int prepare_for_insert(bool force_update_handler= 0);
 
   inline void clear_cached_table_binlog_row_based_flag()
   {
@@ -4931,6 +4935,8 @@ public:
   {
     return false;
   }
+  /* If the table is using sql level unique constraints on some column */
+  inline bool has_long_unique();
 
   /* Used for ALTER TABLE.
      Some engines can handle some differences in indexes by themself. */
@@ -7957,3 +7957,5 @@ ER_SLAVE_IGNORED_SHARED_TABLE
         por "Slave SQL thread ignorado a consulta devido '%s'"
         spa "Slave SQL thread ignorado el query '%s'"
         swe "Slav SQL tråden ignorerade '%s' pga tabellen är delad"
+ER_NO_AUTOINCREMENT_WITH_UNIQUE
+        eng "AUTO_INCREMENT column %`s cannot be used in the UNIQUE index %`s"
@@ -889,9 +889,6 @@ void close_thread_tables(THD *thd)
 
   for (table= thd->open_tables; table; table= table->next)
   {
-    if (table->update_handler)
-      table->delete_update_handler();
-
     /* Table might be in use by some outer statement. */
     DBUG_PRINT("tcache", ("table: '%s' query_id: %lu",
                           table->s->table_name.str, (ulong) table->query_id));
@@ -8725,7 +8722,7 @@ fill_record(THD *thd, TABLE *table, Field **ptr, List<Item> &values,
 
     if (unlikely(field->invisible))
       continue;
-    else
+
     value=v++;
 
     bool vers_sys_field= table->versioned() && field->vers_sys_field();
@@ -6964,6 +6964,11 @@ inline int handler::ha_update_tmp_row(const uchar *old_data, uchar *new_data)
   return error;
 }
 
+inline bool handler::has_long_unique()
+{
+  return table->s->long_unique_table;
+}
+
 extern pthread_attr_t *get_connection_attrib(void);
 
 /**
@@ -751,6 +751,10 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
     && !table->versioned()
     && table->file->has_transactions();
 
+  if (table->versioned(VERS_TIMESTAMP) ||
+      (table_list->has_period() && !portion_of_time_through_update))
+    table->file->prepare_for_insert(1);
+
   THD_STAGE_INFO(thd, stage_updating);
   while (likely(!(error=info.read_record())) && likely(!thd->killed) &&
          likely(!thd->is_error()))
@@ -1237,6 +1241,9 @@ multi_delete::initialize_tables(JOIN *join)
       normal_tables= 1;
       tbl->prepare_triggers_for_delete_stmt_or_event();
       tbl->prepare_for_position();
+
+      if (tbl->versioned(VERS_TIMESTAMP))
+        tbl->file->prepare_for_insert(1);
     }
     else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
              walk == delete_tables)
@@ -882,9 +882,12 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list,
   if (duplic != DUP_ERROR || ignore)
   {
     table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
-    if (table->file->ha_table_flags() & HA_DUPLICATE_POS &&
-        table->file->ha_rnd_init_with_error(0))
-      goto abort;
+    if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
+    {
+      if (table->file->ha_rnd_init_with_error(0))
+        goto abort;
+      table->file->prepare_for_insert();
+    }
   }
   /**
     This is a simple check for the case when the table has a trigger
@@ -2541,6 +2544,11 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
   uchar *bitmap;
   char *copy_tmp;
   uint bitmaps_used;
+  KEY_PART_INFO *key_part, *end_part;
+  Field **default_fields, **virtual_fields;
+  KEY *keys;
+  KEY_PART_INFO *key_parts;
+  uchar *record;
   DBUG_ENTER("Delayed_insert::get_local_table");
 
   /* First request insert thread to get a lock */
@@ -2587,18 +2595,32 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
   share= table->s;
 
   /*
-    Allocate memory for the TABLE object, the field pointers array, and
-    one record buffer of reclength size. Normally a table has three
-    record buffers of rec_buff_length size, which includes alignment
-    bytes. Since the table copy is used for creating one record only,
-    the other record buffers and alignment are unnecessary.
+    Allocate memory for the TABLE object, the field pointers array,
+    and one record buffer of reclength size.
+    Normally a table has three record buffers of rec_buff_length size,
+    which includes alignment bytes. Since the table copy is used for
+    creating one record only, the other record buffers and alignment
+    are unnecessary.
+    As the table will also need to calculate default values and
+    expressions, we have to allocate our own version of fields, keys and
+    key parts. The keys and key parts are needed as parse_vcol_defs()
+    changes them in case of long hash keys.
   */
   THD_STAGE_INFO(client_thd, stage_allocating_local_table);
-  copy_tmp= (char*) client_thd->alloc(sizeof(*copy)+
-                                      (share->fields+1)*sizeof(Field**)+
-                                      share->reclength +
-                                      share->column_bitmap_size*4);
-  if (!copy_tmp)
+  if (!multi_alloc_root(client_thd->mem_root,
+                        &copy_tmp, sizeof(*table),
+                        &field, (uint) (share->fields+1)*sizeof(Field**),
+                        &default_fields,
+                        (share->default_fields +
+                         share->default_expressions + 1) * sizeof(Field*),
+                        &virtual_fields,
+                        (share->virtual_fields + 1) * sizeof(Field*),
+                        &keys, share->keys * sizeof(KEY),
+                        &key_parts,
+                        share->ext_key_parts * sizeof(KEY_PART_INFO),
+                        &record, (uint) share->reclength,
+                        &bitmap, (uint) share->column_bitmap_size*4,
+                        NullS))
    goto error;
 
   /* Copy the TABLE object. */
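multi_alloc_root() packs all of these buffers into one MEM_ROOT allocation, replacing the earlier arithmetic on a single client_thd->alloc() block plus separate per-array allocations. A rough standalone analogue of that pattern (plain calloc/free instead of a MEM_ROOT, and without the per-slice alignment the real mysys function performs):

    #include <cstdlib>

    // Carve one allocation into independently usable slices; one free()
    // releases everything, like freeing the MEM_ROOT does in the server.
    struct Slices { char *copy_tmp; char **field; unsigned char *record; };

    static Slices alloc_slices(size_t table_sz, size_t fields_sz, size_t rec_sz) {
      char *block = static_cast<char *>(calloc(1, table_sz + fields_sz + rec_sz));
      Slices s;
      s.copy_tmp = block;                                      // TABLE copy
      s.field = reinterpret_cast<char **>(block + table_sz);   // field pointer array
      s.record = reinterpret_cast<unsigned char *>(block + table_sz + fields_sz);
      return s;
    }

    int main() {
      Slices s = alloc_slices(128, 8 * sizeof(char *), 256);
      s.record[0] = 0xAA;   // each slice is usable on its own
      free(s.copy_tmp);     // one call frees the whole block
    }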
@@ -2607,27 +2629,21 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
 
   /* We don't need to change the file handler here */
   /* Assign the pointers for the field pointers array and the record. */
-  field= copy->field= (Field**) (copy + 1);
-  bitmap= (uchar*) (field + share->fields + 1);
-  copy->record[0]= (bitmap + share->column_bitmap_size*4);
+  copy->field= field;
+  copy->record[0]= record;
   memcpy((char*) copy->record[0], (char*) table->record[0], share->reclength);
   if (share->default_fields || share->default_expressions)
-  {
-    copy->default_field= (Field**)
-      client_thd->alloc((share->default_fields +
-                         share->default_expressions + 1)*
-                        sizeof(Field*));
-    if (!copy->default_field)
-      goto error;
-  }
+    copy->default_field= default_fields;
   if (share->virtual_fields)
-  {
-    copy->vfield= (Field **) client_thd->alloc((share->virtual_fields+1)*
-                                               sizeof(Field*));
-    if (!copy->vfield)
-      goto error;
-  }
+    copy->vfield= virtual_fields;
+
+  copy->key_info= keys;
+  copy->base_key_part= key_parts;
+
+  /* Copy key and key parts from original table */
+  memcpy(keys, table->key_info, sizeof(KEY) * share->keys);
+  memcpy(key_parts, table->base_key_part,
+         sizeof(KEY_PART_INFO) * share->ext_key_parts);
+
   copy->expr_arena= NULL;
 
   /* Ensure we don't use the table list of the original table */
@@ -2649,6 +2665,8 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
     (*field)->unireg_check= (*org_field)->unireg_check;
     (*field)->orig_table= copy;                 // Remove connection
     (*field)->move_field_offset(adjust_ptrs);   // Point at copy->record[0]
+    (*field)->flags|= ((*org_field)->flags & LONG_UNIQUE_HASH_FIELD);
+    (*field)->invisible= (*org_field)->invisible;
     memdup_vcol(client_thd, (*field)->vcol_info);
     memdup_vcol(client_thd, (*field)->default_value);
     memdup_vcol(client_thd, (*field)->check_constraint);
@@ -2657,6 +2675,35 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
   }
   *field=0;
 
+  /* The following is needed for long hash key */
+  key_part= copy->base_key_part;
+  for (KEY *key= copy->key_info, *end_key= key + share->keys ;
+       key < end_key;
+       key++)
+  {
+    key->key_part= key_part;
+    key_part+= key->ext_key_parts;
+    if (key->algorithm == HA_KEY_ALG_LONG_HASH)
+      key_part++;
+  }
+
+  for (key_part= copy->base_key_part,
+       end_part= key_part + share->ext_key_parts ;
+       key_part < end_part ;
+       key_part++)
+  {
+    Field *field= key_part->field= copy->field[key_part->fieldnr - 1];
+
+    /* Fix partial fields, like in open_table_from_share() */
+    if (field->key_length() != key_part->length &&
+        !(field->flags & BLOB_FLAG))
+    {
+      field= key_part->field= field->make_new_field(client_thd->mem_root,
+                                                    copy, 0);
+      field->field_length= key_part->length;
+    }
+  }
+
   if (share->virtual_fields || share->default_expressions ||
       share->default_fields)
   {
@@ -3259,6 +3306,12 @@ pthread_handler_t handle_delayed_insert(void *arg)
       di->table->file->ha_release_auto_increment();
       mysql_unlock_tables(thd, lock);
       trans_commit_stmt(thd);
+      /*
+        We have to delete the update handler as we need to create a new
+        one for the next lock table, to ensure they both have the same
+        read view.
+      */
+      di->table->file->delete_update_handler();
       di->group_count=0;
       mysql_audit_release(thd);
       mysql_mutex_lock(&di->mutex);
@@ -3390,6 +3443,7 @@ bool Delayed_insert::handle_inserts(void)
 
   if (table->file->ha_rnd_init_with_error(0))
     goto err;
+  table->file->prepare_for_insert();
 
   /*
     We can't use row caching when using the binary log because if
@@ -3876,9 +3930,12 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
   if (info.ignore || info.handle_duplicates != DUP_ERROR)
   {
     table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
-    if (table->file->ha_table_flags() & HA_DUPLICATE_POS &&
-        table->file->ha_rnd_init_with_error(0))
-      DBUG_RETURN(1);
+    if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
+    {
+      if (table->file->ha_rnd_init_with_error(0))
+        DBUG_RETURN(1);
+      table->file->prepare_for_insert();
+    }
   }
   if (info.handle_duplicates == DUP_REPLACE &&
       (!table->triggers || !table->triggers->has_delete_triggers()))
@@ -4628,9 +4685,12 @@ select_create::prepare(List<Item> &_values, SELECT_LEX_UNIT *u)
   if (info.ignore || info.handle_duplicates != DUP_ERROR)
   {
     table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
-    if (table->file->ha_table_flags() & HA_DUPLICATE_POS &&
-        table->file->ha_rnd_init_with_error(0))
-      DBUG_RETURN(1);
+    if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
+    {
+      if (table->file->ha_rnd_init_with_error(0))
+        DBUG_RETURN(1);
+      table->file->prepare_for_insert();
+    }
   }
   if (info.handle_duplicates == DUP_REPLACE &&
       (!table->triggers || !table->triggers->has_delete_triggers()))
@@ -391,6 +391,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
     DBUG_RETURN(TRUE);
   if (thd->lex->handle_list_of_derived(table_list, DT_PREPARE))
     DBUG_RETURN(TRUE);
+
   if (setup_tables_and_check_access(thd,
                                     &thd->lex->first_select_lex()->context,
                                     &thd->lex->first_select_lex()->
@@ -647,10 +648,12 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
 
   thd->abort_on_warning= !ignore && thd->is_strict_mode();
 
-  if ((table_list->table->file->ha_table_flags() & HA_DUPLICATE_POS) &&
-      (error= table_list->table->file->ha_rnd_init_with_error(0)))
-    goto err;
+  if ((table_list->table->file->ha_table_flags() & HA_DUPLICATE_POS))
+  {
+    if ((error= table_list->table->file->ha_rnd_init_with_error(0)))
+      goto err;
+    table->file->prepare_for_insert();
+  }
   thd_progress_init(thd, 2);
   if (table_list->table->validate_default_values_of_unset_fields(thd))
   {
@@ -18316,7 +18316,6 @@ TABLE *Create_tmp_table::start(THD *thd,
   table->copy_blobs= 1;
   table->in_use= thd;
   table->no_rows_with_nulls= param->force_not_null_cols;
-  table->update_handler= NULL;
   table->check_unique_buf= NULL;
 
   table->s= share;
@@ -3820,6 +3820,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
   for (; (key=key_iterator++) ; key_number++)
   {
     uint key_length=0;
+    Create_field *auto_increment_key= 0;
    Key_part_spec *column;
 
     is_hash_field_needed= false;
@@ -4069,6 +4070,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
         DBUG_ASSERT(key->type != Key::SPATIAL);
         if (column_nr == 0 || (file->ha_table_flags() & HA_AUTO_PART_KEY))
           auto_increment--;                     // Field is used
+        auto_increment_key= sql_field;
       }
 
       key_part_info->fieldnr= field;
@@ -4157,6 +4159,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
         }
       }
     }
+
     /* We can not store key_part_length more then 2^16 - 1 in frm */
     if (is_hash_field_needed && column->length > UINT_MAX16)
     {
@@ -4223,12 +4226,23 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
       DBUG_RETURN(TRUE);
     }
 
-    if (is_hash_field_needed && key_info->algorithm != HA_KEY_ALG_UNDEF &&
-        key_info->algorithm != HA_KEY_ALG_HASH )
+    /* Check long unique keys */
+    if (is_hash_field_needed)
     {
-      my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
-      DBUG_RETURN(TRUE);
+      if (auto_increment_key)
+      {
+        my_error(ER_NO_AUTOINCREMENT_WITH_UNIQUE, MYF(0),
+                 sql_field->field_name.str,
+                 key_info->name.str);
+        DBUG_RETURN(TRUE);
+      }
+      if (key_info->algorithm != HA_KEY_ALG_UNDEF &&
+          key_info->algorithm != HA_KEY_ALG_HASH )
+      {
+        my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
+        DBUG_RETURN(TRUE);
+      }
     }
     if (is_hash_field_needed ||
         (key_info->algorithm == HA_KEY_ALG_HASH &&
         key->type != Key::PRIMARY &&
@@ -968,6 +968,9 @@ update_begin:
   can_compare_record= records_are_comparable(table);
   explain->tracker.on_scan_init();
 
+  if (table->versioned(VERS_TIMESTAMP) || table_list->has_period())
+    table->file->prepare_for_insert(1);
+
   THD_STAGE_INFO(thd, stage_updating);
   while (!(error=info.read_record()) && !thd->killed)
   {
@@ -1848,9 +1851,8 @@ int mysql_multi_update_prepare(THD *thd)
   /* now lock and fill tables */
   if (!thd->stmt_arena->is_stmt_prepare() &&
       lock_tables(thd, table_list, table_count, 0))
-  {
     DBUG_RETURN(TRUE);
-  }
+
   (void) read_statistics_for_tables_if_needed(thd, table_list);
   /* @todo: downgrade the metadata locks here. */
 
@@ -2026,6 +2028,8 @@ int multi_update::prepare(List<Item> &not_used_values,
       {
         table->read_set= &table->def_read_set;
         bitmap_union(table->read_set, &table->tmp_set);
+        if (table->versioned(VERS_TIMESTAMP))
+          table->file->prepare_for_insert(1);
       }
     }
     if (unlikely(error))

sql/table.cc (70 lines changed)
@@ -1243,31 +1243,46 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
       Item *list_item;
       KEY *key= 0;
       uint key_index, parts= 0;
+      KEY_PART_INFO *key_part= table->base_key_part;
+
       for (key_index= 0; key_index < table->s->keys; key_index++)
       {
-        key=table->key_info + key_index;
+        /*
+          We have to use key from share as this function may have changed
+          table->key_info if it was ever invoked before. This could happen
+          in case of INSERT DELAYED.
+        */
+        key= table->s->key_info + key_index;
+        if (key->algorithm == HA_KEY_ALG_LONG_HASH)
+        {
           parts= key->user_defined_key_parts;
-        if (key->key_part[parts].fieldnr == field->field_index + 1)
-          break;
+          if (key_part[parts].fieldnr == field->field_index + 1)
+            break;
+          key_part++;
+        }
+        key_part+= key->ext_key_parts;
       }
-      if (!key || key->algorithm != HA_KEY_ALG_LONG_HASH)
+      if (key_index == table->s->keys)
         goto end;
-      KEY_PART_INFO *keypart;
-      for (uint i=0; i < parts; i++)
+
+      /* Correct the key & key_parts if this function has been called before */
+      key= table->key_info + key_index;
+      key->key_part= key_part;
+
+      for (uint i=0; i < parts; i++, key_part++)
       {
-        keypart= key->key_part + i;
-        if (keypart->key_part_flag & HA_PART_KEY_SEG)
+        if (key_part->key_part_flag & HA_PART_KEY_SEG)
         {
-          int length= keypart->length/keypart->field->charset()->mbmaxlen;
+          int length= key_part->length/key_part->field->charset()->mbmaxlen;
           list_item= new (mem_root) Item_func_left(thd,
-                       new (mem_root) Item_field(thd, keypart->field),
+                       new (mem_root) Item_field(thd, key_part->field),
                        new (mem_root) Item_int(thd, length));
           list_item->fix_fields(thd, NULL);
-          keypart->field->vcol_info=
-            table->field[keypart->field->field_index]->vcol_info;
+          key_part->field->vcol_info=
+            table->field[key_part->field->field_index]->vcol_info;
         }
         else
-          list_item= new (mem_root) Item_field(thd, keypart->field);
+          list_item= new (mem_root) Item_field(thd, key_part->field);
         field_list->push_back(list_item, mem_root);
       }
       Item_func_hash *hash_item= new(mem_root)Item_func_hash(thd, *field_list);
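The key_part bookkeeping above is the subtle part: all KEY_PART_INFO entries live in one packed array (table->base_key_part), each key owns ext_key_parts entries, and a long hash key carries one extra trailing part for the hidden hash field. A toy C++ walk of such a packed array, mirroring how the new loops advance their cursor (simplified assumed structs, not the server's types):

    #include <cstdio>

    struct Key { int ext_key_parts; bool long_hash; };

    int main() {
      Key keys[] = {{2, false}, {1, true}, {3, false}};
      int key_part_idx = 0;   // cursor into the shared KEY_PART_INFO array
      for (const Key &k : keys) {
        std::printf("key starts at part %d\n", key_part_idx);
        // a long hash key is followed by one extra part for the hash field
        key_part_idx += k.ext_key_parts + (k.long_hash ? 1 : 0);
      }
    }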
@@ -3871,6 +3886,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
     goto err;
   outparam->key_info= key_info;
   key_part= (reinterpret_cast<KEY_PART_INFO*>(key_info+share->keys));
+  outparam->base_key_part= key_part;
 
   memcpy(key_info, share->key_info, sizeof(*key_info)*share->keys);
   memcpy(key_part, share->key_info[0].key_part, (sizeof(*key_part) *
@@ -5244,7 +5260,6 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
   range_rowid_filter_cost_info_elems= 0;
   range_rowid_filter_cost_info_ptr= NULL;
   range_rowid_filter_cost_info= NULL;
-  update_handler= NULL;
   check_unique_buf= NULL;
   vers_write= s->versioned;
   quick_condition_rows=0;
@@ -9246,35 +9261,6 @@ void re_setup_keyinfo_hash(KEY *key_info)
   key_info->ext_key_parts= 1;
   key_info->flags&= ~HA_NOSAME;
 }
-/**
-  @brief clone of current handler.
-  Creates a clone of handler used in update for
-  unique hash key.
-*/
-void TABLE::clone_handler_for_update()
-{
-  if (this->update_handler)
-    return;
-  handler *update_handler= NULL;
-  if (!s->long_unique_table)
-    return;
-  update_handler= file->clone(s->normalized_path.str,
-                              in_use->mem_root);
-  update_handler->ha_external_lock(in_use, F_RDLCK);
-  this->update_handler= update_handler;
-  return;
-}
-
-/**
-  @brief Deletes update handler object
-*/
-void TABLE::delete_update_handler()
-{
-  update_handler->ha_external_lock(in_use, F_UNLCK);
-  update_handler->ha_close();
-  delete update_handler;
-  this->update_handler= NULL;
-}
-
 LEX_CSTRING *fk_option_name(enum_fk_option opt)
 {
@@ -1153,7 +1153,6 @@ public:
   uchar *record[3];                     /* Pointer to records */
   /* record buf to resolve hash collisions for long UNIQUE constraints */
   uchar *check_unique_buf;
-  handler *update_handler;              /* Handler used in case of update */
   uchar *write_row_record;              /* Used as optimisation in
                                            THD::write_row */
   uchar *insert_values;                 /* used by INSERT ... UPDATE */
@@ -1182,6 +1181,7 @@ public:
   /* Map of keys dependent on some constraint */
   key_map constraint_dependent_keys;
   KEY  *key_info;                       /* data of keys in database */
+  KEY_PART_INFO *base_key_part;         /* Where key parts are stored */
 
   Field **field;                        /* Pointer to fields */
   Field **vfield;                       /* Pointer to virtual fields*/
@@ -1640,8 +1640,6 @@ public:
   void vers_update_fields();
   void vers_update_end();
   void find_constraint_correlated_indexes();
-  void clone_handler_for_update();
-  void delete_update_handler();
 
 /** Number of additional fields used in versioned tables */
 #define VERSIONING_FIELDS 2
@@ -748,13 +748,9 @@ void THD::mark_tmp_tables_as_free_for_reuse()
   while ((table= tables_it++))
   {
     if ((table->query_id == query_id) && !table->open_by_handler)
-    {
-      if (table->update_handler)
-        table->delete_update_handler();
       mark_tmp_table_as_free_for_reuse(table);
-    }
   }
 
   if (locked)
   {
@@ -2605,6 +2605,8 @@ int ha_maria::extra(enum ha_extra_function operation)
   if (operation == HA_EXTRA_MMAP && !opt_maria_use_mmap)
     return 0;
 #endif
+  if (operation == HA_EXTRA_WRITE_CACHE && has_long_unique())
+    return 0;
 
   /*
     We have to set file->trn here because in some cases we call
@@ -2120,7 +2120,8 @@ int ha_myisam::info(uint flag)
 
 int ha_myisam::extra(enum ha_extra_function operation)
 {
-  if (operation == HA_EXTRA_MMAP && !opt_myisam_use_mmap)
+  if ((operation == HA_EXTRA_MMAP && !opt_myisam_use_mmap) ||
+      (operation == HA_EXTRA_WRITE_CACHE && has_long_unique()))
     return 0;
   return mi_extra(file, operation, 0);
 }
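The two extra() changes implement the commit-message point about write caches: if HA_EXTRA_WRITE_CACHE were accepted on a long-unique table, rows inserted earlier in the statement would sit in the cache rather than the index, and the update handler's duplicate probe could not see them. A small self-contained C++ model of that failure mode (toy containers, not MyISAM internals):

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    struct Table {
      std::set<std::string> index;            // what the duplicate probe can see
      std::vector<std::string> write_cache;   // buffered rows, invisible to the probe

      bool insert(const std::string &row, bool cached) {
        if (index.count(row)) return false;   // duplicate detected
        if (cached) write_cache.push_back(row);   // probe will miss this row!
        else index.insert(row);
        return true;
      }
    };

    int main() {
      Table t;
      t.insert("foo", /*cached=*/true);
      std::cout << t.insert("foo", true) << '\n';  // 1: duplicate wrongly accepted
    }

Refusing the write cache keeps every inserted row immediately visible to the probe, at the cost of unbuffered writes for these tables.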