Merge branch '10.2' into 10.3

Sergei Golubchik 2019-05-17 17:23:01 +02:00
commit c1fd027115
59 changed files with 1322 additions and 820 deletions

View File

@ -306,7 +306,7 @@ select json_merge('string', 123);
json_merge('string', 123)
NULL
Warnings:
-Warning 4038 Syntax error in JSON text in argument 1 to function 'json_merge' at position 1
+Warning 4038 Syntax error in JSON text in argument 1 to function 'json_merge_preserve' at position 1
select json_merge('"string"', 123);
json_merge('"string"', 123)
["string", 123]
@ -326,7 +326,7 @@ select json_merge('a','b');
json_merge('a','b')
NULL
Warnings:
-Warning 4038 Syntax error in JSON text in argument 1 to function 'json_merge' at position 1
+Warning 4038 Syntax error in JSON text in argument 1 to function 'json_merge_preserve' at position 1
select json_merge('{"a":"b"}','{"c":"d"}');
json_merge('{"a":"b"}','{"c":"d"}')
{"a": "b", "c": "d"}
@ -843,6 +843,77 @@ SELECT CHARSET(JSON_OBJECT());
CHARSET(JSON_OBJECT())
latin1
#
# MDEV-13992 Implement JSON_MERGE_PATCH
#
CREATE TABLE merge_t(
id INT PRIMARY KEY AUTO_INCREMENT,
target VARCHAR(100), patch VARCHAR(100)
);
INSERT INTO merge_t(target, patch) VALUES
('{"a":"b"}', '{"a":"c"}'),
('{"a":"b"}', '{"b":"c"}'),
('{"a":"b"}', '{"a":null}'),
('{"a":"b", "b":"c"}', '{"a":null}'),
('{"a":["b"]}', '{"a":"c"}'),
('{"a":"c"}', '{"a":["b"]}'),
('{"a": {"b":"c"}}', '{"a": {"b":"d", "c":null}}'),
('{"a":[{"b":"c"}]}', '{"a": [1]}'),
('["a","b"]', '["c","d"]'),
('{"a":"b"}', '["c"]'),
('{"a":"foo"}', 'null'),
('{"a":"foo"}', '"bar"'),
('{"e":null}', '{"a":1}'),
('[1,2]', '{"a":"b", "c":null}'),
('{}', '{"a":{"bb":{"ccc":null}}}'),
(NULL, '{}'),
('{}', NULL);
SELECT id, target, patch,
JSON_MERGE_PATCH(target, patch) AS merged,
JSON_EXTRACT(JSON_MERGE_PATCH(target, patch), '$.a') AS a
FROM merge_t ORDER BY id;
id target patch merged a
1 {"a":"b"} {"a":"c"} {"a": "c"} "c"
2 {"a":"b"} {"b":"c"} {"a": "b", "b": "c"} "b"
3 {"a":"b"} {"a":null} {} NULL
4 {"a":"b", "b":"c"} {"a":null} {"b": "c"} NULL
5 {"a":["b"]} {"a":"c"} {"a": "c"} "c"
6 {"a":"c"} {"a":["b"]} {"a": ["b"]} ["b"]
7 {"a": {"b":"c"}} {"a": {"b":"d", "c":null}} {"a": {"b": "d"}} {"b": "d"}
8 {"a":[{"b":"c"}]} {"a": [1]} {"a": [1]} [1]
9 ["a","b"] ["c","d"] ["c", "d"] NULL
10 {"a":"b"} ["c"] ["c"] NULL
11 {"a":"foo"} null null NULL
12 {"a":"foo"} "bar" "bar" NULL
13 {"e":null} {"a":1} {"e": null, "a": 1} 1
14 [1,2] {"a":"b", "c":null} {"a": "b"} "b"
15 {} {"a":{"bb":{"ccc":null}}} {"a": {"bb": {}}} {"bb": {}}
16 NULL {} NULL NULL
17 {} NULL NULL NULL
DROP TABLE merge_t;
SELECT JSON_MERGE_PATCH('{"a":"b"}', NULL, '{"c":"d"}');
JSON_MERGE_PATCH('{"a":"b"}', NULL, '{"c":"d"}')
NULL
SELECT JSON_MERGE_PATCH(NULL, '[1,2,3]');
JSON_MERGE_PATCH(NULL, '[1,2,3]')
[1, 2, 3]
SELECT JSON_MERGE_PATCH('{"a":"b"}', NULL, '[1,2,3]', '{"c":null,"d":"e"}');
JSON_MERGE_PATCH('{"a":"b"}', NULL, '[1,2,3]', '{"c":null,"d":"e"}')
{"d": "e"}
SELECT JSON_MERGE_PATCH();
ERROR 42000: Incorrect parameter count in the call to native function 'JSON_MERGE_PATCH'
SELECT JSON_MERGE_PATCH('{}');
ERROR 42000: Incorrect parameter count in the call to native function 'JSON_MERGE_PATCH'
SELECT JSON_MERGE_PATCH('{', '[1,2,3]');
JSON_MERGE_PATCH('{', '[1,2,3]')
NULL
Warnings:
Warning 4037 Unexpected end of JSON text in argument 1 to function 'json_merge_patch'
SELECT JSON_MERGE_PATCH('{"a":"b"}', '[1,');
JSON_MERGE_PATCH('{"a":"b"}', '[1,')
NULL
Warnings:
Warning 4037 Unexpected end of JSON text in argument 2 to function 'json_merge_patch'
#
# End of 10.2 tests
#
#
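The expected values above follow RFC 7396 (JSON Merge Patch): a non-object patch replaces the target wholesale (rows 9-12), a null-valued member in an object patch deletes that key from the target (rows 3, 4, 7 and 15), and object members present on both sides are merged recursively. Below is a toy sketch of those rules over a hypothetical simplified value type; the server implementation in item_jsonfunc.cc streams over the raw JSON text instead, and arrays are folded into the scalar case here because RFC 7396 never merges arrays, only replaces them.

#include <map>
#include <string>

struct Json {
  enum Kind { Null, Scalar, Object } kind = Null;
  std::string scalar;                   // payload when kind == Scalar
  std::map<std::string, Json> members;  // payload when kind == Object
};

// MergePatch(target, patch) as defined by RFC 7396.
static Json merge_patch(const Json &target, const Json &patch) {
  if (patch.kind != Json::Object)
    return patch;  // scalars, arrays and null replace the target outright
  // Merging into a non-object target starts from an empty object.
  Json result = target.kind == Json::Object ? target : Json{Json::Object};
  for (const auto &[key, value] : patch.members) {
    if (value.kind == Json::Null)
      result.members.erase(key);  // null means "delete this member"
    else
      result.members[key] = merge_patch(result.members[key], value);
  }
  return result;
}

SQL NULL arguments are a separate, SQL-level rule visible in rows 16-17: a NULL operand turns the running result into NULL, and only a later non-object patch (which replaces wholesale) can bring it back, which is how JSON_MERGE_PATCH('{"a":"b"}', NULL, '[1,2,3]', '{"c":null,"d":"e"}') above ends up as {"d": "e"}.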

View File

@ -491,6 +491,49 @@ SELECT JSON_ARRAY(_UTF8 'str', JSON_OBJECT(_LATIN1 'plugin', _LATIN1'unix_socket
SELECT CHARSET(JSON_ARRAY());
SELECT CHARSET(JSON_OBJECT());
--echo #
--echo # MDEV-13992 Implement JSON_MERGE_PATCH
--echo #
CREATE TABLE merge_t(
id INT PRIMARY KEY AUTO_INCREMENT,
target VARCHAR(100), patch VARCHAR(100)
);
INSERT INTO merge_t(target, patch) VALUES
('{"a":"b"}', '{"a":"c"}'),
('{"a":"b"}', '{"b":"c"}'),
('{"a":"b"}', '{"a":null}'),
('{"a":"b", "b":"c"}', '{"a":null}'),
('{"a":["b"]}', '{"a":"c"}'),
('{"a":"c"}', '{"a":["b"]}'),
('{"a": {"b":"c"}}', '{"a": {"b":"d", "c":null}}'),
('{"a":[{"b":"c"}]}', '{"a": [1]}'),
('["a","b"]', '["c","d"]'),
('{"a":"b"}', '["c"]'),
('{"a":"foo"}', 'null'),
('{"a":"foo"}', '"bar"'),
('{"e":null}', '{"a":1}'),
('[1,2]', '{"a":"b", "c":null}'),
('{}', '{"a":{"bb":{"ccc":null}}}'),
(NULL, '{}'),
('{}', NULL);
SELECT id, target, patch,
JSON_MERGE_PATCH(target, patch) AS merged,
JSON_EXTRACT(JSON_MERGE_PATCH(target, patch), '$.a') AS a
FROM merge_t ORDER BY id;
DROP TABLE merge_t;
SELECT JSON_MERGE_PATCH('{"a":"b"}', NULL, '{"c":"d"}');
SELECT JSON_MERGE_PATCH(NULL, '[1,2,3]');
SELECT JSON_MERGE_PATCH('{"a":"b"}', NULL, '[1,2,3]', '{"c":null,"d":"e"}');
--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
SELECT JSON_MERGE_PATCH();
--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
SELECT JSON_MERGE_PATCH('{}');
SELECT JSON_MERGE_PATCH('{', '[1,2,3]');
SELECT JSON_MERGE_PATCH('{"a":"b"}', '[1,');
--echo #
--echo # End of 10.2 tests
--echo #

View File

@ -821,13 +821,13 @@ select json_merge( '[1, 2]', '[3, 4' );
json_merge( '[1, 2]', '[3, 4' )
NULL
Warnings:
-Warning 4037 Unexpected end of JSON text in argument 2 to function 'json_merge'
+Warning 4037 Unexpected end of JSON text in argument 2 to function 'json_merge_preserve'
error ER_INVALID_JSON_TEXT_IN_PARAM
select json_merge( '[1, 2', '[3, 4]' );
json_merge( '[1, 2', '[3, 4]' )
NULL
Warnings:
-Warning 4037 Unexpected end of JSON text in argument 1 to function 'json_merge'
+Warning 4037 Unexpected end of JSON text in argument 1 to function 'json_merge_preserve'
select json_merge( '1', '2' );
json_merge( '1', '2' )
[1, 2]

View File

@ -2,9 +2,9 @@ select plugin_status from information_schema.plugins where plugin_name='feedback
plugin_status
ACTIVE
SELECT variable_value INTO @feedback_used FROM information_schema.feedback where variable_name = 'FEEDBACK used';
-SELECT variable_value = @feedback_used + 1 FROM information_schema.feedback where variable_name = 'FEEDBACK used';
+SELECT variable_value = @feedback_used + 1 as 'MUST BE 1' FROM information_schema.feedback where variable_name = 'FEEDBACK used';
-variable_value = @feedback_used + 1
+MUST BE 1
-0
+1
select * from information_schema.feedback where variable_name like 'feed%'
and variable_name not like '%_uid' and variable_name not like 'FEEDBACK used'
and variable_name not like '%debug%';

View File

@ -17,7 +17,7 @@ select plugin_status from information_schema.plugins where plugin_name='feedback
SELECT variable_value INTO @feedback_used FROM information_schema.feedback where variable_name = 'FEEDBACK used';
# Now $feedback_used == X+1, and 'FEEDBACK used' is also X+1. And variable_value is increased again when we run the next SELECT
-SELECT variable_value = @feedback_used + 1 FROM information_schema.feedback where variable_name = 'FEEDBACK used';
+SELECT variable_value = @feedback_used + 1 as 'MUST BE 1' FROM information_schema.feedback where variable_name = 'FEEDBACK used';
# Now when we are happy with 'FEEDBACK used', we can check everything else

View File

@ -1982,6 +1982,19 @@ protected:
};
class Create_func_json_merge_patch : public Create_native_func
{
public:
virtual Item *create_native(THD *thd, LEX_CSTRING *name, List<Item> *item_list);
static Create_func_json_merge_patch s_singleton;
protected:
Create_func_json_merge_patch() {}
virtual ~Create_func_json_merge_patch() {}
};
class Create_func_json_quote : public Create_func_arg1
{
public:
@ -5514,6 +5527,30 @@ Create_func_json_merge::create_native(THD *thd, LEX_CSTRING *name,
}
Create_func_json_merge_patch Create_func_json_merge_patch::s_singleton;
Item*
Create_func_json_merge_patch::create_native(THD *thd, LEX_CSTRING *name,
List<Item> *item_list)
{
Item *func;
int arg_count;
if (item_list == NULL ||
(arg_count= item_list->elements) < 2) // json, json
{
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
func= NULL;
}
else
{
func= new (thd->mem_root) Item_func_json_merge_patch(thd, *item_list);
}
return func;
}
Create_func_json_contains Create_func_json_contains::s_singleton;
Item*
@ -7108,6 +7145,8 @@ static Native_func_registry func_array[] =
{ { STRING_WITH_LEN("JSON_LENGTH") }, BUILDER(Create_func_json_length)}, { { STRING_WITH_LEN("JSON_LENGTH") }, BUILDER(Create_func_json_length)},
{ { STRING_WITH_LEN("JSON_LOOSE") }, BUILDER(Create_func_json_loose)}, { { STRING_WITH_LEN("JSON_LOOSE") }, BUILDER(Create_func_json_loose)},
{ { STRING_WITH_LEN("JSON_MERGE") }, BUILDER(Create_func_json_merge)}, { { STRING_WITH_LEN("JSON_MERGE") }, BUILDER(Create_func_json_merge)},
{ { STRING_WITH_LEN("JSON_MERGE_PATCH") }, BUILDER(Create_func_json_merge_patch)},
{ { STRING_WITH_LEN("JSON_MERGE_PRESERVE") }, BUILDER(Create_func_json_merge)},
{ { STRING_WITH_LEN("JSON_QUERY") }, BUILDER(Create_func_json_query)}, { { STRING_WITH_LEN("JSON_QUERY") }, BUILDER(Create_func_json_query)},
{ { STRING_WITH_LEN("JSON_QUOTE") }, BUILDER(Create_func_json_quote)}, { { STRING_WITH_LEN("JSON_QUOTE") }, BUILDER(Create_func_json_quote)},
{ { STRING_WITH_LEN("JSON_OBJECT") }, BUILDER(Create_func_json_object)}, { { STRING_WITH_LEN("JSON_OBJECT") }, BUILDER(Create_func_json_object)},

View File

@ -2152,6 +2152,331 @@ null_return:
}
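/*
  Helper for JSON_MERGE_PATCH: append the value at je to str.
  Scalars and arrays are copied verbatim; objects are copied with
  members whose value is JSON null filtered out, as RFC 7396 requires
  for values taken from the patch document.
*/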
static int copy_value_patch(String *str, json_engine_t *je)
{
int first_key= 1;
if (je->value_type != JSON_VALUE_OBJECT)
{
const uchar *beg, *end;
beg= je->value_begin;
if (!json_value_scalar(je))
{
if (json_skip_level(je))
return 1;
end= je->s.c_str;
}
else
end= je->value_end;
if (append_simple(str, beg, end-beg))
return 1;
return 0;
}
/* JSON_VALUE_OBJECT */
if (str->append("{", 1))
return 1;
while (json_scan_next(je) == 0 && je->state != JST_OBJ_END)
{
const uchar *key_start;
/* Loop through the Json_1 keys and compare with the Json_2 keys. */
DBUG_ASSERT(je->state == JST_KEY);
key_start= je->s.c_str;
if (json_read_value(je))
return 1;
if (je->value_type == JSON_VALUE_NULL)
continue;
if (!first_key)
{
if (str->append(", ", 2))
return 3;
}
else
first_key= 0;
if (str->append("\"", 1) ||
append_simple(str, key_start, je->value_begin - key_start) ||
copy_value_patch(str, je))
return 1;
}
if (str->append("}", 1))
return 1;
return 0;
}
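/*
  Recursively merge the value at je2 (the patch) into the value at je1
  (the target), appending the result to str. Two objects are merged
  member by member; in any other combination the patch value simply
  replaces the target. A JSON null patch value produces no output and
  sets *empty_result so the caller can drop the member altogether.
*/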
static int do_merge_patch(String *str, json_engine_t *je1, json_engine_t *je2,
bool *empty_result)
{
if (json_read_value(je1) || json_read_value(je2))
return 1;
if (je1->value_type == JSON_VALUE_OBJECT &&
je2->value_type == JSON_VALUE_OBJECT)
{
json_engine_t sav_je1= *je1;
json_engine_t sav_je2= *je2;
int first_key= 1;
json_string_t key_name;
size_t sav_len;
bool mrg_empty;
*empty_result= FALSE;
json_string_set_cs(&key_name, je1->s.cs);
if (str->append("{", 1))
return 3;
while (json_scan_next(je1) == 0 &&
je1->state != JST_OBJ_END)
{
const uchar *key_start, *key_end;
/* Loop through the Json_1 keys and compare with the Json_2 keys. */
DBUG_ASSERT(je1->state == JST_KEY);
key_start= je1->s.c_str;
do
{
key_end= je1->s.c_str;
} while (json_read_keyname_chr(je1) == 0);
if (je1->s.error)
return 1;
sav_len= str->length();
if (!first_key)
{
if (str->append(", ", 2))
return 3;
*je2= sav_je2;
}
if (str->append("\"", 1) ||
append_simple(str, key_start, key_end - key_start) ||
str->append("\":", 2))
return 3;
while (json_scan_next(je2) == 0 &&
je2->state != JST_OBJ_END)
{
int ires;
DBUG_ASSERT(je2->state == JST_KEY);
json_string_set_str(&key_name, key_start, key_end);
if (!json_key_matches(je2, &key_name))
{
if (je2->s.error || json_skip_key(je2))
return 2;
continue;
}
/* Json_2 has same key as Json_1. Merge them. */
if ((ires= do_merge_patch(str, je1, je2, &mrg_empty)))
return ires;
if (mrg_empty)
str->length(sav_len);
else
first_key= 0;
goto merged_j1;
}
if (je2->s.error)
return 2;
key_start= je1->s.c_str;
/* Just append the Json_1 key value. */
if (json_skip_key(je1))
return 1;
if (append_simple(str, key_start, je1->s.c_str - key_start))
return 3;
first_key= 0;
merged_j1:
continue;
}
*je2= sav_je2;
/*
Now loop through the Json_2 keys.
Skip if there is same key in Json_1
*/
while (json_scan_next(je2) == 0 &&
je2->state != JST_OBJ_END)
{
const uchar *key_start, *key_end;
DBUG_ASSERT(je2->state == JST_KEY);
key_start= je2->s.c_str;
do
{
key_end= je2->s.c_str;
} while (json_read_keyname_chr(je2) == 0);
if (je2->s.error)
return 1;
*je1= sav_je1;
while (json_scan_next(je1) == 0 &&
je1->state != JST_OBJ_END)
{
DBUG_ASSERT(je1->state == JST_KEY);
json_string_set_str(&key_name, key_start, key_end);
if (!json_key_matches(je1, &key_name))
{
if (je1->s.error || json_skip_key(je1))
return 2;
continue;
}
if (json_skip_key(je2) ||
json_skip_level(je1))
return 1;
goto continue_j2;
}
if (je1->s.error)
return 2;
sav_len= str->length();
if (!first_key && str->append(", ", 2))
return 3;
if (str->append("\"", 1) ||
append_simple(str, key_start, key_end - key_start) ||
str->append("\":", 2))
return 3;
if (json_read_value(je2))
return 1;
if (je2->value_type == JSON_VALUE_NULL)
str->length(sav_len);
else
{
if (copy_value_patch(str, je2))
return 1;
first_key= 0;
}
continue_j2:
continue;
}
if (str->append("}", 1))
return 3;
}
else
{
if (!json_value_scalar(je1) && json_skip_level(je1))
return 1;
*empty_result= je2->value_type == JSON_VALUE_NULL;
if (!(*empty_result) && copy_value_patch(str, je2))
return 1;
}
return 0;
}
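/*
  JSON_MERGE_PATCH(target, patch[, patch ...]): fold do_merge_patch()
  over the arguments left to right, swapping str and js1 after every
  step so the intermediate result becomes the next target. An SQL NULL
  argument makes the running result NULL; a later non-object argument
  replaces it wholesale and the merge resumes from there.
*/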
String *Item_func_json_merge_patch::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
json_engine_t je1, je2;
String *js1= args[0]->val_json(&tmp_js1), *js2=NULL;
uint n_arg;
bool empty_result, merge_to_null;
merge_to_null= args[0]->null_value;
for (n_arg=1; n_arg < arg_count; n_arg++)
{
js2= args[n_arg]->val_json(&tmp_js2);
if (args[n_arg]->null_value)
{
merge_to_null= true;
goto cont_point;
}
json_scan_start(&je2, js2->charset(),(const uchar *) js2->ptr(),
(const uchar *) js2->ptr() + js2->length());
if (merge_to_null)
{
if (json_read_value(&je2))
goto error_return;
if (je2.value_type == JSON_VALUE_OBJECT)
{
merge_to_null= true;
goto cont_point;
}
merge_to_null= false;
str->set(js2->ptr(), js2->length(), js2->charset());
goto cont_point;
}
str->set_charset(js1->charset());
str->length(0);
json_scan_start(&je1, js1->charset(),(const uchar *) js1->ptr(),
(const uchar *) js1->ptr() + js1->length());
if (do_merge_patch(str, &je1, &je2, &empty_result))
goto error_return;
if (empty_result)
str->append("null");
cont_point:
{
/* Swap str and js1. */
if (str == &tmp_js1)
{
str= js1;
js1= &tmp_js1;
}
else
{
js1= str;
str= &tmp_js1;
}
}
}
if (merge_to_null)
goto null_return;
json_scan_start(&je1, js1->charset(),(const uchar *) js1->ptr(),
(const uchar *) js1->ptr() + js1->length());
str->length(0);
str->set_charset(js1->charset());
if (json_nice(&je1, str, Item_func_json_format::LOOSE))
goto error_return;
null_value= 0;
return str;
error_return:
if (je1.s.error)
report_json_error(js1, &je1, 0);
if (je2.s.error)
report_json_error(js2, &je2, n_arg);
null_return:
null_value= 1;
return NULL;
}
bool Item_func_json_length::fix_length_and_dec()
{
if (arg_count > 1)

View File

@ -289,11 +289,21 @@ public:
Item_func_json_array(thd, list) {}
String *val_str(String *);
bool is_json_type() { return true; }
-const char *func_name() const { return "json_merge"; }
+const char *func_name() const { return "json_merge_preserve"; }
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_json_merge>(thd, this); }
};
class Item_func_json_merge_patch: public Item_func_json_merge
{
public:
Item_func_json_merge_patch(THD *thd, List<Item> &list):
Item_func_json_merge(thd, list) {}
const char *func_name() const { return "json_merge_patch"; }
String *val_str(String *);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_json_merge_patch>(thd, this); }
};
class Item_func_json_length: public Item_long_func
{

View File

@ -7831,7 +7831,6 @@ struct my_option my_long_options[]=
MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-offset"), // OPTIMIZER_TRACE MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-offset"), // OPTIMIZER_TRACE
MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-limit"), // OPTIMIZER_TRACE MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-limit"), // OPTIMIZER_TRACE
MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-max-mem-size"), // OPTIMIZER_TRACE MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-max-mem-size"), // OPTIMIZER_TRACE
MYSQL_TO_BE_IMPLEMENTED_OPTION("eq-range-index-dive-limit"),
MYSQL_COMPATIBILITY_OPTION("server-id-bits"), MYSQL_COMPATIBILITY_OPTION("server-id-bits"),
MYSQL_TO_BE_IMPLEMENTED_OPTION("slave-rows-search-algorithms"), // HAVE_REPLICATION MYSQL_TO_BE_IMPLEMENTED_OPTION("slave-rows-search-algorithms"), // HAVE_REPLICATION
MYSQL_TO_BE_IMPLEMENTED_OPTION("slave-allow-batching"), // HAVE_REPLICATION MYSQL_TO_BE_IMPLEMENTED_OPTION("slave-allow-batching"), // HAVE_REPLICATION

View File

@ -3502,8 +3502,7 @@ open_and_process_table(THD *thd, TABLE_LIST *tables, uint *counter, uint flags,
Check whether the information schema contains a table
whose name is tables->schema_table_name
*/
-ST_SCHEMA_TABLE *schema_table;
-schema_table= find_schema_table(thd, &tables->schema_table_name);
+ST_SCHEMA_TABLE *schema_table= tables->schema_table;
if (!schema_table ||
(schema_table->hidden &&
((sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0 ||
@ -3514,7 +3513,7 @@ open_and_process_table(THD *thd, TABLE_LIST *tables, uint *counter, uint flags,
lex->sql_command == SQLCOM_SHOW_KEYS)))
{
my_error(ER_UNKNOWN_TABLE, MYF(0),
-tables->schema_table_name.str, INFORMATION_SCHEMA_NAME.str);
+tables->table_name.str, INFORMATION_SCHEMA_NAME.str);
DBUG_RETURN(1);
}
}

View File

@ -1848,7 +1848,7 @@ thd_innodb_tmpdir(
@return reference to transaction pointer */
static trx_t* thd_to_trx(THD* thd)
{
-return *reinterpret_cast<trx_t**>(thd_ha_data(thd, innodb_hton_ptr));
+return reinterpret_cast<trx_t*>(thd_get_ha_data(thd, innodb_hton_ptr));
}
#ifdef WITH_WSREP

View File

@ -128,10 +128,15 @@ CHECK_CXX_SOURCE_COMPILES("
#endif
int main() {
static __thread int tls;
tls=0;
return tls;
}
" HAVE_THREAD_LOCAL)
if(HAVE_THREAD_LOCAL)
ADD_DEFINITIONS(-DROCKSDB_SUPPORT_THREAD_LOCAL)
else()
MESSAGE(SEND_ERROR "The compiler failed the check for ROCKSDB_SUPPORT_THREAD_LOCAL. "
"MyRocks requires that feature.")
endif()
INCLUDE(build_rocksdb.cmake)

View File

@ -3290,9 +3290,9 @@ void Rdb_snapshot_notifier::SnapshotCreated(
std::multiset<Rdb_transaction *> Rdb_transaction::s_tx_list;
mysql_mutex_t Rdb_transaction::s_tx_list_mutex;
-static Rdb_transaction *&get_tx_from_thd(THD *const thd) {
-return *reinterpret_cast<Rdb_transaction **>(
-my_core::thd_ha_data(thd, rocksdb_hton));
+static Rdb_transaction *get_tx_from_thd(THD *const thd) {
+return reinterpret_cast<Rdb_transaction *>(
+my_core::thd_get_ha_data(thd, rocksdb_hton));
}
namespace {
@ -3339,7 +3339,7 @@ class Rdb_perf_context_guard {
*/
static Rdb_transaction *get_or_create_tx(THD *const thd) {
-Rdb_transaction *&tx = get_tx_from_thd(thd);
+Rdb_transaction *tx = get_tx_from_thd(thd);
// TODO: this is called too many times.. O(#rows)
if (tx == nullptr) {
bool rpl_skip_tx_api= false; // MARIAROCKS_NOT_YET.
@ -3354,6 +3354,7 @@ static Rdb_transaction *get_or_create_tx(THD *const thd) {
}
tx->set_params(THDVAR(thd, lock_wait_timeout), THDVAR(thd, max_row_locks));
tx->start_tx();
my_core::thd_set_ha_data(thd, rocksdb_hton, tx);
} else {
tx->set_params(THDVAR(thd, lock_wait_timeout), THDVAR(thd, max_row_locks));
if (!tx->is_tx_started()) {
@ -3365,7 +3366,7 @@ static Rdb_transaction *get_or_create_tx(THD *const thd) {
}
static int rocksdb_close_connection(handlerton *const hton, THD *const thd) {
-Rdb_transaction *&tx = get_tx_from_thd(thd);
+Rdb_transaction *tx = get_tx_from_thd(thd);
if (tx != nullptr) {
int rc = tx->finish_bulk_load(false);
if (rc != 0) {
@ -3376,7 +3377,6 @@ static int rocksdb_close_connection(handlerton *const hton, THD *const thd) {
}
delete tx;
-tx = nullptr;
}
return HA_EXIT_SUCCESS;
}
@ -3444,7 +3444,7 @@ static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx)
{
bool async=false; // This is "ASYNC_COMMIT" feature which is only present in webscalesql
-Rdb_transaction *&tx = get_tx_from_thd(thd);
+Rdb_transaction *tx = get_tx_from_thd(thd);
if (!tx->can_prepare()) {
return HA_EXIT_FAILURE;
}
@ -3695,7 +3695,7 @@ static void rocksdb_commit_ordered(handlerton *hton, THD* thd, bool all)
// Same assert as InnoDB has
DBUG_ASSERT(all || (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT |
OPTION_BEGIN)));
-Rdb_transaction *&tx = get_tx_from_thd(thd);
+Rdb_transaction *tx = get_tx_from_thd(thd);
if (!tx->is_two_phase()) {
/*
ordered_commit is supposedly slower as it is done sequentially
@ -3727,7 +3727,7 @@ static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx)
rocksdb::StopWatchNano timer(rocksdb::Env::Default(), true);
/* note: h->external_lock(F_UNLCK) is called after this function is called) */
-Rdb_transaction *&tx = get_tx_from_thd(thd);
+Rdb_transaction *tx = get_tx_from_thd(thd);
/* this will trigger saving of perf_context information */
Rdb_perf_context_guard guard(tx, rocksdb_perf_context_level(thd));
@ -3800,7 +3800,7 @@ static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx)
static int rocksdb_rollback(handlerton *const hton, THD *const thd,
bool rollback_tx) {
-Rdb_transaction *&tx = get_tx_from_thd(thd);
+Rdb_transaction *tx = get_tx_from_thd(thd);
Rdb_perf_context_guard guard(tx, rocksdb_perf_context_level(thd));
if (tx != nullptr) {
@ -4607,7 +4607,7 @@ static int rocksdb_savepoint(handlerton *const hton, THD *const thd,
static int rocksdb_rollback_to_savepoint(handlerton *const hton, THD *const thd,
void *const savepoint) {
-Rdb_transaction *&tx = get_tx_from_thd(thd);
+Rdb_transaction *tx = get_tx_from_thd(thd);
return tx->rollback_to_savepoint(savepoint);
}
@ -5346,49 +5346,6 @@ static int rocksdb_done_func(void *const p) {
error = 1;
}
-/*
-MariaDB: When the plugin is unloaded with UNINSTALL SONAME command, some
-connections may still have Rdb_transaction objects.
-These objects are not genuine transactions (as SQL layer makes sure that
-a plugin that is being unloaded has no open tables), they are empty
-Rdb_transaction objects that were left there to save on object
-creation/deletion.
-Go through the list and delete them.
-*/
-{
-class Rdb_trx_deleter: public Rdb_tx_list_walker {
-public:
-std::set<Rdb_transaction*> rdb_trxs;
-void process_tran(const Rdb_transaction *const tx) override {
-/*
-Check if the transaction is really empty. We only check
-non-WriteBatch-based transactions, because there is no easy way to
-check WriteBatch-based transactions.
-*/
-if (!tx->is_writebatch_trx()) {
-const auto tx_impl = static_cast<const Rdb_transaction_impl *>(tx);
-DBUG_ASSERT(tx_impl);
-if (tx_impl->get_rdb_trx())
-DBUG_ASSERT(0);
-}
-rdb_trxs.insert((Rdb_transaction*)tx);
-};
-} deleter;
-Rdb_transaction::walk_tx_list(&deleter);
-for (std::set<Rdb_transaction*>::iterator it= deleter.rdb_trxs.begin();
-it != deleter.rdb_trxs.end();
-++it)
-{
-// When a transaction is deleted, it removes itself from s_tx_list.
-delete *it;
-}
-}
/*
destructors for static objects can be called at _exit(),
but we want to free the memory at dlclose()
@ -13833,7 +13790,7 @@ int rocksdb_check_bulk_load(
return 1;
}
-Rdb_transaction *&tx = get_tx_from_thd(thd);
+Rdb_transaction *tx = get_tx_from_thd(thd);
if (tx != nullptr) {
const int rc = tx->finish_bulk_load();
if (rc != 0) {
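The hunks in this file, together with the thd_to_trx() change in ha_innodb.cc above, all make the same switch: instead of aliasing the per-connection ha_data slot through thd_ha_data() and a Rdb_transaction *& reference, the transaction object is now read with thd_get_ha_data() and registered explicitly with thd_set_ha_data(). In MariaDB the latter also keeps the plugin pinned while the slot holds a non-NULL pointer, so UNINSTALL SONAME waits for those connections, which is why the Rdb_trx_deleter sweep in rocksdb_done_func() could be dropped. A minimal sketch of the pattern for a hypothetical engine follows; My_trx, my_hton and the helper names are illustrative, while the two thd_*_ha_data() calls are the real plugin API:

#include <mysql/plugin.h>  // declares thd_get_ha_data() / thd_set_ha_data()

struct My_trx { /* engine-private per-connection transaction state */ };

static handlerton *my_hton;  // assumed to be filled in during plugin init

// Read the per-connection slot; stays nullptr until a transaction exists.
static My_trx *get_trx_from_thd(THD *thd) {
  return static_cast<My_trx *>(thd_get_ha_data(thd, my_hton));
}

static My_trx *get_or_create_trx(THD *thd) {
  My_trx *trx = get_trx_from_thd(thd);
  if (trx == nullptr) {
    trx = new My_trx();
    // Register the object exactly once; later lookups go through
    // thd_get_ha_data(), so no reference into the slot is kept around.
    thd_set_ha_data(thd, my_hton, trx);
  }
  return trx;
}

static int my_close_connection(handlerton *, THD *thd) {
  delete get_trx_from_thd(thd);
  thd_set_ha_data(thd, my_hton, nullptr);  // clear the slot, unpinning the plugin
  return 0;
}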

View File

@ -1,674 +1 @@
tests moved to rocksdb.locking_issues_case*
-----------------------------------------------------------------------
- Locking issues case 1.1:
- Locking rows that do not exist when using all primary key columns in
- a WHERE clause
- using REPEATABLE READ transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
id1 id2 value
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
INSERT INTO t0 VALUES (1,5,0);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 1.1:
- Locking rows that do not exist when using all primary key columns in
- a WHERE clause
- using READ COMMITTED transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
id1 id2 value
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
INSERT INTO t0 VALUES (1,5,0);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 1.2:
- Locking rows that do not exist without using all primary key
- columns in a WHERE clause
- using REPEATABLE READ transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
SELECT * FROM t0 WHERE id1=1 FOR UPDATE;
id1 id2 value
1 1 0
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE;
id1 id2 value
INSERT INTO t0 VALUES (1,5,0);
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 1.2:
- Locking rows that do not exist without using all primary key
- columns in a WHERE clause
- using READ COMMITTED transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
SELECT * FROM t0 WHERE id1=1 FOR UPDATE;
id1 id2 value
1 1 0
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE;
id1 id2 value
INSERT INTO t0 VALUES (1,5,0);
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 2:
- Rows that are scanned but do not match the WHERE are not locked
- using REPEATABLE READ transaction isolation level unless
- rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
CREATE TABLE t0(id INT PRIMARY KEY, value INT);
INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
connection con1;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
id value
2 1
5 1
connection con2;
UPDATE t0 SET VALUE=10 WHERE id=1;
UPDATE t0 SET VALUE=10 WHERE id=5;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
UPDATE t0 SET value=100 WHERE id in (4,5) and value>0;
connection con2;
SELECT * FROM t0 WHERE id=4 FOR UPDATE;
id value
4 0
COMMIT;
SELECT * FROM t0;
id value
1 10
2 1
3 0
4 0
5 1
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 2:
- Rows that are scanned but do not match the WHERE are not locked
- using READ COMMITTED transaction isolation level unless
- rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
CREATE TABLE t0(id INT PRIMARY KEY, value INT);
INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
connection con1;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
id value
2 1
5 1
connection con2;
UPDATE t0 SET VALUE=10 WHERE id=1;
UPDATE t0 SET VALUE=10 WHERE id=5;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
UPDATE t0 SET value=100 WHERE id in (4,5) and value>0;
connection con2;
SELECT * FROM t0 WHERE id=4 FOR UPDATE;
id value
4 0
COMMIT;
SELECT * FROM t0;
id value
1 10
2 1
3 0
4 0
5 1
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 2:
- Rows that are scanned but do not match the WHERE are not locked
- using REPEATABLE READ transaction isolation level unless
- rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
SET GLOBAL rocksdb_lock_scanned_rows=ON;
CREATE TABLE t0(id INT PRIMARY KEY, value INT);
INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
connection con1;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
id value
2 1
5 1
connection con2;
UPDATE t0 SET VALUE=10 WHERE id=1;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
SET GLOBAL rocksdb_lock_scanned_rows=0;
-----------------------------------------------------------------------
- Locking issues case 2:
- Rows that are scanned but do not match the WHERE are not locked
- using READ COMMITTED transaction isolation level unless
- rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
SET GLOBAL rocksdb_lock_scanned_rows=ON;
CREATE TABLE t0(id INT PRIMARY KEY, value INT);
INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
connection con1;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
id value
2 1
5 1
connection con2;
UPDATE t0 SET VALUE=10 WHERE id=1;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
SET GLOBAL rocksdb_lock_scanned_rows=0;
-----------------------------------------------------------------------
- Locking issues case 3:
- After creating a snapshot, other clients updating rows
- using REPEATABLE READ transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
connection con1;
ERROR: 1213
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 3:
- After creating a snapshot, other clients updating rows
- using READ COMMITTED transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
connection con1;
id value
190000 1
ERROR: 0
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 4:
- Phantom rows
- using REPEATABLE READ transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
INSERT INTO t0 VALUES(200001,1), (-1,1);
connection con1;
id value
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 4:
- Phantom rows
- using READ COMMITTED transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
INSERT INTO t0 VALUES(200001,1), (-1,1);
connection con1;
id value
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 5:
- Deleting primary key
- using REPEATABLE READ transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
UPDATE t0 SET value=100 WHERE id=190000;
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
DELETE FROM t0 WHERE id=190000;
COMMIT;
connection con1;
ERROR: 1213
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 5:
- Deleting primary key
- using READ COMMITTED transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
UPDATE t0 SET value=100 WHERE id=190000;
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
DELETE FROM t0 WHERE id=190000;
COMMIT;
connection con1;
id value
ERROR: 0
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 6:
- Changing primary key
- using REPEATABLE READ transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
UPDATE t0 SET value=100 WHERE id=190000;
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
UPDATE t0 SET id=200001 WHERE id=190000;
COMMIT;
connection con1;
ERROR: 1213
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 6:
- Changing primary key
- using READ COMMITTED transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
UPDATE t0 SET value=100 WHERE id=190000;
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
UPDATE t0 SET id=200001 WHERE id=190000;
COMMIT;
connection con1;
id value
ERROR: 0
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
-----------------------------------------------------------------------
- Locking issues case 7:
- Rows that are scanned as part of a query but not in the table being
- updated should not be locked unless rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t1, t2;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
CREATE TABLE t1(id INT PRIMARY KEY, value INT);
CREATE TABLE t2(id INT PRIMARY KEY, value INT);
INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
lock_scanned_rows is 0
connection con1;
UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
connection con2;
UPDATE t2 SET value=value+100;
SELECT * FROM t2;
id value
1 101
2 102
3 103
4 104
5 105
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t1;
DROP TABLE t2;
-----------------------------------------------------------------------
- Locking issues case 7:
- Rows that are scanned as part of a query but not in the table being
- updated should not be locked unless rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t1, t2;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
CREATE TABLE t1(id INT PRIMARY KEY, value INT);
CREATE TABLE t2(id INT PRIMARY KEY, value INT);
INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
lock_scanned_rows is 0
connection con1;
UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
connection con2;
UPDATE t2 SET value=value+100;
SELECT * FROM t2;
id value
1 101
2 102
3 103
4 104
5 105
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t1;
DROP TABLE t2;
-----------------------------------------------------------------------
- Locking issues case 7:
- Rows that are scanned as part of a query but not in the table being
- updated should not be locked unless rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t1, t2;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
SET GLOBAL rocksdb_lock_scanned_rows=ON;
CREATE TABLE t1(id INT PRIMARY KEY, value INT);
CREATE TABLE t2(id INT PRIMARY KEY, value INT);
INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
lock_scanned_rows is 1
connection con1;
UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
connection con2;
UPDATE t2 SET value=value+100 WHERE id=3;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5);
SELECT * FROM t2;
id value
1 101
2 102
3 3
4 104
5 105
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t1;
DROP TABLE t2;
SET GLOBAL rocksdb_lock_scanned_rows=0;
-----------------------------------------------------------------------
- Locking issues case 7:
- Rows that are scanned as part of a query but not in the table being
- updated should not be locked unless rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t1, t2;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
SET GLOBAL rocksdb_lock_scanned_rows=ON;
CREATE TABLE t1(id INT PRIMARY KEY, value INT);
CREATE TABLE t2(id INT PRIMARY KEY, value INT);
INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
lock_scanned_rows is 1
connection con1;
UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
connection con2;
UPDATE t2 SET value=value+100 WHERE id=3;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5);
SELECT * FROM t2;
id value
1 101
2 102
3 3
4 104
5 105
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t1;
DROP TABLE t2;
SET GLOBAL rocksdb_lock_scanned_rows=0;

View File

@ -0,0 +1,30 @@
-----------------------------------------------------------------------
- Locking issues case 1.1:
- Locking rows that do not exist when using all primary key columns in
- a WHERE clause
- using READ COMMITTED transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
id1 id2 value
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
INSERT INTO t0 VALUES (1,5,0);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;

View File

@ -0,0 +1,30 @@
-----------------------------------------------------------------------
- Locking issues case 1.1:
- Locking rows that do not exist when using all primary key columns in
- a WHERE clause
- using REPEATABLE READ transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
id1 id2 value
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
INSERT INTO t0 VALUES (1,5,0);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;

View File

@ -0,0 +1,30 @@
-----------------------------------------------------------------------
- Locking issues case 1.2:
- Locking rows that do not exist without using all primary key
- columns in a WHERE clause
- using READ COMMITTED transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
SELECT * FROM t0 WHERE id1=1 FOR UPDATE;
id1 id2 value
1 1 0
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE;
id1 id2 value
INSERT INTO t0 VALUES (1,5,0);
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;

View File

@ -0,0 +1,30 @@
-----------------------------------------------------------------------
- Locking issues case 1.2:
- Locking rows that do not exist without using all primary key
- columns in a WHERE clause
- using REPEATABLE READ transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
SELECT * FROM t0 WHERE id1=1 FOR UPDATE;
id1 id2 value
1 1 0
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE;
id1 id2 value
INSERT INTO t0 VALUES (1,5,0);
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;

View File

@ -0,0 +1,50 @@
-----------------------------------------------------------------------
- Locking issues case 2:
- Rows that are scanned but do not match the WHERE are not locked
- using READ COMMITTED transaction isolation level unless
- rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
CREATE TABLE t0(id INT PRIMARY KEY, value INT);
INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
connection con1;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
id value
2 1
5 1
connection con2;
UPDATE t0 SET VALUE=10 WHERE id=1;
UPDATE t0 SET VALUE=10 WHERE id=5;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
UPDATE t0 SET value=100 WHERE id in (4,5) and value>0;
connection con2;
SELECT * FROM t0 WHERE id=4 FOR UPDATE;
id value
4 0
COMMIT;
SELECT * FROM t0;
id value
1 10
2 1
3 0
4 0
5 1
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;

View File

@ -0,0 +1,37 @@
-----------------------------------------------------------------------
- Locking issues case 2:
- Rows that are scanned but do not match the WHERE are not locked
- using READ COMMITTED transaction isolation level unless
- rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
SET GLOBAL rocksdb_lock_scanned_rows=ON;
CREATE TABLE t0(id INT PRIMARY KEY, value INT);
INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
connection con1;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
id value
2 1
5 1
connection con2;
UPDATE t0 SET VALUE=10 WHERE id=1;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
SET GLOBAL rocksdb_lock_scanned_rows=0;

View File

@ -0,0 +1,50 @@
-----------------------------------------------------------------------
- Locking issues case 2:
- Rows that are scanned but do not match the WHERE are not locked
- using REPEATABLE READ transaction isolation level unless
- rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
CREATE TABLE t0(id INT PRIMARY KEY, value INT);
INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
connection con1;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
id value
2 1
5 1
connection con2;
UPDATE t0 SET VALUE=10 WHERE id=1;
UPDATE t0 SET VALUE=10 WHERE id=5;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
UPDATE t0 SET value=100 WHERE id in (4,5) and value>0;
connection con2;
SELECT * FROM t0 WHERE id=4 FOR UPDATE;
id value
4 0
COMMIT;
SELECT * FROM t0;
id value
1 10
2 1
3 0
4 0
5 1
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;

View File

@ -0,0 +1,37 @@
-----------------------------------------------------------------------
- Locking issues case 2:
- Rows that are scanned but do not match the WHERE are not locked
- using REPEATABLE READ transaction isolation level unless
- rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
SET GLOBAL rocksdb_lock_scanned_rows=ON;
CREATE TABLE t0(id INT PRIMARY KEY, value INT);
INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
connection con1;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
id value
2 1
5 1
connection con2;
UPDATE t0 SET VALUE=10 WHERE id=1;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
SET GLOBAL rocksdb_lock_scanned_rows=0;

View File

@ -0,0 +1,25 @@
-----------------------------------------------------------------------
- Locking issues case 3:
- After creating a snapshot, other clients updating rows
- using READ COMMITTED transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
connection con1;
id value
190000 1
ERROR: 0
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;

View File

@ -0,0 +1,23 @@
-----------------------------------------------------------------------
- Locking issues case 3:
- After creating a snapshot, other clients updating rows
- using REPEATABLE READ transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
connection con1;
ERROR: 1213
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
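The contrast between ERROR: 0 above under READ COMMITTED and ERROR: 1213 here comes from MyRocks snapshot validation: under READ COMMITTED the locking read may simply lock the row version committed by the other connection, while under REPEATABLE READ that version is newer than the transaction's snapshot and the statement is aborted. A condensed sketch of the same conflict, assuming MyRocks reports snapshot conflicts as error 1213 (deadlock):
# connection 1
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
SELECT COUNT(*) FROM t0; # first read establishes the snapshot
# connection 2
UPDATE t0 SET value=value+1 WHERE id=190000;
# connection 1: the locking read now meets a row newer than its snapshot
SELECT * FROM t0 WHERE id=190000 FOR UPDATE; # expected: ERROR 1213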

View File

@ -0,0 +1,23 @@
-----------------------------------------------------------------------
- Locking issues case 4:
- Phantom rows
- using READ COMMITTED transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
INSERT INTO t0 VALUES(200001,1), (-1,1);
connection con1;
id value
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;

View File

@ -0,0 +1,23 @@
-----------------------------------------------------------------------
- Locking issues case 4:
- Phantom rows
- using REPEATABLE READ transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
INSERT INTO t0 VALUES(200001,1), (-1,1);
connection con1;
id value
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
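In both case 4 results the scan returns no rows even though the other connection inserted (200001,1) and (-1,1) at the edges of the key range while the scan was running: the statement reads from the snapshot taken when it started, so the inserts never show up as phantoms. A small-table sketch of the same check, using a plain (non-locking) re-read:
# connection 1
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
SELECT * FROM t0 WHERE value > 0; # empty; snapshot taken here
# connection 2
INSERT INTO t0 VALUES (200001,1);
# connection 1
SELECT * FROM t0 WHERE value > 0; # still empty under REPEATABLE READ
COMMIT;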

View File

@ -0,0 +1,29 @@
-----------------------------------------------------------------------
- Locking issues case 5:
- Deleting primary key
- using READ COMMITTED transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
UPDATE t0 SET value=100 WHERE id=190000;
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
DELETE FROM t0 WHERE id=190000;
COMMIT;
connection con1;
id value
ERROR: 0
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;

View File

@ -0,0 +1,28 @@
-----------------------------------------------------------------------
- Locking issues case 5:
- Deleting primary key
- using REPEATABLE READ transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
UPDATE t0 SET value=100 WHERE id=190000;
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
DELETE FROM t0 WHERE id=190000;
COMMIT;
connection con1;
ERROR: 1213
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;

View File

@ -0,0 +1,29 @@
-----------------------------------------------------------------------
- Locking issues case 6:
- Changing primary key
- using READ COMMITTED transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
UPDATE t0 SET value=100 WHERE id=190000;
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
UPDATE t0 SET id=200001 WHERE id=190000;
COMMIT;
connection con1;
id value
ERROR: 0
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;

View File

@ -0,0 +1,28 @@
-----------------------------------------------------------------------
- Locking issues case 6:
- Changing primary key
- using REPEATABLE READ transaction isolation level
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t0;
CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
Inserting 200,000 rows
UPDATE t0 SET value=100 WHERE id=190000;
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
UPDATE t0 SET id=200001 WHERE id=190000;
COMMIT;
connection con1;
ERROR: 1213
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t0;
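Cases 5 and 6 end identically (ERROR: 0 under READ COMMITTED, ERROR: 1213 under REPEATABLE READ) because changing a primary key is executed as a delete of the old key plus an insert of the new one, so to the scanning transaction both cases look like the row vanishing. Purely as an illustration, the UPDATE in case 6 amounts to:
# Roughly what UPDATE t0 SET id=200001 WHERE id=190000 does at the row level:
BEGIN;
INSERT INTO t0 (id, value) SELECT 200001, value FROM t0 WHERE id=190000;
DELETE FROM t0 WHERE id=190000;
COMMIT;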

View File

@ -0,0 +1,41 @@
-----------------------------------------------------------------------
- Locking issues case 7:
- Rows that are scanned as part of a query but not in the table being
- updated should not be locked unless rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t1, t2;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
CREATE TABLE t1(id INT PRIMARY KEY, value INT);
CREATE TABLE t2(id INT PRIMARY KEY, value INT);
INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
lock_scanned_rows is 0
connection con1;
UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
connection con2;
UPDATE t2 SET value=value+100;
SELECT * FROM t2;
id value
1 101
2 102
3 103
4 104
5 105
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t1;
DROP TABLE t2;

View File

@ -0,0 +1,45 @@
-----------------------------------------------------------------------
- Locking issues case 7:
- Rows that are scanned as part of a query but not in the table being
- updated should not be locked unless rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t1, t2;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
SET GLOBAL rocksdb_lock_scanned_rows=ON;
CREATE TABLE t1(id INT PRIMARY KEY, value INT);
CREATE TABLE t2(id INT PRIMARY KEY, value INT);
INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
BEGIN;
lock_scanned_rows is 1
connection con1;
UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
connection con2;
UPDATE t2 SET value=value+100 WHERE id=3;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5);
SELECT * FROM t2;
id value
1 101
2 102
3 3
4 104
5 105
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t1;
DROP TABLE t2;
SET GLOBAL rocksdb_lock_scanned_rows=0;

View File

@ -0,0 +1,41 @@
-----------------------------------------------------------------------
- Locking issues case 7:
- Rows that are scanned as part of a query but not in the table being
- updated should not be locked unless rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t1, t2;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
CREATE TABLE t1(id INT PRIMARY KEY, value INT);
CREATE TABLE t2(id INT PRIMARY KEY, value INT);
INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
lock_scanned_rows is 0
connection con1;
UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
connection con2;
UPDATE t2 SET value=value+100;
SELECT * FROM t2;
id value
1 101
2 102
3 103
4 104
5 105
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t1;
DROP TABLE t2;

View File

@ -0,0 +1,45 @@
-----------------------------------------------------------------------
- Locking issues case 7:
- Rows that are scanned as part of a query but not in the table being
- updated should not be locked unless rocksdb_lock_scanned_rows is on
-----------------------------------------------------------------------
DROP TABLE IF EXISTS t1, t2;
SELECT @@global.rocksdb_lock_scanned_rows;
@@global.rocksdb_lock_scanned_rows
0
SET GLOBAL rocksdb_lock_scanned_rows=ON;
CREATE TABLE t1(id INT PRIMARY KEY, value INT);
CREATE TABLE t2(id INT PRIMARY KEY, value INT);
INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
connect con1,localhost,root,,;
connect con2,localhost,root,,;
connection con1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
connection con2;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN;
lock_scanned_rows is 1
connection con1;
UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
connection con2;
UPDATE t2 SET value=value+100 WHERE id=3;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5);
SELECT * FROM t2;
id value
1 101
2 102
3 3
4 104
5 105
connection con1;
COMMIT;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t1;
DROP TABLE t2;
SET GLOBAL rocksdb_lock_scanned_rows=0;
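The case 7 results show that with rocksdb_lock_scanned_rows=ON only the t2 row the join actually read (id=3, fetched through the primary key) stays locked, which is why the second connection can still update ids 1, 2, 4 and 5. A quick probe from a third connection, assuming the state left by con1's join UPDATE above and a short lock wait timeout:
SELECT * FROM t2 WHERE id=3 FOR UPDATE; # expected: lock wait timeout
SELECT * FROM t2 WHERE id=4 FOR UPDATE; # expected: succeeds, row is not locked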

View File

@ -2,14 +2,18 @@
 # MDEV-14843: Assertion `s_tx_list.size() == 0' failed in myrocks::Rdb_transaction::term_mutex
 #
 INSTALL SONAME 'ha_rocksdb';
-connect con1,localhost,root,,test;
 CREATE TABLE t1 (i INT) ENGINE=RocksDB;
 insert into t1 values (1);
+connect con1,localhost,root,,;
+connection con1;
+insert into test.t1 values (1);
+connection default;
 DROP TABLE t1;
-connection default;
 UNINSTALL SONAME 'ha_rocksdb';
+Warnings:
+Warning 1620 Plugin is busy and will be uninstalled on shutdown
+SELECT ENGINE, SUPPORT FROM INFORMATION_SCHEMA.ENGINES WHERE ENGINE='ROCKSDB';
+ENGINE SUPPORT
+ROCKSDB NO
+disconnect con1;
 #
 # MDEV-15686: Loading MyRocks plugin back after it has been unloaded causes a crash
 #
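The amended result records the intermediate state after UNINSTALL while con1 still references the plugin: warning 1620 announces a deferred unload and the engine drops to SUPPORT NO. One way to inspect that state interactively, assuming the standard INFORMATION_SCHEMA views:
UNINSTALL SONAME 'ha_rocksdb';
SHOW WARNINGS;
SELECT ENGINE, SUPPORT FROM INFORMATION_SCHEMA.ENGINES WHERE ENGINE='ROCKSDB';
SELECT PLUGIN_NAME, PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME LIKE 'ROCKSDB%';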

View File

@ -1,67 +1,3 @@
 --source include/have_rocksdb.inc
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case1_1.inc
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case1_1.inc
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case1_2.inc
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case1_2.inc
-let $lock_scanned_rows=0;
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case2.inc
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case2.inc
-# Rerun the case2 tests with rocksdb_lock_scanned_rows on
-let $lock_scanned_rows=1;
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case2.inc
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case2.inc
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case3.inc
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case3.inc
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case4.inc
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case4.inc
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case5.inc
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case5.inc
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case6.inc
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case6.inc
-let $lock_scanned_rows=0;
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case7.inc
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case7.inc
-# Rerun the case7 tests with rocksdb_lock_scanned_rows on
-let $lock_scanned_rows=1;
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case7.inc
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case7.inc
+--echo tests moved to rocksdb.locking_issues_case*

View File

@ -0,0 +1,4 @@
--source include/have_rocksdb.inc
let $isolation_level = READ COMMITTED;
--source include/locking_issues_case1_1.inc

View File

@ -0,0 +1,4 @@
--source include/have_rocksdb.inc
let $isolation_level = REPEATABLE READ;
--source include/locking_issues_case1_1.inc

View File

@ -0,0 +1,4 @@
--source include/have_rocksdb.inc
let $isolation_level = READ COMMITTED;
--source include/locking_issues_case1_2.inc

View File

@ -0,0 +1,4 @@
--source include/have_rocksdb.inc
let $isolation_level = REPEATABLE READ;
--source include/locking_issues_case1_2.inc

View File

@ -0,0 +1,5 @@
--source include/have_rocksdb.inc
let $lock_scanned_rows=0;
let $isolation_level = READ COMMITTED;
--source include/locking_issues_case2.inc

View File

@ -0,0 +1,5 @@
--source include/have_rocksdb.inc
let $lock_scanned_rows=1;
let $isolation_level = READ COMMITTED;
--source include/locking_issues_case2.inc

View File

@ -0,0 +1,5 @@
--source include/have_rocksdb.inc
let $lock_scanned_rows=0;
let $isolation_level = REPEATABLE READ;
--source include/locking_issues_case2.inc

View File

@ -0,0 +1,5 @@
--source include/have_rocksdb.inc
let $lock_scanned_rows=1;
let $isolation_level = REPEATABLE READ;
--source include/locking_issues_case2.inc

View File

@ -0,0 +1,4 @@
--source include/have_rocksdb.inc
let $isolation_level = READ COMMITTED;
--source include/locking_issues_case3.inc

View File

@ -0,0 +1,4 @@
--source include/have_rocksdb.inc
let $isolation_level = REPEATABLE READ;
--source include/locking_issues_case3.inc

View File

@ -0,0 +1,4 @@
--source include/have_rocksdb.inc
let $isolation_level = READ COMMITTED;
--source include/locking_issues_case4.inc

View File

@ -0,0 +1,4 @@
--source include/have_rocksdb.inc
let $isolation_level = REPEATABLE READ;
--source include/locking_issues_case4.inc

View File

@ -0,0 +1,4 @@
--source include/have_rocksdb.inc
let $isolation_level = READ COMMITTED;
--source include/locking_issues_case5.inc

View File

@ -0,0 +1,4 @@
--source include/have_rocksdb.inc
let $isolation_level = REPEATABLE READ;
--source include/locking_issues_case5.inc

View File

@ -0,0 +1,4 @@
--source include/have_rocksdb.inc
let $isolation_level = READ COMMITTED;
--source include/locking_issues_case6.inc

View File

@ -0,0 +1,4 @@
--source include/have_rocksdb.inc
let $isolation_level = REPEATABLE READ;
--source include/locking_issues_case6.inc

View File

@ -0,0 +1,5 @@
--source include/have_rocksdb.inc
let $lock_scanned_rows=0;
let $isolation_level = READ COMMITTED;
--source include/locking_issues_case7.inc

View File

@ -0,0 +1,5 @@
--source include/have_rocksdb.inc
let $lock_scanned_rows=1;
let $isolation_level = READ COMMITTED;
--source include/locking_issues_case7.inc

View File

@ -0,0 +1,5 @@
--source include/have_rocksdb.inc
let $lock_scanned_rows=0;
let $isolation_level = REPEATABLE READ;
--source include/locking_issues_case7.inc

View File

@ -0,0 +1,5 @@
--source include/have_rocksdb.inc
let $lock_scanned_rows=1;
let $isolation_level = REPEATABLE READ;
--source include/locking_issues_case7.inc

View File

@ -1,5 +1,6 @@
 --source include/have_log_bin.inc
 --source include/have_binlog_format_row.inc
+--source include/not_windows.inc
 --echo #
 --echo # MDEV-14843: Assertion `s_tx_list.size() == 0' failed in myrocks::Rdb_transaction::term_mutex
@ -14,18 +15,21 @@
 INSTALL SONAME 'ha_rocksdb';
 --enable_warnings
-connect (con1,localhost,root,,test);
 CREATE TABLE t1 (i INT) ENGINE=RocksDB;
 insert into t1 values (1);
-DROP TABLE t1;
+connect (con1,localhost,root,,);
+connection con1;
+insert into test.t1 values (1);
 connection default;
 # Cleanup
+DROP TABLE t1;
 UNINSTALL SONAME 'ha_rocksdb';
+SELECT ENGINE, SUPPORT FROM INFORMATION_SCHEMA.ENGINES WHERE ENGINE='ROCKSDB';
+disconnect con1;
+# Unfortunately this is the only more or less reliable way to wait until
+# connection done ha_close_connections(). It doesn't work on Windows due
+# to different thread handling.
+let $wait_condition= SELECT VARIABLE_VALUE=1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME='Threads_cached';
+--source include/wait_condition.inc
 --echo #
 --echo # MDEV-15686: Loading MyRocks plugin back after it has been unloaded causes a crash

View File

@ -52,8 +52,8 @@ namespace myrocks {
   Since we cannot or don't want to change the API in any way, we can use this
   mechanism to define readability tokens that look like C++ namespaces, but are
   not enforced in any way by the compiler, since the pre-compiler strips them
-  out. However, on the calling side, code looks like my_core::thd_ha_data()
-  rather than plain a thd_ha_data() call. This technique adds an immediate
+  out. However, on the calling side, code looks like my_core::thd_get_ha_data()
+  rather than plain a thd_get_ha_data() call. This technique adds an immediate
   visible cue on what type of API we are calling into.
 */

View File

@ -8397,6 +8397,26 @@ static void test_list_fields()
 }
+/* Test mysql_list_fields() with information_schema */
+static void test_list_information_schema_fields()
+{
+  MYSQL_RES *result;
+  int rc;
+  myheader("test_list_information_schema_fields");
+  rc= mysql_select_db(mysql, "information_schema");
+  myquery(rc);
+  result= mysql_list_fields(mysql, "all_plugins", NULL);
+  mytest(result);
+  rc= my_process_result_set(result);
+  DIE_UNLESS(rc == 0);
+  mysql_free_result(result);
+  rc= mysql_select_db(mysql, current_db);
+  myquery(rc);
+}
 static void test_list_fields_default()
 {
   int rc, i;
@ -20865,6 +20885,7 @@ static struct my_tests_st my_tests[]= {
{ "test_fetch_column", test_fetch_column }, { "test_fetch_column", test_fetch_column },
{ "test_mem_overun", test_mem_overun }, { "test_mem_overun", test_mem_overun },
{ "test_list_fields", test_list_fields }, { "test_list_fields", test_list_fields },
{ "test_list_information_schema_fields", test_list_information_schema_fields },
{ "test_list_fields_default", test_list_fields_default }, { "test_list_fields_default", test_list_fields_default },
{ "test_free_result", test_free_result }, { "test_free_result", test_free_result },
{ "test_free_store_result", test_free_store_result }, { "test_free_store_result", test_free_store_result },