commit 2fd82471ab
Merge 10.3 into 10.4
@@ -72,10 +72,10 @@ MYSQL_ADD_EXECUTABLE(mysql_plugin mysql_plugin.c)
 TARGET_LINK_LIBRARIES(mysql_plugin ${CLIENT_LIB})
 
 MYSQL_ADD_EXECUTABLE(mysqlbinlog mysqlbinlog.cc)
-TARGET_LINK_LIBRARIES(mysqlbinlog ${CLIENT_LIB})
+TARGET_LINK_LIBRARIES(mysqlbinlog ${CLIENT_LIB} mysys_ssl)
 
 MYSQL_ADD_EXECUTABLE(mysqladmin mysqladmin.cc ../sql/password.c)
-TARGET_LINK_LIBRARIES(mysqladmin ${CLIENT_LIB})
+TARGET_LINK_LIBRARIES(mysqladmin ${CLIENT_LIB} mysys_ssl)
 
 MYSQL_ADD_EXECUTABLE(mysqlslap mysqlslap.c)
 SET_SOURCE_FILES_PROPERTIES(mysqlslap.c PROPERTIES COMPILE_FLAGS "-DTHREADS")
@@ -971,6 +971,10 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
 exit(1);
 }
 break;
+case (int) OPT_DEFAULT_CHARSET:
+if (default_charset == disabled_my_option)
+default_charset= (char *)mysql_universal_client_charset;
+break;
 }
 return 0;
 }
@@ -5205,7 +5205,11 @@ next_file_item_1:
 goto next_datadir_item;
 }
 
-snprintf(dbpath, sizeof(dbpath)-1, "%s/%s", path, dbinfo.name);
+snprintf(dbpath, sizeof(dbpath), "%.*s/%.*s",
+OS_FILE_MAX_PATH/2-1,
+path,
+OS_FILE_MAX_PATH/2-1,
+dbinfo.name);
 
 os_normalize_path(dbpath);
 
@@ -21,6 +21,17 @@ use strict;
 use warnings;
 use Carp;
 
+# Define all MariaDB options that the user should be able to specify
+# many times in the config file. Note that options must be written
+# using '-' instead of '_' here!
+
+my %multipart_options=
+(
+"plugin-load-add" => 1,
+"optimizer-switch" => 1,
+);
+
+
 sub new {
 my ($class, $option_name, $option_value)= @_;
 my $self= bless { name => $option_name,
@@ -327,7 +338,6 @@ sub new {
 # Skip comment
 next;
 }
-
 else {
 croak "Unexpected line '$line' found in '$path'";
 }
@@ -355,6 +365,11 @@ sub insert {
 
 if ( defined $option ) {
 #print "option: $option, value: $value\n";
+my $tmp_option= $option;
+$tmp_option =~ s/_/-/g;
+
+# If the option is an option that one can specify many times, always add
+$if_not_exist= 1 if ($multipart_options{$tmp_option});
 
 # Add the option to the group
 $group->insert($option, $value, $if_not_exist);
@@ -291,7 +291,7 @@ sub post_check_client_groups {
 $first_mysqld->name());
 
 # Then generate [client.<suffix>] for each [mysqld.<suffix>]
-foreach my $mysqld ( $config->like('mysqld.') ) {
+foreach my $mysqld ( $config->like('mysqld\.') ) {
 $self->post_check_client_group($config,
 'client'.$mysqld->after('mysqld'),
 $mysqld->name())
@@ -313,7 +313,7 @@ sub post_check_embedded_group {
 my $mysqld= $config->group('mysqld') or
 croak "Can't run with embedded, config has no default mysqld section";
 
-my $first_mysqld= $config->first_like('mysqld.') or
+my $first_mysqld= $config->first_like('mysqld\.') or
 croak "Can't run with embedded, config has no mysqld";
 
 my %no_copy = map { $_ => 1 }
@@ -351,7 +351,7 @@ sub resolve_at_variable {
 }
 $res .= $after;
 
-$config->insert($group->name(), $option->name(), $res)
+$option->{value}= $res;
 }
 
 
@@ -436,7 +436,7 @@ sub new_config {
 }
 
 $self->run_section_rules($config,
-'mysqld.',
+'mysqld\.',
 @mysqld_rules);
 
 # [mysqlbinlog] need additional settings
@@ -404,8 +404,8 @@ b int(11) YES NULL
 c int(11) YES NULL
 explain select * from t1,t2 where t1.b = t2.c and t1.c = t2.b;
 id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL b,c NULL NULL NULL 10
-1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where
+1 SIMPLE t1 ref b,c b 5 test.t2.c 1 Using where
 select * from t1,t2 where t1.b = t2.c and t1.c = t2.b;
 a a b c
 1 1 1 1
@@ -1605,3 +1605,34 @@ SELECT STRAIGHT_JOIN * FROM t1, t2 AS t2_1, t2 AS t2_2
 WHERE t2_2.c = t2_1.c AND t2_2.b = t2_1.b AND ( a IS NULL OR t2_1.c = a );
 a b c b c
 DROP TABLE t1,t2;
+#
+# MDEV-19600: The optimizer should be able to produce rows=1 estimate for unique index with NULLable columns
+#
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+pk int not null primary key auto_increment,
+a int,
+b int,
+unique key(a)
+);
+insert into t1 (a,b) select null, 12345 from t0 A, t0 B, t0 C;
+insert into t1 (a,b) select a,a from t0;
+# Simulate InnoDB's persistent statistics (It always uses nulls_equal)
+set @tmp1= @@myisam_stats_method;
+set myisam_stats_method=nulls_equal;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+set myisam_stats_method=@tmp1;
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1010 NULL NULL BTREE
+t1 0 a 1 a A 1010 NULL NULL YES BTREE
+# t1 must use ref(t1.a=t0.a) and rows must be 1 (and not 45):
+explain select * from t0,t1 where t0.a=t1.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using where
+1 SIMPLE t1 ref a a 5 test.t0.a 1
+drop table t0,t1;
@@ -1255,3 +1255,33 @@ SELECT STRAIGHT_JOIN * FROM t1, t2 AS t2_1, t2 AS t2_2
 WHERE t2_2.c = t2_1.c AND t2_2.b = t2_1.b AND ( a IS NULL OR t2_1.c = a );
 
 DROP TABLE t1,t2;
+
+--echo #
+--echo # MDEV-19600: The optimizer should be able to produce rows=1 estimate for unique index with NULLable columns
+--echo #
+
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1 (
+pk int not null primary key auto_increment,
+a int,
+b int,
+unique key(a)
+);
+
+# 10K of null values
+insert into t1 (a,b) select null, 12345 from t0 A, t0 B, t0 C;
+insert into t1 (a,b) select a,a from t0;
+
+--echo # Simulate InnoDB's persistent statistics (It always uses nulls_equal)
+set @tmp1= @@myisam_stats_method;
+set myisam_stats_method=nulls_equal;
+analyze table t1;
+set myisam_stats_method=@tmp1;
+show keys from t1;
+
+--echo # t1 must use ref(t1.a=t0.a) and rows must be 1 (and not 45):
+explain select * from t0,t1 where t0.a=t1.a;
+
+drop table t0,t1;
@@ -867,7 +867,7 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b {
 "table": "t1",
 "field": "a",
 "equals": "t2.b + 2",
-"null_rejecting": false
+"null_rejecting": true
 },
 {
 "table": "t2",
@@ -1805,19 +1805,19 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
 "table": "t1",
 "field": "a",
 "equals": "1",
-"null_rejecting": false
+"null_rejecting": true
 },
 {
 "table": "t1",
 "field": "a",
 "equals": "1",
-"null_rejecting": false
+"null_rejecting": true
 },
 {
 "table": "t1",
 "field": "b",
 "equals": "2",
-"null_rejecting": false
+"null_rejecting": true
 }
 ]
 },
@@ -2821,37 +2821,37 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
 "table": "t1",
 "field": "pk",
 "equals": "2",
-"null_rejecting": false
+"null_rejecting": true
 },
 {
 "table": "t1",
 "field": "pk",
 "equals": "2",
-"null_rejecting": false
+"null_rejecting": true
 },
 {
 "table": "t1",
 "field": "a",
 "equals": "5",
-"null_rejecting": false
+"null_rejecting": true
 },
 {
 "table": "t1",
 "field": "pk",
 "equals": "2",
-"null_rejecting": false
+"null_rejecting": true
 },
 {
 "table": "t1",
 "field": "a",
 "equals": "5",
-"null_rejecting": false
+"null_rejecting": true
 },
 {
 "table": "t1",
 "field": "b",
 "equals": "1",
-"null_rejecting": false
+"null_rejecting": true
 }
 ]
 },
@@ -1589,7 +1589,7 @@ WHERE t2.b=14 AND t2.a=t1.a AND 5.1<t2.c AND t1.b='DE'
 ORDER BY t2.c LIMIT 1;
 id select_type table type possible_keys key key_len ref rows Extra
 1 SIMPLE t2 range a,b,c c 5 NULL 420 Using where
-1 SIMPLE t1 ref a a 39 test.t2.a,const 10 Using where; Using index
+1 SIMPLE t1 ref a a 39 test.t2.a,const 1 Using where; Using index
 SELECT d FROM t3 AS t1, t2 AS t2
 WHERE t2.b=14 AND t2.a=t1.a AND 5.1<t2.c AND t1.b='DE'
 ORDER BY t2.c LIMIT 1;
@ -7314,5 +7314,60 @@ ERROR HY000: Illegal parameter data types row and boolean for operation '='
|
|||||||
#
|
#
|
||||||
# End of 10.4 tests
|
# End of 10.4 tests
|
||||||
#
|
#
|
||||||
|
#
|
||||||
|
# MDEV-19714: JOIN::pseudo_bits_cond is not visible in EXPLAIN FORMAT=JSON
|
||||||
|
#
|
||||||
|
CREATE TABLE t1 ( a INT );
|
||||||
|
INSERT INTO t1 VALUES (1),(5);
|
||||||
|
CREATE TABLE t2 ( b INT ) ENGINE=MyISAM;
|
||||||
|
INSERT INTO t2 VALUES (1);
|
||||||
|
CREATE TABLE t3 ( c INT );
|
||||||
|
INSERT INTO t3 VALUES (4),(5);
|
||||||
|
SET @tmp19714=@@optimizer_switch;
|
||||||
|
SET optimizer_switch='subquery_cache=off';
|
||||||
|
explain format=json
|
||||||
|
SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1;
|
||||||
|
EXPLAIN
|
||||||
|
{
|
||||||
|
"query_block": {
|
||||||
|
"select_id": 1,
|
||||||
|
"table": {
|
||||||
|
"table_name": "t1",
|
||||||
|
"access_type": "ALL",
|
||||||
|
"rows": 2,
|
||||||
|
"filtered": 100
|
||||||
|
},
|
||||||
|
"subqueries": [
|
||||||
|
{
|
||||||
|
"query_block": {
|
||||||
|
"select_id": 2,
|
||||||
|
"pseudo_bits_condition": "1 = t1.a or <in_optimizer>(1,<exists>(subquery#3))",
|
||||||
|
"table": {
|
||||||
|
"table_name": "t2",
|
||||||
|
"access_type": "system",
|
||||||
|
"rows": 1,
|
||||||
|
"filtered": 100
|
||||||
|
},
|
||||||
|
"subqueries": [
|
||||||
|
{
|
||||||
|
"query_block": {
|
||||||
|
"select_id": 3,
|
||||||
|
"table": {
|
||||||
|
"table_name": "t3",
|
||||||
|
"access_type": "ALL",
|
||||||
|
"rows": 2,
|
||||||
|
"filtered": 100,
|
||||||
|
"attached_condition": "1 = t3.c"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
SET optimizer_switch=@tmp19714;
|
||||||
|
drop table t1,t2,t3;
|
||||||
set @optimizer_switch_for_subselect_test=null;
|
set @optimizer_switch_for_subselect_test=null;
|
||||||
set @join_cache_level_for_subselect_test=NULL;
|
set @join_cache_level_for_subselect_test=NULL;
|
||||||
|
@ -8,5 +8,28 @@ set @join_cache_level_for_subselect_test=@@join_cache_level;
|
|||||||
|
|
||||||
--source subselect.test
|
--source subselect.test
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # MDEV-19714: JOIN::pseudo_bits_cond is not visible in EXPLAIN FORMAT=JSON
|
||||||
|
--echo #
|
||||||
|
CREATE TABLE t1 ( a INT );
|
||||||
|
INSERT INTO t1 VALUES (1),(5);
|
||||||
|
|
||||||
|
# t2 must be MyISAM or Aria and contain 1 row
|
||||||
|
CREATE TABLE t2 ( b INT ) ENGINE=MyISAM;
|
||||||
|
INSERT INTO t2 VALUES (1);
|
||||||
|
|
||||||
|
CREATE TABLE t3 ( c INT );
|
||||||
|
INSERT INTO t3 VALUES (4),(5);
|
||||||
|
|
||||||
|
SET @tmp19714=@@optimizer_switch;
|
||||||
|
SET optimizer_switch='subquery_cache=off';
|
||||||
|
|
||||||
|
explain format=json
|
||||||
|
SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1;
|
||||||
|
|
||||||
|
SET optimizer_switch=@tmp19714;
|
||||||
|
|
||||||
|
drop table t1,t2,t3;
|
||||||
|
|
||||||
set @optimizer_switch_for_subselect_test=null;
|
set @optimizer_switch_for_subselect_test=null;
|
||||||
set @join_cache_level_for_subselect_test=NULL;
|
set @join_cache_level_for_subselect_test=NULL;
|
||||||
|
@ -2555,33 +2555,94 @@ CREATE TABLE t1 ( a INT PRIMARY KEY, b INT, KEY(b) );
|
|||||||
INSERT INTO t1 VALUES
|
INSERT INTO t1 VALUES
|
||||||
(1,2),(2,1),(3,3),(4,2),(5,5),
|
(1,2),(2,1),(3,3),(4,2),(5,5),
|
||||||
(6,3),(7,1),(8,4),(9,3),(10,2);
|
(6,3),(7,1),(8,4),(9,3),(10,2);
|
||||||
CREATE TABLE t2 ( c INT, d INT, UNIQUE KEY(c) );
|
CREATE TABLE t2 ( c INT, d INT, KEY(c) );
|
||||||
INSERT INTO t2 VALUES
|
INSERT INTO t2 VALUES
|
||||||
(1,2),(2,1),(3,3),(4,2),(5,5),(6,3),(7,1);
|
(1,2),(2,1),(3,3),(4,2),(5,5),(6,3),(7,1);
|
||||||
|
analyze table t1,t2;
|
||||||
|
Table Op Msg_type Msg_text
|
||||||
|
test.t1 analyze status Engine-independent statistics collected
|
||||||
|
test.t1 analyze status OK
|
||||||
|
test.t2 analyze status Engine-independent statistics collected
|
||||||
|
test.t2 analyze status OK
|
||||||
|
explain
|
||||||
|
SELECT a, b, d FROM t1, t2
|
||||||
|
WHERE ( b, d ) IN
|
||||||
|
( SELECT b, d FROM t1, t2 WHERE b = c );
|
||||||
|
id select_type table type possible_keys key key_len ref rows Extra
|
||||||
|
1 PRIMARY t2 ALL NULL NULL NULL NULL 7
|
||||||
|
1 PRIMARY t1 index b b 5 NULL 10 Using where; Using index; LooseScan
|
||||||
|
1 PRIMARY t2 ref c c 5 test.t1.b 1 Using where; FirstMatch(t1)
|
||||||
|
1 PRIMARY t1 ref b b 5 test.t1.b 2
|
||||||
SELECT a, b, d FROM t1, t2
|
SELECT a, b, d FROM t1, t2
|
||||||
WHERE ( b, d ) IN
|
WHERE ( b, d ) IN
|
||||||
( SELECT b, d FROM t1, t2 WHERE b = c );
|
( SELECT b, d FROM t1, t2 WHERE b = c );
|
||||||
a b d
|
a b d
|
||||||
2 1 2
|
2 1 2
|
||||||
7 1 2
|
7 1 2
|
||||||
|
8 4 2
|
||||||
|
1 2 1
|
||||||
|
4 2 1
|
||||||
|
10 2 1
|
||||||
|
3 3 3
|
||||||
|
6 3 3
|
||||||
|
9 3 3
|
||||||
2 1 2
|
2 1 2
|
||||||
7 1 2
|
7 1 2
|
||||||
1 2 1
|
|
||||||
4 2 1
|
|
||||||
10 2 1
|
|
||||||
1 2 1
|
|
||||||
4 2 1
|
|
||||||
10 2 1
|
|
||||||
3 3 3
|
|
||||||
6 3 3
|
|
||||||
9 3 3
|
|
||||||
3 3 3
|
|
||||||
6 3 3
|
|
||||||
9 3 3
|
|
||||||
8 4 2
|
|
||||||
8 4 2
|
8 4 2
|
||||||
5 5 5
|
5 5 5
|
||||||
|
3 3 3
|
||||||
|
6 3 3
|
||||||
|
9 3 3
|
||||||
|
1 2 1
|
||||||
|
4 2 1
|
||||||
|
10 2 1
|
||||||
DROP TABLE t1, t2;
|
DROP TABLE t1, t2;
|
||||||
|
# Another testcase for the above that still uses LooseScan:
|
||||||
|
create table t0(a int primary key);
|
||||||
|
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||||
|
create table t10(a int primary key);
|
||||||
|
insert into t10 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
|
||||||
|
create table t1 (
|
||||||
|
pk int primary key auto_increment,
|
||||||
|
kp1 int,
|
||||||
|
kp2 int,
|
||||||
|
filler char(100),
|
||||||
|
key (kp1, kp2)
|
||||||
|
);
|
||||||
|
insert into t1 (kp1, kp2, filler)
|
||||||
|
select
|
||||||
|
A.a, B.a, 'filler-data'
|
||||||
|
from t0 A, t0 B;
|
||||||
|
create table t2 (a int, filler char(100), key(a));
|
||||||
|
create table t3 (a int);
|
||||||
|
insert into t3 values (1),(2);
|
||||||
|
insert into t2
|
||||||
|
select (A.a+1000*B.a)/20, 'filler_data' from t10 A, t0 B;
|
||||||
|
analyze table t1,t2,t3;
|
||||||
|
Table Op Msg_type Msg_text
|
||||||
|
test.t1 analyze status Engine-independent statistics collected
|
||||||
|
test.t1 analyze status Table is already up to date
|
||||||
|
test.t2 analyze status Engine-independent statistics collected
|
||||||
|
test.t2 analyze status Table is already up to date
|
||||||
|
test.t3 analyze status Engine-independent statistics collected
|
||||||
|
test.t3 analyze status OK
|
||||||
|
delete from t1 where kp2 in (1,3);
|
||||||
|
# Ref + LooseScan on t1:
|
||||||
|
explain select sum(t2.a)
|
||||||
|
from t2,t3
|
||||||
|
where (t3.a,t2.a) in (select kp1,kp2 from t1,t0 where t0.a=2);
|
||||||
|
id select_type table type possible_keys key key_len ref rows Extra
|
||||||
|
1 PRIMARY t0 const PRIMARY PRIMARY 4 const 1 Using index
|
||||||
|
1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where
|
||||||
|
1 PRIMARY t1 ref kp1 kp1 5 test.t3.a 10 Using where; Using index; LooseScan
|
||||||
|
1 PRIMARY t2 ref a a 5 test.t1.kp2 19 Using index
|
||||||
|
select sum(t2.a)
|
||||||
|
from t2,t3
|
||||||
|
where (t3.a,t2.a) in (select kp1,kp2 from t1,t0 where t0.a=2);
|
||||||
|
sum(t2.a)
|
||||||
|
1640
|
||||||
|
drop table t0,t10;
|
||||||
|
drop table t1,t2,t3;
|
||||||
#
|
#
|
||||||
# BUG#920713: Wrong result (missing rows) with firstmatch+BNL, IN subquery, ...
|
# BUG#920713: Wrong result (missing rows) with firstmatch+BNL, IN subquery, ...
|
||||||
#
|
#
|
||||||
|
@ -2285,16 +2285,66 @@ INSERT INTO t1 VALUES
|
|||||||
(1,2),(2,1),(3,3),(4,2),(5,5),
|
(1,2),(2,1),(3,3),(4,2),(5,5),
|
||||||
(6,3),(7,1),(8,4),(9,3),(10,2);
|
(6,3),(7,1),(8,4),(9,3),(10,2);
|
||||||
|
|
||||||
CREATE TABLE t2 ( c INT, d INT, UNIQUE KEY(c) );
|
CREATE TABLE t2 ( c INT, d INT, KEY(c) );
|
||||||
INSERT INTO t2 VALUES
|
INSERT INTO t2 VALUES
|
||||||
(1,2),(2,1),(3,3),(4,2),(5,5),(6,3),(7,1);
|
(1,2),(2,1),(3,3),(4,2),(5,5),(6,3),(7,1);
|
||||||
|
|
||||||
|
analyze table t1,t2;
|
||||||
|
explain
|
||||||
|
SELECT a, b, d FROM t1, t2
|
||||||
|
WHERE ( b, d ) IN
|
||||||
|
( SELECT b, d FROM t1, t2 WHERE b = c );
|
||||||
SELECT a, b, d FROM t1, t2
|
SELECT a, b, d FROM t1, t2
|
||||||
WHERE ( b, d ) IN
|
WHERE ( b, d ) IN
|
||||||
( SELECT b, d FROM t1, t2 WHERE b = c );
|
( SELECT b, d FROM t1, t2 WHERE b = c );
|
||||||
|
|
||||||
DROP TABLE t1, t2;
|
DROP TABLE t1, t2;
|
||||||
|
|
||||||
|
--echo # Another testcase for the above that still uses LooseScan:
|
||||||
|
|
||||||
|
create table t0(a int primary key);
|
||||||
|
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||||
|
|
||||||
|
create table t10(a int primary key);
|
||||||
|
insert into t10 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
|
||||||
|
|
||||||
|
create table t1 (
|
||||||
|
pk int primary key auto_increment,
|
||||||
|
kp1 int,
|
||||||
|
kp2 int,
|
||||||
|
filler char(100),
|
||||||
|
key (kp1, kp2)
|
||||||
|
);
|
||||||
|
|
||||||
|
# 10 groups, each has 10 elements.
|
||||||
|
insert into t1 (kp1, kp2, filler)
|
||||||
|
select
|
||||||
|
A.a, B.a, 'filler-data'
|
||||||
|
from t0 A, t0 B;
|
||||||
|
|
||||||
|
create table t2 (a int, filler char(100), key(a));
|
||||||
|
|
||||||
|
create table t3 (a int);
|
||||||
|
insert into t3 values (1),(2);
|
||||||
|
|
||||||
|
insert into t2
|
||||||
|
select (A.a+1000*B.a)/20, 'filler_data' from t10 A, t0 B;
|
||||||
|
|
||||||
|
analyze table t1,t2,t3;
|
||||||
|
delete from t1 where kp2 in (1,3);
|
||||||
|
|
||||||
|
--echo # Ref + LooseScan on t1:
|
||||||
|
explain select sum(t2.a)
|
||||||
|
from t2,t3
|
||||||
|
where (t3.a,t2.a) in (select kp1,kp2 from t1,t0 where t0.a=2);
|
||||||
|
|
||||||
|
select sum(t2.a)
|
||||||
|
from t2,t3
|
||||||
|
where (t3.a,t2.a) in (select kp1,kp2 from t1,t0 where t0.a=2);
|
||||||
|
|
||||||
|
drop table t0,t10;
|
||||||
|
drop table t1,t2,t3;
|
||||||
|
|
||||||
--echo #
|
--echo #
|
||||||
--echo # BUG#920713: Wrong result (missing rows) with firstmatch+BNL, IN subquery, ...
|
--echo # BUG#920713: Wrong result (missing rows) with firstmatch+BNL, IN subquery, ...
|
||||||
--echo #
|
--echo #
|
||||||
|
@ -2569,9 +2569,24 @@ CREATE TABLE t1 ( a INT PRIMARY KEY, b INT, KEY(b) );
|
|||||||
INSERT INTO t1 VALUES
|
INSERT INTO t1 VALUES
|
||||||
(1,2),(2,1),(3,3),(4,2),(5,5),
|
(1,2),(2,1),(3,3),(4,2),(5,5),
|
||||||
(6,3),(7,1),(8,4),(9,3),(10,2);
|
(6,3),(7,1),(8,4),(9,3),(10,2);
|
||||||
CREATE TABLE t2 ( c INT, d INT, UNIQUE KEY(c) );
|
CREATE TABLE t2 ( c INT, d INT, KEY(c) );
|
||||||
INSERT INTO t2 VALUES
|
INSERT INTO t2 VALUES
|
||||||
(1,2),(2,1),(3,3),(4,2),(5,5),(6,3),(7,1);
|
(1,2),(2,1),(3,3),(4,2),(5,5),(6,3),(7,1);
|
||||||
|
analyze table t1,t2;
|
||||||
|
Table Op Msg_type Msg_text
|
||||||
|
test.t1 analyze status Engine-independent statistics collected
|
||||||
|
test.t1 analyze status OK
|
||||||
|
test.t2 analyze status Engine-independent statistics collected
|
||||||
|
test.t2 analyze status OK
|
||||||
|
explain
|
||||||
|
SELECT a, b, d FROM t1, t2
|
||||||
|
WHERE ( b, d ) IN
|
||||||
|
( SELECT b, d FROM t1, t2 WHERE b = c );
|
||||||
|
id select_type table type possible_keys key key_len ref rows Extra
|
||||||
|
1 PRIMARY t2 ALL NULL NULL NULL NULL 7
|
||||||
|
1 PRIMARY t1 index b b 5 NULL 10 Using where; Using index; LooseScan
|
||||||
|
1 PRIMARY t2 ref c c 5 test.t1.b 1 Using where; FirstMatch(t1)
|
||||||
|
1 PRIMARY t1 ref b b 5 test.t1.b 2 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||||
SELECT a, b, d FROM t1, t2
|
SELECT a, b, d FROM t1, t2
|
||||||
WHERE ( b, d ) IN
|
WHERE ( b, d ) IN
|
||||||
( SELECT b, d FROM t1, t2 WHERE b = c );
|
( SELECT b, d FROM t1, t2 WHERE b = c );
|
||||||
@ -2596,6 +2611,52 @@ a b d
|
|||||||
10 2 1
|
10 2 1
|
||||||
10 2 1
|
10 2 1
|
||||||
DROP TABLE t1, t2;
|
DROP TABLE t1, t2;
|
||||||
|
# Another testcase for the above that still uses LooseScan:
|
||||||
|
create table t0(a int primary key);
|
||||||
|
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||||
|
create table t10(a int primary key);
|
||||||
|
insert into t10 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
|
||||||
|
create table t1 (
|
||||||
|
pk int primary key auto_increment,
|
||||||
|
kp1 int,
|
||||||
|
kp2 int,
|
||||||
|
filler char(100),
|
||||||
|
key (kp1, kp2)
|
||||||
|
);
|
||||||
|
insert into t1 (kp1, kp2, filler)
|
||||||
|
select
|
||||||
|
A.a, B.a, 'filler-data'
|
||||||
|
from t0 A, t0 B;
|
||||||
|
create table t2 (a int, filler char(100), key(a));
|
||||||
|
create table t3 (a int);
|
||||||
|
insert into t3 values (1),(2);
|
||||||
|
insert into t2
|
||||||
|
select (A.a+1000*B.a)/20, 'filler_data' from t10 A, t0 B;
|
||||||
|
analyze table t1,t2,t3;
|
||||||
|
Table Op Msg_type Msg_text
|
||||||
|
test.t1 analyze status Engine-independent statistics collected
|
||||||
|
test.t1 analyze status Table is already up to date
|
||||||
|
test.t2 analyze status Engine-independent statistics collected
|
||||||
|
test.t2 analyze status Table is already up to date
|
||||||
|
test.t3 analyze status Engine-independent statistics collected
|
||||||
|
test.t3 analyze status OK
|
||||||
|
delete from t1 where kp2 in (1,3);
|
||||||
|
# Ref + LooseScan on t1:
|
||||||
|
explain select sum(t2.a)
|
||||||
|
from t2,t3
|
||||||
|
where (t3.a,t2.a) in (select kp1,kp2 from t1,t0 where t0.a=2);
|
||||||
|
id select_type table type possible_keys key key_len ref rows Extra
|
||||||
|
1 PRIMARY t0 const PRIMARY PRIMARY 4 const 1 Using index
|
||||||
|
1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where
|
||||||
|
1 PRIMARY t1 ref kp1 kp1 5 test.t3.a 10 Using where; Using index; LooseScan
|
||||||
|
1 PRIMARY t2 ref a a 5 test.t1.kp2 19 Using index
|
||||||
|
select sum(t2.a)
|
||||||
|
from t2,t3
|
||||||
|
where (t3.a,t2.a) in (select kp1,kp2 from t1,t0 where t0.a=2);
|
||||||
|
sum(t2.a)
|
||||||
|
1640
|
||||||
|
drop table t0,t10;
|
||||||
|
drop table t1,t2,t3;
|
||||||
#
|
#
|
||||||
# BUG#920713: Wrong result (missing rows) with firstmatch+BNL, IN subquery, ...
|
# BUG#920713: Wrong result (missing rows) with firstmatch+BNL, IN subquery, ...
|
||||||
#
|
#
|
||||||
|
@@ -67,9 +67,9 @@ insert into t4 select A.a + 10*B.a, A.a + 10*B.a, 'filler' from t0 A, t0 B;
 explain select * from t0, t4 where
 t4.b=t0.a and t4.a in (select max(t2.a) from t1, t2 group by t2.b);
 id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t0 ALL NULL NULL NULL NULL 10
-1 PRIMARY t4 ALL a NULL NULL NULL 100 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t4.a 1
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 5
+1 PRIMARY t0 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t4 ref a a 10 <subquery2>.max(t2.a),test.t0.a 1
 2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 Using temporary
 2 MATERIALIZED t1 ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join)
 insert into t4 select 100 + (B.a *100 + A.a), 100 + (B.a*100 + A.a), 'filler' from t4 A, t0 B;
@@ -77,9 +77,9 @@ explain select * from t4 where
 t4.a in (select max(t2.a) from t1, t2 group by t2.b) and
 t4.b in (select max(t2.a) from t1, t2 group by t2.b);
 id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 5
-1 PRIMARY t4 ref a a 5 <subquery2>.max(t2.a) 12 Using index condition
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 test.t4.b 1
+1 PRIMARY <subquery3> ALL distinct_key NULL NULL NULL 5
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 5 Using join buffer (flat, BNL join)
+1 PRIMARY t4 ref a a 10 <subquery2>.max(t2.a),<subquery3>.max(t2.a) 1
 3 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 Using temporary
 3 MATERIALIZED t1 ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join)
 2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 Using temporary
@@ -279,7 +279,7 @@ insert into t2 values
 explain select t1.* from t1 left join t2 on t2.a=t1.a;
 id select_type table type possible_keys key key_len ref rows Extra
 1 SIMPLE t1 index NULL PRIMARY 10 NULL 2 Using index
-1 SIMPLE t2 ref a a 3 test.t1.a 2 Using where
+1 SIMPLE t2 ref a a 3 test.t1.a 1 Using where
 drop table t1, t2;
 #
 # check UPDATE/DELETE that look like they could be eliminated
@ -3607,6 +3607,33 @@ b row_number() over (partition by sum(a)+1)
|
|||||||
2000 1
|
2000 1
|
||||||
drop table t1;
|
drop table t1;
|
||||||
#
|
#
|
||||||
|
# MDEV-18015: Assertion `global_status_var.global_memory_used == 0' failed when using UDF,
|
||||||
|
# window functions and views
|
||||||
|
#
|
||||||
|
create table t1 (id int, n1 int);
|
||||||
|
insert into t1 values (1,1),(2,1),(3,2),(4,4);
|
||||||
|
explain
|
||||||
|
select max(n1) over (partition by 'abc') from t1;
|
||||||
|
id select_type table type possible_keys key key_len ref rows Extra
|
||||||
|
1 SIMPLE t1 ALL NULL NULL NULL NULL 4 Using temporary
|
||||||
|
select max(n1) over (partition by 'abc') from t1;
|
||||||
|
max(n1) over (partition by 'abc')
|
||||||
|
4
|
||||||
|
4
|
||||||
|
4
|
||||||
|
4
|
||||||
|
explain
|
||||||
|
select rank() over (partition by 'abc' order by 'xyz') from t1;
|
||||||
|
id select_type table type possible_keys key key_len ref rows Extra
|
||||||
|
1 SIMPLE t1 ALL NULL NULL NULL NULL 4 Using temporary
|
||||||
|
select rank() over (partition by 'abc' order by 'xyz') from t1;
|
||||||
|
rank() over (partition by 'abc' order by 'xyz')
|
||||||
|
1
|
||||||
|
1
|
||||||
|
1
|
||||||
|
1
|
||||||
|
drop table t1;
|
||||||
|
#
|
||||||
# End of 10.2 tests
|
# End of 10.2 tests
|
||||||
#
|
#
|
||||||
#
|
#
|
||||||
|
@ -2325,6 +2325,22 @@ select b, row_number() over (partition by sum(a)+1) from t1 group by b;
|
|||||||
|
|
||||||
drop table t1;
|
drop table t1;
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # MDEV-18015: Assertion `global_status_var.global_memory_used == 0' failed when using UDF,
|
||||||
|
--echo # window functions and views
|
||||||
|
--echo #
|
||||||
|
|
||||||
|
create table t1 (id int, n1 int);
|
||||||
|
insert into t1 values (1,1),(2,1),(3,2),(4,4);
|
||||||
|
explain
|
||||||
|
select max(n1) over (partition by 'abc') from t1;
|
||||||
|
select max(n1) over (partition by 'abc') from t1;
|
||||||
|
|
||||||
|
explain
|
||||||
|
select rank() over (partition by 'abc' order by 'xyz') from t1;
|
||||||
|
select rank() over (partition by 'abc' order by 'xyz') from t1;
|
||||||
|
drop table t1;
|
||||||
|
|
||||||
--echo #
|
--echo #
|
||||||
--echo # End of 10.2 tests
|
--echo # End of 10.2 tests
|
||||||
--echo #
|
--echo #
|
||||||
|
mysql-test/suite/binlog/r/flashback-largebinlog.result (new file, 71 lines)
@@ -0,0 +1,71 @@
|
|||||||
|
#
|
||||||
|
# Preparatory cleanup.
|
||||||
|
#
|
||||||
|
drop database if exists mysqltest;
|
||||||
|
create database mysqltest;
|
||||||
|
use mysqltest;
|
||||||
|
DROP TABLE IF EXISTS t1;
|
||||||
|
#
|
||||||
|
# We need a fixed timestamp to avoid varying results.
|
||||||
|
#
|
||||||
|
SET timestamp=1000000000;
|
||||||
|
#
|
||||||
|
# We need big packets.
|
||||||
|
#
|
||||||
|
# Capture initial value to reset at the end of the test
|
||||||
|
# Now adjust max_allowed_packet
|
||||||
|
SET @@global.max_allowed_packet= 10*1024*1024*1024;
|
||||||
|
Warnings:
|
||||||
|
Warning 1292 Truncated incorrect max_allowed_packet value: '10737418240'
|
||||||
|
max_allowed_packet is a global variable.
|
||||||
|
In order for the preceding change in max_allowed_packets' value
|
||||||
|
to be seen and used, we must start a new connection.
|
||||||
|
The change does not take effect with the current one.
|
||||||
|
For simplicity, we just disconnect / reconnect connection default here.
|
||||||
|
disconnect default;
|
||||||
|
connect default, localhost,root,,;
|
||||||
|
#
|
||||||
|
# Delete all existing binary logs.
|
||||||
|
#
|
||||||
|
RESET MASTER;
|
||||||
|
#
|
||||||
|
# Create a test table.
|
||||||
|
#
|
||||||
|
use mysqltest;
|
||||||
|
CREATE TABLE t1 (
|
||||||
|
c1 LONGTEXT
|
||||||
|
) DEFAULT CHARSET latin1;
|
||||||
|
#
|
||||||
|
# Show how many rows are affected by each statement.
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# Insert some big rows.
|
||||||
|
#
|
||||||
|
insert 1024MB data twice
|
||||||
|
INSERT INTO t1 VALUES (REPEAT('ManyMegaByteBlck', 67108864));
|
||||||
|
affected rows: 1
|
||||||
|
INSERT INTO t1 VALUES (REPEAT('MegaByteBlckMany', 67108864));
|
||||||
|
affected rows: 1
|
||||||
|
#
|
||||||
|
# Flush all log buffers to the log file.
|
||||||
|
#
|
||||||
|
FLUSH LOGS;
|
||||||
|
affected rows: 0
|
||||||
|
#
|
||||||
|
# Call mysqlbinlog to display the log file contents.
|
||||||
|
# NOTE: The output of mysqlbinlog is redirected to
|
||||||
|
# $MYSQLTEST_VARDIR/tmp/mysqlbinlog_big_1.out
|
||||||
|
# If you want to examine it, disable remove_file
|
||||||
|
# at the bottom of the test script.
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# Cleanup.
|
||||||
|
#
|
||||||
|
# reset variable value to pass testcase checks
|
||||||
|
SET @@global.max_allowed_packet = 16777216;
|
||||||
|
affected rows: 0
|
||||||
|
DROP TABLE t1;
|
||||||
|
affected rows: 0
|
||||||
|
drop database if exists mysqltest;
|
||||||
|
affected rows: 0
|
||||||
|
remove_file $MYSQLTEST_VARDIR/tmp/mysqlbinlog_big_1.out
|
mysql-test/suite/binlog/t/flashback-largebinlog.test (new file, 110 lines)
@@ -0,0 +1,110 @@
|
|||||||
|
# mysqlbinlog_big.test
|
||||||
|
#
|
||||||
|
# Show that mysqlbinlog can handle big rows.
|
||||||
|
#
|
||||||
|
|
||||||
|
#
|
||||||
|
# The *huge* output of mysqlbinlog will be redirected to
|
||||||
|
# $MYSQLTEST_VARDIR/$mysqlbinlog_output
|
||||||
|
#
|
||||||
|
--let $mysqlbinlog_output= tmp/mysqlbinlog_big_1.out
|
||||||
|
|
||||||
|
--source include/have_binlog_format_row.inc
|
||||||
|
|
||||||
|
--source include/have_log_bin.inc
|
||||||
|
|
||||||
|
# This is a big test.
|
||||||
|
--source include/big_test.inc
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # Preparatory cleanup.
|
||||||
|
--echo #
|
||||||
|
--disable_warnings
|
||||||
|
drop database if exists mysqltest;
|
||||||
|
create database mysqltest;
|
||||||
|
use mysqltest;
|
||||||
|
DROP TABLE IF EXISTS t1;
|
||||||
|
--enable_warnings
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # We need a fixed timestamp to avoid varying results.
|
||||||
|
--echo #
|
||||||
|
SET timestamp=1000000000;
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # We need big packets.
|
||||||
|
--echo #
|
||||||
|
--echo # Capture initial value to reset at the end of the test
|
||||||
|
# use let $<var> = query_get_value as FLUSH statements
|
||||||
|
# in the test will set @<var> values to NULL
|
||||||
|
let $orig_max_allowed_packet =
|
||||||
|
query_get_value(SELECT @@global.max_allowed_packet, @@global.max_allowed_packet, 1);
|
||||||
|
|
||||||
|
--echo # Now adjust max_allowed_packet
|
||||||
|
SET @@global.max_allowed_packet= 10*1024*1024*1024;
|
||||||
|
|
||||||
|
--echo max_allowed_packet is a global variable.
|
||||||
|
--echo In order for the preceding change in max_allowed_packets' value
|
||||||
|
--echo to be seen and used, we must start a new connection.
|
||||||
|
--echo The change does not take effect with the current one.
|
||||||
|
--echo For simplicity, we just disconnect / reconnect connection default here.
|
||||||
|
disconnect default;
|
||||||
|
connect (default, localhost,root,,);
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # Delete all existing binary logs.
|
||||||
|
--echo #
|
||||||
|
RESET MASTER;
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # Create a test table.
|
||||||
|
--echo #
|
||||||
|
use mysqltest;
|
||||||
|
eval CREATE TABLE t1 (
|
||||||
|
c1 LONGTEXT
|
||||||
|
) DEFAULT CHARSET latin1;
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # Show how many rows are affected by each statement.
|
||||||
|
--echo #
|
||||||
|
--enable_info
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # Insert some big rows.
|
||||||
|
--echo #
|
||||||
|
|
||||||
|
--echo insert 1024MB data twice
|
||||||
|
INSERT INTO t1 VALUES (REPEAT('ManyMegaByteBlck', 67108864));
|
||||||
|
INSERT INTO t1 VALUES (REPEAT('MegaByteBlckMany', 67108864));
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # Flush all log buffers to the log file.
|
||||||
|
--echo #
|
||||||
|
FLUSH LOGS;
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # Call mysqlbinlog to display the log file contents.
|
||||||
|
--echo # NOTE: The output of mysqlbinlog is redirected to
|
||||||
|
--echo # \$MYSQLTEST_VARDIR/$mysqlbinlog_output
|
||||||
|
--echo # If you want to examine it, disable remove_file
|
||||||
|
--echo # at the bottom of the test script.
|
||||||
|
--echo #
|
||||||
|
let $MYSQLD_DATADIR= `select @@datadir`;
|
||||||
|
--replace_result $MYSQLTEST_VARDIR <MYSQLTEST_VARDIR>
|
||||||
|
--replace_regex /SQL_LOAD_MB-[0-9]-[0-9]/SQL_LOAD_MB-#-#/ /exec_time=[0-9]*/exec_time=#/ /end_log_pos [0-9]*/end_log_pos #/ /# at [0-9]*/# at #/ /Xid = [0-9]*/Xid = #/
|
||||||
|
--exec $MYSQL_BINLOG -B -v -v $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/$mysqlbinlog_output
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # Cleanup.
|
||||||
|
--echo #
|
||||||
|
--echo # reset variable value to pass testcase checks
|
||||||
|
eval SET @@global.max_allowed_packet = $orig_max_allowed_packet;
|
||||||
|
DROP TABLE t1;
|
||||||
|
drop database if exists mysqltest;
|
||||||
|
|
||||||
|
--echo remove_file \$MYSQLTEST_VARDIR/$mysqlbinlog_output
|
||||||
|
#
|
||||||
|
# NOTE: If you want to see the *huge* mysqlbinlog output, disable next line:
|
||||||
|
#
|
||||||
|
--remove_file $MYSQLTEST_VARDIR/$mysqlbinlog_output
|
||||||
|
|
@ -0,0 +1,39 @@
|
|||||||
|
CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY, a VARCHAR(255)) ENGINE=InnoDB ROW_FORMAT=COMPRESSED ENCRYPTED=YES;
|
||||||
|
INSERT INTO t1 VALUES(1, repeat('Nesamani', 10));
|
||||||
|
SELECT COUNT(*) FROM t1;
|
||||||
|
COUNT(*)
|
||||||
|
1
|
||||||
|
SHOW CREATE TABLE t1;
|
||||||
|
Table Create Table
|
||||||
|
t1 CREATE TABLE `t1` (
|
||||||
|
`id` int(11) NOT NULL,
|
||||||
|
`a` varchar(255) DEFAULT NULL,
|
||||||
|
PRIMARY KEY (`id`)
|
||||||
|
) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED `ENCRYPTED`=YES
|
||||||
|
# Wait max 10 min for key encryption threads to encrypt all spaces
|
||||||
|
db.opt
|
||||||
|
t1.frm
|
||||||
|
t1.ibd
|
||||||
|
FLUSH TABLES t1 FOR EXPORT;
|
||||||
|
backup: t1
|
||||||
|
db.opt
|
||||||
|
t1.cfg
|
||||||
|
t1.frm
|
||||||
|
t1.ibd
|
||||||
|
UNLOCK TABLES;
|
||||||
|
DROP TABLE t1;
|
||||||
|
CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY, a VARCHAR(255)) ENGINE=InnoDB ROW_FORMAT=COMPRESSED ENCRYPTED=YES;
|
||||||
|
ALTER TABLE t1 DISCARD TABLESPACE;
|
||||||
|
restore: t1 .ibd and .cfg files
|
||||||
|
ALTER TABLE t1 IMPORT TABLESPACE;
|
||||||
|
SELECT COUNT(*) FROM t1;
|
||||||
|
COUNT(*)
|
||||||
|
1
|
||||||
|
SHOW CREATE TABLE t1;
|
||||||
|
Table Create Table
|
||||||
|
t1 CREATE TABLE `t1` (
|
||||||
|
`id` int(11) NOT NULL,
|
||||||
|
`a` varchar(255) DEFAULT NULL,
|
||||||
|
PRIMARY KEY (`id`)
|
||||||
|
) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED `ENCRYPTED`=YES
|
||||||
|
DROP TABLE t1;
|
@@ -0,0 +1,3 @@
+--innodb-encrypt-tables=ON
+--innodb-encryption-threads=4
+--innodb-tablespaces-encryption
@ -0,0 +1,45 @@
|
|||||||
|
-- source include/have_innodb.inc
|
||||||
|
-- source include/have_example_key_management_plugin.inc
|
||||||
|
-- source include/not_valgrind.inc
|
||||||
|
-- source include/not_embedded.inc
|
||||||
|
|
||||||
|
let MYSQLD_DATADIR = `SELECT @@datadir`;
|
||||||
|
--let t1_IBD = $MYSQLD_DATADIR/test/t1.ibd
|
||||||
|
CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY, a VARCHAR(255)) ENGINE=InnoDB ROW_FORMAT=COMPRESSED ENCRYPTED=YES;
|
||||||
|
INSERT INTO t1 VALUES(1, repeat('Nesamani', 10));
|
||||||
|
|
||||||
|
SELECT COUNT(*) FROM t1;
|
||||||
|
SHOW CREATE TABLE t1;
|
||||||
|
|
||||||
|
--echo # Wait max 10 min for key encryption threads to encrypt all spaces
|
||||||
|
--let $wait_timeout= 600
|
||||||
|
--let $wait_condition=SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 AND ROTATING_OR_FLUSHING <> 0
|
||||||
|
--source include/wait_condition.inc
|
||||||
|
|
||||||
|
let $restart_noprint=2;
|
||||||
|
--source include/restart_mysqld.inc
|
||||||
|
let MYSQLD_DATADIR =`SELECT @@datadir`;
|
||||||
|
|
||||||
|
--list_files $MYSQLD_DATADIR/test
|
||||||
|
FLUSH TABLES t1 FOR EXPORT;
|
||||||
|
perl;
|
||||||
|
do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl";
|
||||||
|
ib_backup_tablespaces("test", "t1");
|
||||||
|
EOF
|
||||||
|
--list_files $MYSQLD_DATADIR/test
|
||||||
|
UNLOCK TABLES;
|
||||||
|
DROP TABLE t1;
|
||||||
|
|
||||||
|
CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY, a VARCHAR(255)) ENGINE=InnoDB ROW_FORMAT=COMPRESSED ENCRYPTED=YES;
|
||||||
|
ALTER TABLE t1 DISCARD TABLESPACE;
|
||||||
|
perl;
|
||||||
|
do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl";
|
||||||
|
ib_discard_tablespaces("test", "t1");
|
||||||
|
ib_restore_tablespaces("test", "t1");
|
||||||
|
EOF
|
||||||
|
|
||||||
|
ALTER TABLE t1 IMPORT TABLESPACE;
|
||||||
|
|
||||||
|
SELECT COUNT(*) FROM t1;
|
||||||
|
SHOW CREATE TABLE t1;
|
||||||
|
DROP TABLE t1;
|
@@ -1110,7 +1110,7 @@ id select_type table type possible_keys key key_len ref rows Extra
 EXPLAIN SELECT * FROM t1 AS t2 STRAIGHT_JOIN t1 FORCE INDEX(b) WHERE t1.b=t2.b;
 id select_type table type possible_keys key key_len ref rows Extra
 1 SIMPLE t2 ALL b NULL NULL NULL 2 Using where
-1 SIMPLE t1 ref b b 5 test.t2.b 2
+1 SIMPLE t1 ref b b 5 test.t2.b 1
 EXPLAIN SELECT b FROM t1 FORCE INDEX(b);
 id select_type table type possible_keys key key_len ref rows Extra
 1 SIMPLE t1 index NULL b 5 NULL 2 Using index
@@ -64,7 +64,7 @@ a b c
 explain select * from t1 where b in (select c from t3);
 id select_type table type possible_keys key key_len ref rows Extra
 1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
-1 PRIMARY t3 ref c c 5 test.t1.b 2 Using index
+1 PRIMARY t3 ref c c 5 test.t1.b 1 Using index
 # select_type=PRIMARY, type=range,ref
 select * from t1 where c in (select c from t3 where c between -2 and -1);
 a b c
@@ -74,7 +74,7 @@ a b c
 explain select * from t1 where c in (select c from t3 where c between -2 and -1);
 id select_type table type possible_keys key key_len ref rows Extra
 1 PRIMARY t1 range c c 5 NULL 3 Using index condition
-1 PRIMARY t3 index c c 5 NULL 6 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ref c c 5 test.t1.c 1 Using index
 # select_type=UNION, type=system
 # select_type=UNION RESULT, type=<union1,2>
 select * from t1 union select * from t2;
@@ -74,7 +74,7 @@ IF(HAVE_MLOCK)
 ENDIF()
 
 ADD_CONVENIENCE_LIBRARY(mysys ${MYSYS_SOURCES})
-TARGET_LINK_LIBRARIES(mysys dbug strings mysys_ssl ${ZLIB_LIBRARY}
+TARGET_LINK_LIBRARIES(mysys dbug strings ${ZLIB_LIBRARY}
 ${LIBNSL} ${LIBM} ${LIBRT} ${LIBDL} ${LIBSOCKET} ${LIBEXECINFO} ${CRC32_LIBRARY})
 DTRACE_INSTRUMENT(mysys)
 
@@ -25,8 +25,8 @@ MYSQL_ADD_PLUGIN(auth_ed25519 server_ed25519.c ${REF10_SOURCES} MODULE_ONLY)
 
 # client plugin and unit test ed25519-t can use the library
 #MYSQL_ADD_PLUGIN(client_ed25519 client_ed25519.c MODULE_ONLY
-# CLIENT LINK_LIBRARIES mysys_ssl ref10 COMPONENT ClientPlugins)
+# CLIENT LINK_LIBRARIES ref10 mysys_ssl COMPONENT ClientPlugins)
 
 IF(WITH_UNIT_TESTS)
-MY_ADD_TESTS(ed25519 LINK_LIBRARIES mysys ref10)
+MY_ADD_TESTS(ed25519 LINK_LIBRARIES ref10 mysys_ssl)
 ENDIF()
@@ -1036,7 +1036,7 @@ typedef bool (stat_print_fn)(THD *thd, const char *type, size_t type_len,
 const char *file, size_t file_len,
 const char *status, size_t status_len);
 enum ha_stat_type { HA_ENGINE_STATUS, HA_ENGINE_LOGS, HA_ENGINE_MUTEX };
-extern st_plugin_int *hton2plugin[MAX_HA];
+extern MYSQL_PLUGIN_IMPORT st_plugin_int *hton2plugin[MAX_HA];
 
 /* Transaction log maintains type definitions */
 enum log_status
@@ -3913,7 +3913,6 @@ int subselect_single_select_engine::exec()
 tab->save_read_record= tab->read_record.read_record_func;
 tab->read_record.read_record_func= rr_sequential;
 tab->read_first_record= read_first_record_seq;
-tab->read_record.record= tab->table->record[0];
 tab->read_record.thd= join->thd;
 tab->read_record.ref_length= tab->table->file->ref_length;
 tab->read_record.unlock_row= rr_unlock_row;
@@ -3931,7 +3930,6 @@ int subselect_single_select_engine::exec()
 for (JOIN_TAB **ptab= changed_tabs; ptab != last_changed_tab; ptab++)
 {
 JOIN_TAB *tab= *ptab;
-tab->read_record.record= 0;
 tab->read_record.ref_length= 0;
 tab->read_first_record= tab->save_read_first_record;
 tab->read_record.read_record_func= tab->save_read_record;
@@ -44,11 +44,6 @@ public:
 first_check= true;
 }
 
-void cleanup()
-{
-group_fields.empty();
-}
-
 /*
 Check if the current row is in a different group than the previous row
 this function was called for.
@@ -86,6 +81,10 @@ public:
 }
 return 0;
 }
+~Group_bound_tracker()
+{
+group_fields.delete_elements();
+}
 
 private:
 List<Cached_item> group_fields;
@@ -213,7 +212,6 @@ public:
 {
 if (peer_tracker)
 {
-peer_tracker->cleanup();
 delete peer_tracker;
 peer_tracker= NULL;
 }
@@ -284,7 +282,6 @@ class Item_sum_dense_rank: public Item_sum_int
 {
 if (peer_tracker)
 {
-peer_tracker->cleanup();
 delete peer_tracker;
 peer_tracker= NULL;
 }
@@ -548,7 +545,6 @@ class Item_sum_percent_rank: public Item_sum_window_with_row_count
 {
 if (peer_tracker)
 {
-peer_tracker->cleanup();
 delete peer_tracker;
 peer_tracker= NULL;
 }
@@ -8485,8 +8485,10 @@ mysqld_get_one_option(int optid, const struct my_option *opt, char *argument)
 opt_specialflag|= SPECIAL_NO_HOST_CACHE;
 break;
 case (int) OPT_SKIP_RESOLVE:
-opt_skip_name_resolve= 1;
-opt_specialflag|=SPECIAL_NO_RESOLVE;
+if ((opt_skip_name_resolve= (argument != disabled_my_option)))
+opt_specialflag|= SPECIAL_NO_RESOLVE;
+else
+opt_specialflag&= ~SPECIAL_NO_RESOLVE;
 break;
 case (int) OPT_WANT_CORE:
 test_flags |= TEST_CORE_ON_SIGNAL;
@@ -8545,6 +8547,8 @@ mysqld_get_one_option(int optid, const struct my_option *opt, char *argument)
 break;
 case OPT_PLUGIN_LOAD:
 free_list(opt_plugin_load_list_ptr);
+if (argument == disabled_my_option)
+break; // Resets plugin list
 /* fall through */
 case OPT_PLUGIN_LOAD_ADD:
 opt_plugin_load_list_ptr->push_back(new i_string(argument));
@@ -77,7 +77,6 @@ bool init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
 bzero((char*) info,sizeof(*info));
 info->thd= thd;
 info->table= table;
-info->record= table->record[0];
 info->print_error= print_error;
 info->unlock_row= rr_unlock_row;
 
@@ -210,7 +209,6 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
 else
 {
 empty_record(table);
-info->record= table->record[0];
 info->ref_length= (uint)table->file->ref_length;
 }
 info->select=select;
@@ -393,7 +391,7 @@ static int rr_index_first(READ_RECORD *info)
 return tmp;
 }
 
-tmp= info->table->file->ha_index_first(info->record);
+tmp= info->table->file->ha_index_first(info->record());
 info->read_record_func= rr_index;
 if (tmp)
 tmp= rr_handle_error(info, tmp);
@@ -416,7 +414,7 @@ static int rr_index_first(READ_RECORD *info)
 
 static int rr_index_last(READ_RECORD *info)
 {
-int tmp= info->table->file->ha_index_last(info->record);
+int tmp= info->table->file->ha_index_last(info->record());
 info->read_record_func= rr_index_desc;
 if (tmp)
 tmp= rr_handle_error(info, tmp);
@@ -442,7 +440,7 @@ static int rr_index_last(READ_RECORD *info)
 
 static int rr_index(READ_RECORD *info)
 {
-int tmp= info->table->file->ha_index_next(info->record);
+int tmp= info->table->file->ha_index_next(info->record());
 if (tmp)
 tmp= rr_handle_error(info, tmp);
 return tmp;
@@ -467,7 +465,7 @@ static int rr_index(READ_RECORD *info)
 
 static int rr_index_desc(READ_RECORD *info)
 {
-int tmp= info->table->file->ha_index_prev(info->record);
+int tmp= info->table->file->ha_index_prev(info->record());
 if (tmp)
 tmp= rr_handle_error(info, tmp);
 return tmp;
@@ -477,7 +475,7 @@ static int rr_index_desc(READ_RECORD *info)
 int rr_sequential(READ_RECORD *info)
 {
 int tmp;
-while ((tmp= info->table->file->ha_rnd_next(info->record)))
+while ((tmp= info->table->file->ha_rnd_next(info->record())))
 {
 tmp= rr_handle_error(info, tmp);
 break;
@@ -493,7 +491,7 @@ static int rr_from_tempfile(READ_RECORD *info)
 {
 if (my_b_read(info->io_cache,info->ref_pos,info->ref_length))
 return -1; /* End of file */
-if (!(tmp= info->table->file->ha_rnd_pos(info->record,info->ref_pos)))
+if (!(tmp= info->table->file->ha_rnd_pos(info->record(), info->ref_pos)))
 break;
 /* The following is extremely unlikely to happen */
 if (tmp == HA_ERR_KEY_NOT_FOUND)
@@ -543,7 +541,7 @@ int rr_from_pointers(READ_RECORD *info)
 cache_pos= info->cache_pos;
 info->cache_pos+= info->ref_length;
 
-if (!(tmp= info->table->file->ha_rnd_pos(info->record,cache_pos)))
+if (!(tmp= info->table->file->ha_rnd_pos(info->record(), cache_pos)))
 break;
 
 /* The following is extremely unlikely to happen */
@ -638,7 +636,7 @@ static int rr_from_cache(READ_RECORD *info)
|
|||||||
else
|
else
|
||||||
{
|
{
|
||||||
error=0;
|
error=0;
|
||||||
memcpy(info->record,info->cache_pos,
|
memcpy(info->record(), info->cache_pos,
|
||||||
(size_t) info->table->s->reclength);
|
(size_t) info->table->s->reclength);
|
||||||
}
|
}
|
||||||
info->cache_pos+=info->reclength;
|
info->cache_pos+=info->reclength;
|
||||||
|
@@ -19,9 +19,10 @@
 #pragma interface /* gcc class implementation */
 #endif
 
+#include "table.h"
+
 struct st_join_table;
 class handler;
-struct TABLE;
 class THD;
 class SQL_SELECT;
 class Copy_field;
@@ -58,7 +59,6 @@ struct READ_RECORD
   SQL_SELECT *select;
   uint ref_length, reclength, rec_cache_size, error_offset;
   uchar *ref_pos;    /* pointer to form->refpos */
-  uchar *record;
   uchar *rec_buf;    /* to read field values after filesort */
   uchar *cache,*cache_pos,*cache_end,*read_positions;
   struct st_sort_addon_field *addon_field; /* Pointer to the fields info */
@@ -67,6 +67,7 @@ struct READ_RECORD
   void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *);
 
   int read_record() { return read_record_func(this); }
+  uchar *record() const { return table->record[0]; }
 
   /*
     SJ-Materialization runtime may need to read fields from the materialized
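The hunks above replace READ_RECORD's cached uchar *record pointer with an accessor that always reads table->record[0], so callers can never hold a stale copy of the row buffer. A minimal standalone sketch of the same refactoring idea, with illustrative stand-in types rather than the actual MariaDB TABLE/handler classes:

#include <cstdio>

// Illustrative stand-in for the real TABLE type.
struct Table {
  unsigned char record0[16];        // plays the role of table->record[0]
};

// Before: the read-record context cached a raw pointer into the table's
// buffer, which could go stale if the buffer was swapped underneath it.
struct ReadRecordOld {
  Table *table;
  unsigned char *record;            // cached copy, must be kept in sync
};

// After: the buffer is fetched through an accessor on every use, so the
// context never stores a pointer that can go stale.
struct ReadRecordNew {
  Table *table;
  unsigned char *record() const { return table->record0; }
};

int main() {
  Table t{};
  ReadRecordNew info{&t};
  info.record()[0] = 42;            // always writes into t.record0
  std::printf("%d\n", t.record0[0]);
  return 0;
}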
@@ -941,6 +941,11 @@ void Explain_select::print_explain_json(Explain_query *query,
     writer->add_member("outer_ref_condition");
     write_item(writer, outer_ref_cond);
   }
+  if (pseudo_bits_cond)
+  {
+    writer->add_member("pseudo_bits_condition");
+    write_item(writer, pseudo_bits_cond);
+  }
 
   /* we do not print HAVING which always evaluates to TRUE */
   if (having || (having_value == Item::COND_FALSE))
@@ -236,6 +236,7 @@ public:
   /* Expensive constant condition */
   Item *exec_const_cond;
   Item *outer_ref_cond;
+  Item *pseudo_bits_cond;
 
   /* HAVING condition */
   Item *having;
@@ -5900,18 +5900,16 @@ add_key_field(JOIN *join,
       (*key_fields)->level= and_level;
       (*key_fields)->optimize= optimize;
       /*
-        If the condition has form "tbl.keypart = othertbl.field" and
-        othertbl.field can be NULL, there will be no matches if othertbl.field
-        has NULL value.
-        We use null_rejecting in add_not_null_conds() to add
-        'othertbl.field IS NOT NULL' to tab->select_cond.
+        If the condition we are analyzing is NULL-rejecting and at least
+        one side of the equalities is NULLable, mark the KEY_FIELD object as
+        null-rejecting. This property is used by:
+        - add_not_null_conds() to add "column IS NOT NULL" conditions
+        - best_access_path() to produce better estimates for NULL-able unique keys.
       */
       {
-        Item *real= (*value)->real_item();
-        if (((cond->functype() == Item_func::EQ_FUNC) ||
-             (cond->functype() == Item_func::MULT_EQUAL_FUNC)) &&
-            (real->type() == Item::FIELD_ITEM) &&
-            ((Item_field*)real)->field->maybe_null())
+        if ((cond->functype() == Item_func::EQ_FUNC ||
+             cond->functype() == Item_func::MULT_EQUAL_FUNC) &&
+            ((*value)->maybe_null || field->real_maybe_null()))
           (*key_fields)->null_rejecting= true;
         else
           (*key_fields)->null_rejecting= false;
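The rewritten comment above states the new rule: an equality (EQ or multiple-equal) is marked null-rejecting as soon as either side can be NULL, because a NULL on either side can never produce a match. A small self-contained model of that predicate, using hypothetical names rather than the server's Item API:

#include <cassert>

enum class FuncType { EQ, MULT_EQUAL, OTHER };

// Hypothetical restatement of the new rule: mark the key field
// null-rejecting when the comparison is =/multiple-equal and at least one
// side is nullable.
bool is_null_rejecting(FuncType type, bool value_maybe_null, bool field_maybe_null) {
  return (type == FuncType::EQ || type == FuncType::MULT_EQUAL) &&
         (value_maybe_null || field_maybe_null);
}

int main() {
  // t1.key = t2.col with nullable t2.col: null-rejecting, so the optimizer
  // may inject "t2.col IS NOT NULL" and improve its estimates.
  assert(is_null_rejecting(FuncType::EQ, true, false));
  // A non-equality predicate is never marked null-rejecting here.
  assert(!is_null_rejecting(FuncType::OTHER, true, true));
  return 0;
}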
@@ -7235,6 +7233,7 @@ best_access_path(JOIN *join,
       ulong key_flags;
       uint key_parts;
       key_part_map found_part= 0;
+      key_part_map notnull_part=0; // key parts which won't have NULL in lookup tuple.
       table_map found_ref= 0;
       uint key= keyuse->key;
       filter= 0;
@@ -7294,6 +7293,9 @@ best_access_path(JOIN *join,
           if (!(keyuse->used_tables & ~join->const_table_map))
             const_part|= keyuse->keypart_map;
 
+          if (!keyuse->val->maybe_null || keyuse->null_rejecting)
+            notnull_part|=keyuse->keypart_map;
+
           double tmp2= prev_record_reads(join->positions, idx,
                                          (found_ref | keyuse->used_tables));
           if (tmp2 < best_prev_record_reads)
@@ -7347,12 +7349,19 @@ best_access_path(JOIN *join,
       loose_scan_opt.check_ref_access_part1(s, key, start_key, found_part);
 
       /* Check if we found full key */
-      if (found_part == PREV_BITS(uint, key_parts) &&
-          !ref_or_null_part)
+      const key_part_map all_key_parts= PREV_BITS(uint, key_parts);
+      if (found_part == all_key_parts && !ref_or_null_part)
       {                                         /* use eq key */
         max_key_part= (uint) ~0;
-        if ((key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME ||
-            MY_TEST(key_flags & HA_EXT_NOSAME))
+        /*
+          If the index is a unique index (1), and
+          - all its columns are not null (2), or
+          - equalities we are using reject NULLs (3)
+          then the estimate is rows=1.
+        */
+        if ((key_flags & (HA_NOSAME | HA_EXT_NOSAME)) &&   // (1)
+            (!(key_flags & HA_NULL_PART_KEY) ||             // (2)
+             all_key_parts == notnull_part))                // (3)
         {
           trace_access_idx.add("access_type", "eq_ref")
                           .add("index", keyinfo->name);
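The new comment spells out when ref access on a unique key can be costed as eq_ref (rows=1): either no key part is nullable, or every key part is covered by a NULL-rejecting equality, which is what the notnull_part bitmap tracks. A compact sketch of that bitmap test, with a PREV_BITS-style mask and illustrative flags rather than the handler's real key_flags:

#include <cassert>
#include <cstdint>

using key_part_map = uint64_t;

// Mask with the lowest n bits set, similar in spirit to PREV_BITS(uint, n).
key_part_map prev_bits(unsigned n) { return (key_part_map(1) << n) - 1; }

// Hypothetical restatement of the new test: unique index (1), and either no
// nullable key parts (2) or every key part matched by a NULL-rejecting
// equality (3).
bool can_use_eq_ref(bool is_unique, bool has_null_part,
                    unsigned key_parts, key_part_map notnull_part) {
  const key_part_map all_key_parts = prev_bits(key_parts);
  return is_unique &&                          // (1)
         (!has_null_part ||                    // (2)
          all_key_parts == notnull_part);      // (3)
}

int main() {
  // Unique key over two nullable columns, both equalities NULL-rejecting.
  assert(can_use_eq_ref(true, true, 2, prev_bits(2)));
  // Same key, but only the first part is NULL-rejecting: no rows=1 estimate.
  assert(!can_use_eq_ref(true, true, 2, prev_bits(1)));
  return 0;
}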
@@ -10565,8 +10574,16 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
     uint maybe_null= MY_TEST(keyinfo->key_part[i].null_bit);
     j->ref.items[i]=keyuse->val;        // Save for cond removal
     j->ref.cond_guards[i]= keyuse->cond_guard;
-    if (keyuse->null_rejecting)
+
+    /*
+      Set ref.null_rejecting to true only if we are going to inject a
+      "keyuse->val IS NOT NULL" predicate.
+    */
+    Item *real= (keyuse->val)->real_item();
+    if (keyuse->null_rejecting && (real->type() == Item::FIELD_ITEM) &&
+        ((Item_field*)real)->field->maybe_null())
       j->ref.null_rejecting|= (key_part_map)1 << i;
+
     keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables;
     /*
       We don't want to compute heavy expressions in EXPLAIN, an example would
@@ -21060,7 +21077,6 @@ join_read_first(JOIN_TAB *tab)
   tab->table->status=0;
   tab->read_record.read_record_func= join_read_next;
   tab->read_record.table=table;
-  tab->read_record.record=table->record[0];
   if (!table->file->inited)
     error= table->file->ha_index_init(tab->index, tab->sorted);
   if (likely(!error))
@@ -21080,7 +21096,7 @@ static int
 join_read_next(READ_RECORD *info)
 {
   int error;
-  if (unlikely((error= info->table->file->ha_index_next(info->record))))
+  if (unlikely((error= info->table->file->ha_index_next(info->record()))))
     return report_error(info->table, error);
 
   return 0;
@@ -21100,7 +21116,6 @@ join_read_last(JOIN_TAB *tab)
   tab->table->status=0;
   tab->read_record.read_record_func= join_read_prev;
   tab->read_record.table=table;
-  tab->read_record.record=table->record[0];
   if (!table->file->inited)
     error= table->file->ha_index_init(tab->index, 1);
   if (likely(!error))
@@ -21117,7 +21132,7 @@ static int
 join_read_prev(READ_RECORD *info)
 {
   int error;
-  if (unlikely((error= info->table->file->ha_index_prev(info->record))))
+  if (unlikely((error= info->table->file->ha_index_prev(info->record()))))
     return report_error(info->table, error);
   return 0;
 }
@@ -21147,7 +21162,7 @@ static int
 join_ft_read_next(READ_RECORD *info)
 {
   int error;
-  if (unlikely((error= info->table->file->ha_ft_read(info->table->record[0]))))
+  if (unlikely((error= info->table->file->ha_ft_read(info->record()))))
     return report_error(info->table, error);
   return 0;
 }
@@ -26401,6 +26416,7 @@ int JOIN::save_explain_data_intern(Explain_query *output,
 
   xpl_sel->exec_const_cond= exec_const_cond;
   xpl_sel->outer_ref_cond= outer_ref_cond;
+  xpl_sel->pseudo_bits_cond= pseudo_bits_cond;
   if (tmp_having)
     xpl_sel->having= tmp_having;
   else
@@ -898,7 +898,7 @@ public:
   {
     Rowid_seq_cursor::init(info);
     table= info->table;
-    record= info->record;
+    record= info->record();
   }
 
   virtual int fetch()
@@ -6111,10 +6111,12 @@ database_corrupted:
 
     if (err == DB_PAGE_CORRUPTED
         || err == DB_DECRYPTION_FAILED) {
+      const page_id_t corrupt_page_id = bpage->id;
+
       buf_corrupt_page_release(bpage, space);
 
       if (recv_recovery_is_on()) {
-        recv_recover_corrupt_page(bpage);
+        recv_recover_corrupt_page(corrupt_page_id);
       }
 
       space->release_for_io();
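In the buffer-pool hunk above, the page id is copied into a local corrupt_page_id before buf_corrupt_page_release() runs, and that copy (a small value type) is what gets handed to recovery. A minimal sketch of why passing the id by value is safer than passing the page pointer, using simplified stand-ins rather than the real InnoDB types:

#include <cstdint>
#include <cstdio>
#include <memory>

// Simplified stand-ins for page_id_t and a buffer-pool page.
struct PageId { uint32_t space, page_no; };
struct Page   { PageId id; };

// Takes the id by value, so it stays valid even after the page is released.
void recover_corrupt_page(PageId id) {
  std::printf("recovering space %u page %u\n",
              (unsigned) id.space, (unsigned) id.page_no);
}

int main() {
  auto page = std::make_unique<Page>(Page{{5, 42}});
  PageId corrupt_page_id = page->id;     // copy the id first
  page.reset();                          // release the page; it may be freed
  recover_corrupt_page(corrupt_page_id); // still safe: we own a copy
  return 0;
}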
@@ -214,77 +214,6 @@ struct buf_pools_list_size_t {
 };
 #endif /* !UNIV_INNOCHECKSUM */
 
-/** Page identifier. */
-class page_id_t {
-public:
-
-        /** Constructor from (space, page_no).
-        @param[in]      space   tablespace id
-        @param[in]      page_no page number */
-        page_id_t(ulint space, ulint page_no)
-                : m_space(uint32_t(space)), m_page_no(uint32(page_no))
-        {
-                ut_ad(space <= 0xFFFFFFFFU);
-                ut_ad(page_no <= 0xFFFFFFFFU);
-        }
-
-        bool operator==(const page_id_t& rhs) const
-        {
-                return m_space == rhs.m_space && m_page_no == rhs.m_page_no;
-        }
-        bool operator!=(const page_id_t& rhs) const { return !(*this == rhs); }
-
-        bool operator<(const page_id_t& rhs) const
-        {
-                if (m_space == rhs.m_space) {
-                        return m_page_no < rhs.m_page_no;
-                }
-
-                return m_space < rhs.m_space;
-        }
-
-        /** Retrieve the tablespace id.
-        @return tablespace id */
-        uint32_t space() const { return m_space; }
-
-        /** Retrieve the page number.
-        @return page number */
-        uint32_t page_no() const { return m_page_no; }
-
-        /** Retrieve the fold value.
-        @return fold value */
-        ulint fold() const { return (m_space << 20) + m_space + m_page_no; }
-
-        /** Reset the page number only.
-        @param[in]      page_no page number */
-        inline void set_page_no(ulint page_no)
-        {
-                m_page_no = uint32_t(page_no);
-
-                ut_ad(page_no <= 0xFFFFFFFFU);
-        }
-
-private:
-
-        /** Tablespace id. */
-        uint32_t        m_space;
-
-        /** Page number. */
-        uint32_t        m_page_no;
-
-        /** Declare the overloaded global operator<< as a friend of this
-        class. Refer to the global declaration for further details. Print
-        the given page_id_t object.
-        @param[in,out]  out     the output stream
-        @param[in]      page_id the page_id_t object to be printed
-        @return the output stream */
-        friend
-        std::ostream&
-        operator<<(
-                std::ostream&           out,
-                const page_id_t         page_id);
-};
-
 /** Print the given page_id_t object.
 @param[in,out]  out     the output stream
 @param[in]      page_id the page_id_t object to be printed
@@ -134,6 +134,77 @@ this must be equal to srv_page_size */
 #define BUF_BUDDY_HIGH  (BUF_BUDDY_LOW << BUF_BUDDY_SIZES)
 /* @} */
 
+/** Page identifier. */
+class page_id_t {
+public:
+
+        /** Constructor from (space, page_no).
+        @param[in]      space   tablespace id
+        @param[in]      page_no page number */
+        page_id_t(ulint space, ulint page_no)
+                : m_space(uint32_t(space)), m_page_no(uint32(page_no))
+        {
+                ut_ad(space <= 0xFFFFFFFFU);
+                ut_ad(page_no <= 0xFFFFFFFFU);
+        }
+
+        bool operator==(const page_id_t& rhs) const
+        {
+                return m_space == rhs.m_space && m_page_no == rhs.m_page_no;
+        }
+        bool operator!=(const page_id_t& rhs) const { return !(*this == rhs); }
+
+        bool operator<(const page_id_t& rhs) const
+        {
+                if (m_space == rhs.m_space) {
+                        return m_page_no < rhs.m_page_no;
+                }
+
+                return m_space < rhs.m_space;
+        }
+
+        /** Retrieve the tablespace id.
+        @return tablespace id */
+        uint32_t space() const { return m_space; }
+
+        /** Retrieve the page number.
+        @return page number */
+        uint32_t page_no() const { return m_page_no; }
+
+        /** Retrieve the fold value.
+        @return fold value */
+        ulint fold() const { return (m_space << 20) + m_space + m_page_no; }
+
+        /** Reset the page number only.
+        @param[in]      page_no page number */
+        void set_page_no(ulint page_no)
+        {
+                m_page_no = uint32_t(page_no);
+
+                ut_ad(page_no <= 0xFFFFFFFFU);
+        }
+
+private:
+
+        /** Tablespace id. */
+        uint32_t        m_space;
+
+        /** Page number. */
+        uint32_t        m_page_no;
+
+        /** Declare the overloaded global operator<< as a friend of this
+        class. Refer to the global declaration for further details. Print
+        the given page_id_t object.
+        @param[in,out]  out     the output stream
+        @param[in]      page_id the page_id_t object to be printed
+        @return the output stream */
+        friend
+        std::ostream&
+        operator<<(
+                std::ostream&           out,
+                const page_id_t         page_id);
+};
+
 #ifndef UNIV_INNOCHECKSUM
 
 #include "ut0mutex.h"
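The page_id_t class moved into this header is a small value type: a 32-bit tablespace id plus page number, ordered space-first and exposing a fold() value for hashing. A brief usage sketch against a minimal re-statement of that interface (trimmed to what the example needs; the real class also has set_page_no(), operator<< and debug assertions):

#include <cassert>
#include <cstdint>

// Minimal re-statement of the interface shown in the hunk above.
class page_id_t {
public:
  page_id_t(uint32_t space, uint32_t page_no)
    : m_space(space), m_page_no(page_no) {}
  bool operator==(const page_id_t& rhs) const
  { return m_space == rhs.m_space && m_page_no == rhs.m_page_no; }
  bool operator<(const page_id_t& rhs) const
  {
    // Order by tablespace first, then by page number within the space.
    return m_space == rhs.m_space ? m_page_no < rhs.m_page_no
                                  : m_space < rhs.m_space;
  }
  uint32_t space() const { return m_space; }
  uint32_t page_no() const { return m_page_no; }
  uint64_t fold() const { return (uint64_t(m_space) << 20) + m_space + m_page_no; }
private:
  uint32_t m_space;
  uint32_t m_page_no;
};

int main() {
  page_id_t a(1, 10), b(1, 11), c(2, 0);
  assert(a < b && b < c);         // space-major ordering
  assert(!(a == b));
  assert(a.fold() != b.fold());   // fold() gives a simple hash key
  return 0;
}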
@@ -50,8 +50,8 @@ recv_find_max_checkpoint(ulint* max_field)
 
 /** Reduces recv_sys.n_addrs for the corrupted page.
 This function should called when srv_force_recovery > 0.
-@param[in]      bpage   buffer pool page */
-void recv_recover_corrupt_page(buf_page_t* bpage);
+@param[in]      page_id page id of the corrupted page */
+void recv_recover_corrupt_page(page_id_t page_id);
 
 /** Apply any buffered redo log to a page that was just read from a data file.
 @param[in,out]  bpage   buffer pool page */
@@ -777,9 +777,7 @@ os_file_rename
 os_aio
 os_file_read
 os_file_read_no_error_handling
-os_file_read_no_error_handling_int_fd
 os_file_write
-os_file_write_int_fd
 
 The wrapper functions have the prefix of "innodb_". */
 
@@ -1155,13 +1153,9 @@ to original un-instrumented file I/O APIs */
 
 # define os_file_read_no_error_handling(type, file, buf, offset, n, o) \
         os_file_read_no_error_handling_func(type, file, buf, offset, n, o)
-# define os_file_read_no_error_handling_int_fd(type, file, buf, offset, n) \
-        os_file_read_no_error_handling_func(type, OS_FILE_FROM_FD(file), buf, offset, n, NULL)
 
 # define os_file_write(type, name, file, buf, offset, n) \
         os_file_write_func(type, name, file, buf, offset, n)
-# define os_file_write_int_fd(type, name, file, buf, offset, n) \
-        os_file_write_func(type, name, OS_FILE_FROM_FD(file), buf, offset, n)
 
 # define os_file_flush(file) os_file_flush_func(file)
 
@@ -1,7 +1,7 @@
 /*****************************************************************************
 
 Copyright (c) 2010, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2017, MariaDB Corporation.
+Copyright (c) 2013, 2019, MariaDB Corporation.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -370,7 +370,9 @@ row_merge_buf_sort(
 
 /********************************************************************//**
 Write a merge block to the file system.
-@return whether the request was completed successfully */
+@return whether the request was completed successfully
+@retval false   on error
+@retval true    on success */
 UNIV_INTERN
 bool
 row_merge_write(
@@ -2102,8 +2102,8 @@ static void recv_recover_page(buf_block_t* block, mtr_t& mtr,
 
 /** Reduces recv_sys.n_addrs for the corrupted page.
 This function should called when srv_force_recovery > 0.
-@param[in]      bpage   buffer pool page */
-void recv_recover_corrupt_page(buf_page_t* bpage)
+@param[in]      page_id page id of the corrupted page */
+void recv_recover_corrupt_page(page_id_t page_id)
 {
         ut_ad(srv_force_recovery);
         mutex_enter(&recv_sys.mutex);
@@ -2114,7 +2114,7 @@ void recv_recover_corrupt_page(buf_page_t* bpage)
         }
 
         recv_addr_t* recv_addr = recv_get_fil_addr_struct(
-                bpage->id.space(), bpage->id.page_no());
+                page_id.space(), page_id.page_no());
 
         ut_ad(recv_addr->state != RECV_WILL_NOT_READ);
 
@@ -4869,7 +4869,8 @@ Requests a synchronous write operation.
 @param[out]     buf     buffer from which to write
 @param[in]      offset  file offset from the start where to read
 @param[in]      n       number of bytes to read, starting from offset
-@return DB_SUCCESS if request was successful, false if fail */
+@return error code
+@retval DB_SUCCESS      if the operation succeeded */
 dberr_t
 os_file_write_func(
         const IORequest&        type,
@@ -5354,7 +5355,8 @@ Requests a synchronous positioned read operation.
 @param[out]     buf     buffer where to read
 @param[in]      offset  file offset from the start where to read
 @param[in]      n       number of bytes to read, starting from offset
-@return DB_SUCCESS or error code */
+@return error code
+@retval DB_SUCCESS      if the operation succeeded */
 dberr_t
 os_file_read_func(
         const IORequest&        type,
@@ -3424,8 +3424,12 @@ page_corrupted:
                 if (!encrypted) {
                 } else if (!key_version) {
 not_encrypted:
-                        if (!page_compressed
-                            && !block->page.zip.data) {
+                        if (block->page.id.page_no() == 0
+                            && block->page.zip.data) {
+                                block->page.zip.data = src;
+                                frame_changed = true;
+                        } else if (!page_compressed
+                                   && !block->page.zip.data) {
                                 block->frame = src;
                                 frame_changed = true;
                         } else {
@@ -3529,7 +3533,11 @@ not_encrypted:
                         }
 
                         if (frame_changed) {
-                                block->frame = dst;
+                                if (block->page.zip.data) {
+                                        block->page.zip.data = dst;
+                                } else {
+                                        block->frame = dst;
+                                }
                         }
 
                         src = io_buffer + (i * size);
||||||
|
@ -1123,7 +1123,9 @@ row_merge_read(
|
|||||||
|
|
||||||
/********************************************************************//**
|
/********************************************************************//**
|
||||||
Write a merge block to the file system.
|
Write a merge block to the file system.
|
||||||
@return whether the request was completed successfully */
|
@return whether the request was completed successfully
|
||||||
|
@retval false on error
|
||||||
|
@retval true on success */
|
||||||
UNIV_INTERN
|
UNIV_INTERN
|
||||||
bool
|
bool
|
||||||
row_merge_write(
|
row_merge_write(
|
||||||
|
@ -2778,9 +2778,12 @@ int ha_maria::external_lock(THD *thd, int lock_type)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
} /* if transactional table */
|
} /* if transactional table */
|
||||||
DBUG_RETURN(maria_lock_database(file, !table->s->tmp_table ?
|
int result = maria_lock_database(file, !table->s->tmp_table ?
|
||||||
lock_type : ((lock_type == F_UNLCK) ?
|
lock_type : ((lock_type == F_UNLCK) ?
|
||||||
F_UNLCK : F_EXTRA_LCK)));
|
F_UNLCK : F_EXTRA_LCK));
|
||||||
|
if (!file->s->base.born_transactional)
|
||||||
|
file->state= &file->s->state.state; // Restore state if clone
|
||||||
|
DBUG_RETURN(result);
|
||||||
}
|
}
|
||||||
|
|
||||||
int ha_maria::start_stmt(THD *thd, thr_lock_type lock_type)
|
int ha_maria::start_stmt(THD *thd, thr_lock_type lock_type)
|
||||||
|
@@ -1,5 +1,3 @@
-#!/bin/awk
-
 /Query_time:/ {
     results["Rows_examined:"] = "uninit";
     results["RocksDB_key_skipped:"] = "uninit";
@@ -1,2 +1,4 @@
+#!/bin/bash
+
 src_data_dir="${MYSQLTEST_VARDIR}/mysqld.1/data/"
 python -c "import socket as s; sock = s.socket(s.AF_UNIX); sock.bind('${src_data_dir}/slocket')"
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 set -e
 
 COPY_LOG=$1
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 set -e
 
 # Insert 100 batches of 100 records each to a table with following schema:
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 set -e
 
 # Initially loads a chunk of data.
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 set -e
 
 # Insert 10 batches of 10 records each to a table with following schema:
@@ -1,2 +1,4 @@
+#!/bin/bash
+
 src_data_dir="${MYSQLTEST_VARDIR}/mysqld.1/data/"
 rm "${src_data_dir}/slocket"
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 set -e
 
 binlog_line=($(grep -o "Last binlog file position [0-9]*, file name .*\.[0-9]*" ${MYSQLTEST_VARDIR}/log/mysqld.2.err | tail -1))
@@ -2348,6 +2348,12 @@ static void test_ps_query_cache()
                   "(2, 'hh', 'hh'), (1, 'ii', 'ii'), (2, 'ii', 'ii')");
   myquery(rc);
 
+  rc= mysql_query(mysql,
+                  "set @save_query_cache_type="
+                  "@@global.query_cache_type,"
+                  "@save_query_cache_size="
+                  "@@global.query_cache_size");
+  myquery(rc);
   rc= mysql_query(lmysql, "set global query_cache_type=ON");
   myquery(rc);
   rc= mysql_query(lmysql, "set local query_cache_type=ON");
@@ -2504,9 +2510,9 @@ static void test_ps_query_cache()
   if (lmysql != mysql)
     mysql_close(lmysql);
 
-  rc= mysql_query(mysql, "set global query_cache_size=default");
+  rc= mysql_query(mysql, "set global query_cache_size=@save_query_cache_size");
   myquery(rc);
-  rc= mysql_query(mysql, "set global query_cache_type=default");
+  rc= mysql_query(mysql, "set global query_cache_type=@save_query_cache_type");
   myquery(rc);
 }
 
@@ -13522,6 +13528,12 @@ static void test_open_cursor_prepared_statement_query_cache()
     return;
   }
 
+  rc= mysql_query(mysql,
+                  "set @save_query_cache_type="
+                  "@@global.query_cache_type,"
+                  "@save_query_cache_size="
+                  "@@global.query_cache_size");
+  myquery(rc);
   rc= mysql_query(mysql, "set global query_cache_type=ON");
   myquery(rc);
   rc= mysql_query(mysql, "set local query_cache_type=ON");
@@ -13548,9 +13560,9 @@ static void test_open_cursor_prepared_statement_query_cache()
   check_execute(stmt, rc);
   mysql_stmt_close(stmt);
 
-  rc= mysql_query(mysql, "set global query_cache_type=default");
+  rc= mysql_query(mysql, "set global query_cache_type=@save_query_cache_type");
   myquery(rc);
-  rc= mysql_query(mysql, "set global query_cache_size=default");
+  rc= mysql_query(mysql, "set global query_cache_size=@save_query_cache_size");
   myquery(rc);
 }
 
@@ -18335,6 +18347,12 @@ static void test_bug36326()
   myquery(rc);
   rc= mysql_query(mysql, "INSERT INTO t1 VALUES (1)");
   myquery(rc);
+  rc= mysql_query(mysql,
+                  "set @save_query_cache_type="
+                  "@@global.query_cache_type,"
+                  "@save_query_cache_size="
+                  "@@global.query_cache_size");
+  myquery(rc);
   rc= mysql_query(mysql, "SET GLOBAL query_cache_type = 1");
   myquery(rc);
   rc= mysql_query(mysql, "SET LOCAL query_cache_type = 1");
@@ -18362,8 +18380,8 @@ static void test_bug36326()
   DIE_UNLESS(rc == 1);
   rc= mysql_query(mysql, "DROP TABLE t1");
   myquery(rc);
-  rc= mysql_query(mysql, "SET GLOBAL query_cache_size = default");
-  rc= mysql_query(mysql, "SET GLOBAL query_cache_type = default");
+  rc= mysql_query(mysql, "SET GLOBAL query_cache_size = @save_query_cache_size");
+  rc= mysql_query(mysql, "SET GLOBAL query_cache_type = @save_query_cache_type");
   myquery(rc);
 
   DBUG_VOID_RETURN;
|
|||||||
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
|
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
|
||||||
|
|
||||||
MY_ADD_TESTS(bitmap base64 my_atomic my_rdtsc lf my_malloc my_getopt dynstring
|
MY_ADD_TESTS(bitmap base64 my_atomic my_rdtsc lf my_malloc my_getopt dynstring
|
||||||
aes byte_order
|
byte_order
|
||||||
LINK_LIBRARIES mysys)
|
LINK_LIBRARIES mysys)
|
||||||
MY_ADD_TESTS(my_vsnprintf LINK_LIBRARIES strings mysys)
|
MY_ADD_TESTS(my_vsnprintf LINK_LIBRARIES strings mysys)
|
||||||
|
MY_ADD_TESTS(aes LINK_LIBRARIES mysys mysys_ssl)
|
||||||
ADD_DEFINITIONS(${SSL_DEFINES})
|
ADD_DEFINITIONS(${SSL_DEFINES})
|
||||||
|
|
||||||
MY_ADD_TESTS(ma_dyncol LINK_LIBRARIES mysys)
|
MY_ADD_TESTS(ma_dyncol LINK_LIBRARIES mysys)
|
||||||
|
@ -31,6 +31,6 @@ TARGET_LINK_LIBRARIES(explain_filename-t sql mytap)
|
|||||||
MY_ADD_TEST(explain_filename)
|
MY_ADD_TEST(explain_filename)
|
||||||
|
|
||||||
ADD_EXECUTABLE(mf_iocache-t mf_iocache-t.cc ../../sql/mf_iocache_encr.cc)
|
ADD_EXECUTABLE(mf_iocache-t mf_iocache-t.cc ../../sql/mf_iocache_encr.cc)
|
||||||
TARGET_LINK_LIBRARIES(mf_iocache-t mysys mytap)
|
TARGET_LINK_LIBRARIES(mf_iocache-t mysys mytap mysys_ssl)
|
||||||
ADD_DEPENDENCIES(mf_iocache-t GenError)
|
ADD_DEPENDENCIES(mf_iocache-t GenError)
|
||||||
MY_ADD_TEST(mf_iocache)
|
MY_ADD_TEST(mf_iocache)
|
||||||
|