Merge branch '10.3' into 10.4
commit 57325e4706
@ -30,7 +30,6 @@ SET(HAVE_BACKTRACE_SYMBOLS CACHE INTERNAL "")
|
||||
SET(HAVE_BACKTRACE_SYMBOLS_FD CACHE INTERNAL "")
|
||||
SET(HAVE_BFILL CACHE INTERNAL "")
|
||||
SET(HAVE_BSD_SIGNALS CACHE INTERNAL "")
|
||||
SET(HAVE_BSS_START CACHE INTERNAL "")
|
||||
SET(HAVE_CLOCK_GETTIME CACHE INTERNAL "")
|
||||
SET(HAVE_COMPRESS CACHE INTERNAL "")
|
||||
SET(HAVE_CRYPT CACHE INTERNAL "")
|
||||
|
@ -241,7 +241,6 @@
|
||||
/* Symbols we may use */
|
||||
#cmakedefine HAVE_SYS_ERRLIST 1
|
||||
/* used by stacktrace functions */
|
||||
#cmakedefine HAVE_BSS_START 1
|
||||
#cmakedefine HAVE_BACKTRACE 1
|
||||
#cmakedefine HAVE_BACKTRACE_SYMBOLS 1
|
||||
#cmakedefine HAVE_BACKTRACE_SYMBOLS_FD 1
|
||||
|
@ -797,14 +797,6 @@ CHECK_CXX_SOURCE_COMPILES("
|
||||
HAVE_ABI_CXA_DEMANGLE)
|
||||
ENDIF()
|
||||
|
||||
CHECK_C_SOURCE_COMPILES("
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
extern char *__bss_start;
|
||||
return __bss_start ? 1 : 0;
|
||||
}"
|
||||
HAVE_BSS_START)
|
||||
|
||||
CHECK_C_SOURCE_COMPILES("
|
||||
int main()
|
||||
{
|
||||
|
@ -1 +1 @@
|
||||
Subproject commit ce74fd0c4009ed9f4bcbdb4a01e96c823e961dc3
|
||||
Subproject commit a746c3af449a8754e78ad7971e59e79af7957cdb
|
@ -109,6 +109,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
|
||||
../sql/sql_explain.cc ../sql/sql_explain.h
|
||||
../sql/sql_analyze_stmt.cc ../sql/sql_analyze_stmt.h
|
||||
../sql/compat56.cc
|
||||
../sql/sql_schema.cc
|
||||
../sql/sql_type.cc ../sql/sql_type.h
|
||||
../sql/sql_mode.cc
|
||||
../sql/sql_type_json.cc
|
||||
|
@ -1,6 +1,6 @@
|
||||
'\" t
|
||||
.\"
|
||||
.TH "\FBMYSQL_UPGRADE\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
|
||||
.TH "\FBMYSQL_UPGRADE\FR" "1" "20 July 2020" "MariaDB 10\&.4" "MariaDB Database System"
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" * set default formatting
|
||||
.\" -----------------------------------------------------------------
|
||||
@ -165,6 +165,8 @@ in the data directory\&. This is used to quickly check whether all tables have b
|
||||
\fB\-\-force\fR
|
||||
option\&.
|
||||
.PP
|
||||
For this reason, \fBmysql_upgrade\fR needs to be run as a user with write access to the data directory\&.
|
||||
.PP
|
||||
If you install MariaDB from RPM packages on Linux, you must install the server and client RPMs\&.
|
||||
\fBmysql_upgrade\fR
|
||||
is included in the server RPM but requires the client RPM because the latter includes
|
||||
|
@ -1186,13 +1186,13 @@ i count(*) std(e1/e2)
|
||||
3 4 0.00000000
|
||||
select std(s1/s2) from bug22555;
|
||||
std(s1/s2)
|
||||
0.21325764
|
||||
0.21328517
|
||||
select std(o1/o2) from bug22555;
|
||||
std(o1/o2)
|
||||
0.2132576358664934
|
||||
select std(e1/e2) from bug22555;
|
||||
std(e1/e2)
|
||||
0.21325764
|
||||
0.21328517
|
||||
set @saved_div_precision_increment=@@div_precision_increment;
|
||||
set div_precision_increment=19;
|
||||
select i, count(*), std(s1/s2) from bug22555 group by i order by i;
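Note (not part of the patch): STD(s1/s2) is sensitive to the scale of the intermediate s1/s2 division, which div_precision_increment controls, as the surrounding test also exercises. A minimal sketch using a hypothetical table bug22555_demo:
create table bug22555_demo (s1 smallint, s2 smallint);
insert into bug22555_demo values (3,7),(2,9),(1,4);
set @saved_dpi=@@div_precision_increment;
set div_precision_increment=4;   # coarse intermediate scale
select std(s1/s2) from bug22555_demo;
set div_precision_increment=19;  # fine intermediate scale
select std(s1/s2) from bug22555_demo;
set div_precision_increment=@saved_dpi;
drop table bug22555_demo;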
|
||||
|
@ -25,6 +25,19 @@ ERROR HY000: Table 'procs_priv' was not locked with LOCK TABLES
|
||||
REVOKE PROCESS ON *.* FROM u;
|
||||
ERROR HY000: Table 'db' was not locked with LOCK TABLES
|
||||
DROP TABLE t1;
|
||||
create database mysqltest1;
|
||||
use mysqltest1;
|
||||
create table t1(id int);
|
||||
insert t1 values(2);
|
||||
create user u1@localhost;
|
||||
grant select on mysqltest1.t1 to u1@localhost;
|
||||
grant update on mysqltest1.* to u1@localhost;
|
||||
connect u1, localhost, u1;
|
||||
update mysqltest1.t1 set id=1 where id=2;
|
||||
connection default;
|
||||
disconnect u1;
|
||||
drop user u1@localhost;
|
||||
drop database mysqltest1;
|
||||
#
|
||||
# MDEV-20076: SHOW GRANTS does not quote role names properly
|
||||
#
|
||||
|
@ -34,6 +34,27 @@ REVOKE EXECUTE ON PROCEDURE sp FROM u;
|
||||
REVOKE PROCESS ON *.* FROM u;
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# MDEV-23010 UPDATE privilege at Database and Table level fail to update with SELECT command denied to user
|
||||
#
|
||||
create database mysqltest1;
|
||||
use mysqltest1;
|
||||
create table t1(id int);
|
||||
insert t1 values(2);
|
||||
create user u1@localhost;
|
||||
grant select on mysqltest1.t1 to u1@localhost;
|
||||
grant update on mysqltest1.* to u1@localhost;
|
||||
connect u1, localhost, u1;
|
||||
update mysqltest1.t1 set id=1 where id=2;
|
||||
connection default;
|
||||
disconnect u1;
|
||||
drop user u1@localhost;
|
||||
drop database mysqltest1;
|
||||
|
||||
#
|
||||
# End of 10.1 tests
|
||||
#
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-20076: SHOW GRANTS does not quote role names properly
|
||||
--echo #
|
||||
|
@ -158,3 +158,13 @@ show triggers like '%T1%';
|
||||
Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation
|
||||
drop table t1;
|
||||
set GLOBAL sql_mode=default;
|
||||
#
|
||||
# MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... SELECT in ORACLE mode
|
||||
#
|
||||
# Compatibility schema names respect the filesystem case sensitivity
|
||||
CREATE TABLE t1 (a MARIADB_SCHEMA.date);
|
||||
ERROR HY000: Unknown data type: 'MARIADB_SCHEMA.date'
|
||||
CREATE TABLE t1 (a Mariadb_schema.date);
|
||||
ERROR HY000: Unknown data type: 'Mariadb_schema.date'
|
||||
CREATE TABLE t1 (a mariadb_schema.date);
|
||||
DROP TABLE t1;
|
||||
|
@ -130,3 +130,18 @@ let $datadir= `select @@datadir`;
|
||||
remove_file $datadir/mysql_upgrade_info;
|
||||
|
||||
set GLOBAL sql_mode=default;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... SELECT in ORACLE mode
|
||||
--echo #
|
||||
|
||||
--echo # Compatibility schema names respect the filesystem case sensitivity
|
||||
|
||||
--error ER_UNKNOWN_ERROR
|
||||
CREATE TABLE t1 (a MARIADB_SCHEMA.date);
|
||||
--error ER_UNKNOWN_ERROR
|
||||
CREATE TABLE t1 (a Mariadb_schema.date);
|
||||
|
||||
CREATE TABLE t1 (a mariadb_schema.date);
|
||||
DROP TABLE t1;
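Note (not part of the patch): the qualifier before the type name is resolved like a schema name, so its case sensitivity follows the filesystem, while the type name after the dot stays case-insensitive. A hypothetical sketch:
CREATE TABLE demo (d mariadb_schema.DATE);    # lower-case qualifier, accepted
SHOW CREATE TABLE demo;
DROP TABLE demo;
# CREATE TABLE demo (d MARIADB_SCHEMA.date);  # fails on case-sensitive filesystems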
|
||||
|
@ -619,7 +619,7 @@ select 4 - 3 * 2, (4 - 3) * 2, 4 - (3 * 2);
|
||||
Testing that / is left associative
|
||||
select 15 / 5 / 3, (15 / 5) / 3, 15 / (5 / 3);
|
||||
15 / 5 / 3 (15 / 5) / 3 15 / (5 / 3)
|
||||
1.00000000 1.00000000 9.0000
|
||||
1.00000000 1.00000000 8.9998
|
||||
Testing that / has precedence over |
|
||||
select 105 / 5 | 2, (105 / 5) | 2, 105 / (5 | 2);
|
||||
105 / 5 | 2 (105 / 5) | 2 105 / (5 | 2)
|
||||
|
@ -2763,5 +2763,45 @@ SELECT 1 FROM t1 WHERE a XOR 'a';
|
||||
1
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# Bug #25207522: INCORRECT ORDER-BY BEHAVIOR ON A PARTITIONED TABLE
|
||||
# WITH A COMPOSITE PREFIX INDEX
|
||||
#
|
||||
create table t1(id int unsigned not null,
|
||||
data varchar(2) default null,
|
||||
key data_idx (data(1),id)
|
||||
) default charset=utf8
|
||||
partition by range (id) (
|
||||
partition p10 values less than (10),
|
||||
partition p20 values less than (20)
|
||||
);
|
||||
insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ;
|
||||
select id from t1 where data = 'ab' order by id;
|
||||
id
|
||||
4
|
||||
5
|
||||
6
|
||||
14
|
||||
15
|
||||
16
|
||||
drop table t1;
|
||||
create table t1(id int unsigned not null,
|
||||
data text default null,
|
||||
key data_idx (data(1),id)
|
||||
) default charset=utf8
|
||||
partition by range (id) (
|
||||
partition p10 values less than (10),
|
||||
partition p20 values less than (20)
|
||||
);
|
||||
insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ;
|
||||
select id from t1 where data = 'ab' order by id;
|
||||
id
|
||||
4
|
||||
5
|
||||
6
|
||||
14
|
||||
15
|
||||
16
|
||||
drop table t1;
|
||||
#
|
||||
# End of 10.1 tests
|
||||
#
|
||||
|
@ -2973,6 +2973,34 @@ CREATE TABLE t1(a BINARY(80)) PARTITION BY KEY(a) PARTITIONS 3;
|
||||
SELECT 1 FROM t1 WHERE a XOR 'a';
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # Bug #25207522: INCORRECT ORDER-BY BEHAVIOR ON A PARTITIONED TABLE
|
||||
--echo # WITH A COMPOSITE PREFIX INDEX
|
||||
--echo #
|
||||
create table t1(id int unsigned not null,
|
||||
data varchar(2) default null,
|
||||
key data_idx (data(1),id)
|
||||
) default charset=utf8
|
||||
partition by range (id) (
|
||||
partition p10 values less than (10),
|
||||
partition p20 values less than (20)
|
||||
);
|
||||
insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ;
|
||||
select id from t1 where data = 'ab' order by id;
|
||||
drop table t1;
|
||||
|
||||
create table t1(id int unsigned not null,
|
||||
data text default null,
|
||||
key data_idx (data(1),id)
|
||||
) default charset=utf8
|
||||
partition by range (id) (
|
||||
partition p10 values less than (10),
|
||||
partition p20 values less than (20)
|
||||
);
|
||||
insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ;
|
||||
select id from t1 where data = 'ab' order by id;
|
||||
drop table t1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.1 tests
|
||||
--echo #
|
||||
|
@ -9,5 +9,38 @@ ANALYZE TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
SET use_stat_tables = DEFAULT;
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-21472: ALTER TABLE ... ANALYZE PARTITION ... with EITS reads and locks all rows
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
id int(11) auto_increment primary key,
|
||||
c1 int(11) DEFAULT NULL
|
||||
) PARTITION BY RANGE (id) (
|
||||
PARTITION p0 VALUES LESS THAN (4),
|
||||
PARTITION p1 VALUES LESS THAN MAXVALUE
|
||||
);
|
||||
insert into t1(c1) values (1),(1),(1),(1), (1),(1),(1),(1);
|
||||
insert into t1(c1) select c1 from t1;
|
||||
insert into t1(c1) select c1 from t1;
|
||||
select count(*) from t1;
|
||||
count(*)
|
||||
32
|
||||
select count(*) from t1 where id <4;
|
||||
count(*)
|
||||
3
|
||||
flush status;
|
||||
set session use_stat_tables='preferably';
|
||||
# Must NOT show "Engine-independent statistics collected":
|
||||
alter table t1 analyze partition p0;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
# Should not have Handler_read_rnd_next=34
|
||||
show session status like 'Handler_read_rnd%';
|
||||
Variable_name Value
|
||||
Handler_read_rnd 0
|
||||
Handler_read_rnd_deleted 0
|
||||
Handler_read_rnd_next 34
|
||||
drop table t1;
|
||||
SET use_stat_tables = DEFAULT;
|
||||
|
@ -11,7 +11,33 @@ CREATE TABLE t1 (pk int PRIMARY KEY, a bit(1), INDEX idx(a)
|
||||
INSERT INTO t1 VALUES (1,1),(2,0),(3,0),(4,1);
|
||||
|
||||
ANALYZE TABLE t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-21472: ALTER TABLE ... ANALYZE PARTITION ... with EITS reads and locks all rows
|
||||
--echo #
|
||||
CREATE TABLE t1 (
|
||||
id int(11) auto_increment primary key,
|
||||
c1 int(11) DEFAULT NULL
|
||||
) PARTITION BY RANGE (id) (
|
||||
PARTITION p0 VALUES LESS THAN (4),
|
||||
PARTITION p1 VALUES LESS THAN MAXVALUE
|
||||
);
|
||||
|
||||
insert into t1(c1) values (1),(1),(1),(1), (1),(1),(1),(1);
|
||||
insert into t1(c1) select c1 from t1;
|
||||
insert into t1(c1) select c1 from t1;
|
||||
|
||||
select count(*) from t1;
|
||||
select count(*) from t1 where id <4;
|
||||
flush status;
|
||||
set session use_stat_tables='preferably';
|
||||
|
||||
--echo # Must NOT show "Engine-independent statistics collected":
|
||||
alter table t1 analyze partition p0;
|
||||
|
||||
--echo # Should not have Handler_read_rnd_next=34
|
||||
show session status like 'Handler_read_rnd%';
|
||||
drop table t1;
|
||||
|
||||
SET use_stat_tables = DEFAULT;
|
||||
|
||||
DROP TABLE t1;
|
||||
|
@ -2585,6 +2585,30 @@ e 2
|
||||
o 6
|
||||
DROP TABLE t1, t2;
|
||||
#
|
||||
# MDEV-19232: Floating point precision / value comparison problem
|
||||
#
|
||||
CREATE TABLE t1 (region varchar(60), area decimal(10,0), population decimal(11,0));
|
||||
INSERT INTO t1 VALUES ('Central America and the Caribbean',91,11797);
|
||||
INSERT INTO t1 VALUES ('Central America and the Caribbean',442,66422);
|
||||
SET @save_optimizer_switch=@@optimizer_switch;
|
||||
SET optimizer_switch='subquery_cache=on';
|
||||
SELECT
|
||||
population, area, population/area,
|
||||
cast(population/area as DECIMAL(20,9)) FROM t1 LIMIT 1;
|
||||
population area population/area cast(population/area as DECIMAL(20,9))
|
||||
11797 91 129.6374 129.637400000
|
||||
SELECT * FROM t1 A
|
||||
WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region);
|
||||
region area population
|
||||
Central America and the Caribbean 442 66422
|
||||
SET optimizer_switch='subquery_cache=off';
|
||||
SELECT * FROM t1 A
|
||||
WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region);
|
||||
region area population
|
||||
Central America and the Caribbean 442 66422
|
||||
SET @@optimizer_switch= @save_optimizer_switch;
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-22852: SIGSEGV in sortlength (optimized builds)
|
||||
#
|
||||
SET @save_optimizer_switch=@@optimizer_switch;
|
||||
|
@ -2115,6 +2115,32 @@ EXPLAIN EXTENDED SELECT * FROM t1 where ( t1.l1 < ANY (SELECT MAX(t2.v1) FROM t
|
||||
SELECT * FROM t1 where ( t1.l1 < ANY (SELECT MAX(t2.v1) FROM t2));
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-19232: Floating point precision / value comparison problem
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (region varchar(60), area decimal(10,0), population decimal(11,0));
|
||||
INSERT INTO t1 VALUES ('Central America and the Caribbean',91,11797);
|
||||
INSERT INTO t1 VALUES ('Central America and the Caribbean',442,66422);
|
||||
|
||||
SET @save_optimizer_switch=@@optimizer_switch;
|
||||
SET optimizer_switch='subquery_cache=on';
|
||||
|
||||
SELECT
|
||||
population, area, population/area,
|
||||
cast(population/area as DECIMAL(20,9)) FROM t1 LIMIT 1;
|
||||
|
||||
SELECT * FROM t1 A
|
||||
WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region);
|
||||
|
||||
SET optimizer_switch='subquery_cache=off';
|
||||
SELECT * FROM t1 A
|
||||
WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region);
|
||||
|
||||
SET @@optimizer_switch= @save_optimizer_switch;
|
||||
|
||||
DROP TABLE t1;
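Note (not part of the patch): the comparison can fail when the cached subquery result and the freshly computed outer division carry different precision; decimal values that differ only in how they were rounded then compare as unequal. A hypothetical illustration of that effect:
SELECT CAST(11797/91 AS DECIMAL(20,4)) = CAST(11797/91 AS DECIMAL(20,9)) AS same_value;
# 129.6374 vs 129.637362637 -> 0 (not equal)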
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-22852: SIGSEGV in sortlength (optimized builds)
|
||||
--echo #
|
||||
|
@ -975,4 +975,131 @@ id
|
||||
DROP PROCEDURE p1;
|
||||
DROP TABLE t1;
|
||||
# End of 10.0 tests
|
||||
#
|
||||
# MDEV-23221: A subquery causes crash
|
||||
#
|
||||
create table t1 (
|
||||
location_code varchar(10),
|
||||
country_id varchar(10)
|
||||
);
|
||||
insert into t1 values ('HKG', 'HK');
|
||||
insert into t1 values ('NYC', 'US');
|
||||
insert into t1 values ('LAX', 'US');
|
||||
create table t2 (
|
||||
container_id varchar(10),
|
||||
cntr_activity_type varchar(10),
|
||||
cntr_dest varchar(10)
|
||||
);
|
||||
insert into t2 values ('AAAA1111', 'VSL', 'NYC');
|
||||
insert into t2 values ('AAAA1111', 'CUV', 'NYC');
|
||||
insert into t2 values ('BBBB2222', 'VSL', 'LAX');
|
||||
insert into t2 values ('BBBB2222', 'XYZ', 'LAX');
|
||||
# Must not crash or return an error:
|
||||
select
|
||||
(select country_id from t1 where location_code = cl1.cntr_dest) as dest_cntry,
|
||||
(select
|
||||
max(container_id)
|
||||
from t2 as cl2
|
||||
where
|
||||
cl2.container_id = cl1.container_id and
|
||||
cl2.cntr_activity_type = 'CUV' and
|
||||
exists (select location_code
|
||||
from t1
|
||||
where
|
||||
location_code = cl2.cntr_dest and
|
||||
country_id = dest_cntry)
|
||||
) as CUV
|
||||
from
|
||||
t2 cl1;
|
||||
dest_cntry CUV
|
||||
US AAAA1111
|
||||
US AAAA1111
|
||||
US NULL
|
||||
US NULL
|
||||
prepare s from "select
|
||||
(select country_id from t1 where location_code = cl1.cntr_dest) as dest_cntry,
|
||||
(select
|
||||
max(container_id)
|
||||
from t2 as cl2
|
||||
where
|
||||
cl2.container_id = cl1.container_id and
|
||||
cl2.cntr_activity_type = 'CUV' and
|
||||
exists (select location_code
|
||||
from t1
|
||||
where
|
||||
location_code = cl2.cntr_dest and
|
||||
country_id = dest_cntry)
|
||||
) as CUV
|
||||
from
|
||||
t2 cl1";
|
||||
execute s;
|
||||
dest_cntry CUV
|
||||
US AAAA1111
|
||||
US AAAA1111
|
||||
US NULL
|
||||
US NULL
|
||||
execute s;
|
||||
dest_cntry CUV
|
||||
US AAAA1111
|
||||
US AAAA1111
|
||||
US NULL
|
||||
US NULL
|
||||
drop table t1,t2;
|
||||
#
|
||||
# MDEV-20557: SQL query with duplicate table aliases consistently crashes server
|
||||
# (Just a testcase)
|
||||
#
|
||||
create table t1 (id int, id2 int);
|
||||
create table t2 (id int, id2 int, a int);
|
||||
create table t3 (id int);
|
||||
create table t4 (id int);
|
||||
select (select 1 from t1 where (exists
|
||||
(select 1 from t2
|
||||
where t2.a = (select t4.id from t4 where t4.id = t3.id) and t2.id2 = t1.id2))) dt
|
||||
from t3;
|
||||
ERROR 42000: This version of MariaDB doesn't yet support 'SUBQUERY in ROW in left expression of IN/ALL/ANY'
|
||||
drop table t1,t2,t3,t4;
|
||||
#
|
||||
# MDEV-21649: Crash when using nested EXISTS
|
||||
# (Just a testcase)
|
||||
#
|
||||
CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id));
|
||||
CREATE TABLE t2 (id INT NOT NULL AUTO_INCREMENT, ip_id INT, PRIMARY KEY(id));
|
||||
CREATE TABLE t3 (id INT NOT NULL AUTO_INCREMENT, storage_method_id INT, storage_target_id INT, PRIMARY KEY(id));
|
||||
SELECT
|
||||
W0.`id`
|
||||
FROM
|
||||
`t1` W0
|
||||
WHERE (
|
||||
EXISTS(
|
||||
SELECT
|
||||
V0.`id`
|
||||
FROM
|
||||
`t2` V0
|
||||
WHERE (
|
||||
EXISTS(
|
||||
SELECT
|
||||
U0.`id`
|
||||
FROM
|
||||
`t2` U0
|
||||
INNER JOIN `t3` U4 ON (U0.`id` = U4.`storage_target_id`)
|
||||
WHERE (
|
||||
U0.`ip_id` = V0.`ip_id`
|
||||
AND U4.`storage_method_id` = (
|
||||
SELECT
|
||||
U5.`storage_method_id`
|
||||
FROM
|
||||
`t3` U5
|
||||
WHERE
|
||||
U5.`storage_target_id` = V0.`id`
|
||||
LIMIT
|
||||
1
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
);
|
||||
id
|
||||
drop table t1,t2,t3;
|
||||
set optimizer_switch=default;
|
||||
|
@ -829,5 +829,117 @@ DROP TABLE t1;
|
||||
|
||||
--echo # End of 10.0 tests
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-23221: A subquery causes crash
|
||||
--echo #
|
||||
create table t1 (
|
||||
location_code varchar(10),
|
||||
country_id varchar(10)
|
||||
);
|
||||
insert into t1 values ('HKG', 'HK');
|
||||
insert into t1 values ('NYC', 'US');
|
||||
insert into t1 values ('LAX', 'US');
|
||||
|
||||
create table t2 (
|
||||
container_id varchar(10),
|
||||
cntr_activity_type varchar(10),
|
||||
cntr_dest varchar(10)
|
||||
);
|
||||
insert into t2 values ('AAAA1111', 'VSL', 'NYC');
|
||||
insert into t2 values ('AAAA1111', 'CUV', 'NYC');
|
||||
insert into t2 values ('BBBB2222', 'VSL', 'LAX');
|
||||
insert into t2 values ('BBBB2222', 'XYZ', 'LAX');
|
||||
|
||||
let $query=
|
||||
select
|
||||
(select country_id from t1 where location_code = cl1.cntr_dest) as dest_cntry,
|
||||
(select
|
||||
max(container_id)
|
||||
from t2 as cl2
|
||||
where
|
||||
cl2.container_id = cl1.container_id and
|
||||
cl2.cntr_activity_type = 'CUV' and
|
||||
exists (select location_code
|
||||
from t1
|
||||
where
|
||||
location_code = cl2.cntr_dest and
|
||||
country_id = dest_cntry)
|
||||
) as CUV
|
||||
from
|
||||
t2 cl1;
|
||||
|
||||
--echo # Must not crash or return an error:
|
||||
eval $query;
|
||||
|
||||
eval prepare s from "$query";
|
||||
execute s;
|
||||
execute s;
|
||||
|
||||
drop table t1,t2;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-20557: SQL query with duplicate table aliases consistently crashes server
|
||||
--echo # (Just a testcase)
|
||||
--echo #
|
||||
|
||||
create table t1 (id int, id2 int);
|
||||
create table t2 (id int, id2 int, a int);
|
||||
create table t3 (id int);
|
||||
create table t4 (id int);
|
||||
|
||||
--error ER_NOT_SUPPORTED_YET
|
||||
select (select 1 from t1 where (exists
|
||||
(select 1 from t2
|
||||
where t2.a = (select t4.id from t4 where t4.id = t3.id) and t2.id2 = t1.id2))) dt
|
||||
from t3;
|
||||
|
||||
drop table t1,t2,t3,t4;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-21649: Crash when using nested EXISTS
|
||||
--echo # (Just a testcase)
|
||||
--echo #
|
||||
CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id));
|
||||
CREATE TABLE t2 (id INT NOT NULL AUTO_INCREMENT, ip_id INT, PRIMARY KEY(id));
|
||||
CREATE TABLE t3 (id INT NOT NULL AUTO_INCREMENT, storage_method_id INT, storage_target_id INT, PRIMARY KEY(id));
|
||||
|
||||
SELECT
|
||||
W0.`id`
|
||||
FROM
|
||||
`t1` W0
|
||||
WHERE (
|
||||
EXISTS(
|
||||
SELECT
|
||||
V0.`id`
|
||||
FROM
|
||||
`t2` V0
|
||||
WHERE (
|
||||
EXISTS(
|
||||
SELECT
|
||||
U0.`id`
|
||||
FROM
|
||||
`t2` U0
|
||||
INNER JOIN `t3` U4 ON (U0.`id` = U4.`storage_target_id`)
|
||||
WHERE (
|
||||
U0.`ip_id` = V0.`ip_id`
|
||||
AND U4.`storage_method_id` = (
|
||||
SELECT
|
||||
U5.`storage_method_id`
|
||||
FROM
|
||||
`t3` U5
|
||||
WHERE
|
||||
U5.`storage_target_id` = V0.`id`
|
||||
LIMIT
|
||||
1
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
);
|
||||
|
||||
drop table t1,t2,t3;
|
||||
|
||||
#restore defaults
|
||||
set optimizer_switch=default;
|
||||
|
@ -676,6 +676,27 @@ Warnings:
|
||||
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2010e0
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-23282 FLOAT(53,0) badly handles out-of-range values
|
||||
#
|
||||
CREATE OR REPLACE TABLE t1 (c1 FLOAT NOT NULL, c2 FLOAT NOT NULL);
|
||||
INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40);
|
||||
Warnings:
|
||||
Warning 1264 Out of range value for column 'c1' at row 1
|
||||
Warning 1264 Out of range value for column 'c2' at row 1
|
||||
SELECT c1, c2 FROM t1;
|
||||
c1 c2
|
||||
3.40282e38 -3.40282e38
|
||||
DROP TABLE t1;
|
||||
CREATE OR REPLACE TABLE t1 (c1 FLOAT(53,0) NOT NULL, c2 FLOAT(53,0) NOT NULL);
|
||||
INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40);
|
||||
Warnings:
|
||||
Warning 1264 Out of range value for column 'c1' at row 1
|
||||
Warning 1264 Out of range value for column 'c2' at row 1
|
||||
SELECT c1, c2 FROM t1;
|
||||
c1 c2
|
||||
340282346638528860000000000000000000000 -340282346638528860000000000000000000000
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# End of 10.1 tests
|
||||
#
|
||||
#
|
||||
|
@ -485,6 +485,20 @@ EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2010e0 AND a>=2010e0;
|
||||
DROP TABLE t1;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-23282 FLOAT(53,0) badly handles out-of-range values
|
||||
--echo #
|
||||
|
||||
CREATE OR REPLACE TABLE t1 (c1 FLOAT NOT NULL, c2 FLOAT NOT NULL);
|
||||
INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40);
|
||||
SELECT c1, c2 FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
CREATE OR REPLACE TABLE t1 (c1 FLOAT(53,0) NOT NULL, c2 FLOAT(53,0) NOT NULL);
|
||||
INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40);
|
||||
SELECT c1, c2 FROM t1;
|
||||
DROP TABLE t1;
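Note (not part of the patch): both column types clamp the out-of-range value to the FLOAT maximum (about 3.40282e38); the visible difference is only in display, since FLOAT(53,0) prints the clamped value as a fixed-point integer. A small sketch of the same idea, using a hypothetical table demo:
CREATE OR REPLACE TABLE demo (f FLOAT, g FLOAT(53,0));
INSERT IGNORE INTO demo VALUES (1e40, 1e40);   # both clamped, with out-of-range warnings
SELECT f, g FROM demo;                         # same stored value, different display
DROP TABLE demo;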
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.1 tests
|
||||
--echo #
|
||||
|
@ -1532,11 +1532,8 @@ select (1.20396873 * 0.89550000 * 0.68000000 * 1.08721696 * 0.99500000 *
|
||||
1.01500000 * 1.01500000 * 0.99500000)
|
||||
0.81298807395367312459230693948000000000
|
||||
create table t1 as select 5.05 / 0.014;
|
||||
Warnings:
|
||||
Note 1265 Data truncated for column '5.05 / 0.014' at row 1
|
||||
show warnings;
|
||||
Level Code Message
|
||||
Note 1265 Data truncated for column '5.05 / 0.014' at row 1
|
||||
show create table t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
@ -1651,8 +1648,6 @@ my_col
|
||||
0.12345678912345678912345678912345678912
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 SELECT 1 / .123456789123456789123456789123456789123456789123456789123456789123456789123456789 AS my_col;
|
||||
Warnings:
|
||||
Note 1265 Data truncated for column 'my_col' at row 1
|
||||
DESCRIBE t1;
|
||||
Field Type Null Key Default Extra
|
||||
my_col decimal(65,4) YES NULL
|
||||
|
@ -91,8 +91,6 @@ DROP INDEX test ON t1;
|
||||
insert into t1 values (10, 1,1,1,1,1,1,1,1,1,1,1,1,1,NULL,0,0,0,1,1,1,1,'one','one');
|
||||
insert into t1 values (NULL,2,2,2,2,2,2,2,2,2,2,2,2,2,NULL,NULL,NULL,NULL,NULL,NULL,2,2,'two','two,one');
|
||||
insert ignore into t1 values (0,1/3,3,3,3,3,3,3,3,3,3,3,3,3,NULL,'19970303','10:10:10','19970303101010','','','','3',3,3);
|
||||
Warnings:
|
||||
Warning 1265 Data truncated for column 'string' at row 1
|
||||
insert ignore into t1 values (0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,NULL,19970807,080706,19970403090807,-1,-1,-1,'-1',-1,-1);
|
||||
Warnings:
|
||||
Warning 1264 Out of range value for column 'utiny' at row 1
|
||||
@ -130,7 +128,7 @@ select auto,string,tiny,short,medium,long_int,longlong,real_float,real_double,ut
|
||||
auto string tiny short medium long_int longlong real_float real_double utiny ushort umedium ulong ulonglong mod(floor(time_stamp/1000000),1000000)-mod(curdate(),1000000) date_field time_field date_time blob_col tinyblob_col mediumblob_col longblob_col
|
||||
10 1 1 1 1 1 1 1.0 1.0000 1 00001 1 1 1 0 0000-00-00 00:00:00 0000-00-00 00:00:00 1 1 1 1
|
||||
11 2 2 2 2 2 2 2.0 2.0000 2 00002 2 2 2 0 NULL NULL NULL NULL NULL 2 2
|
||||
12 0.33333333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3
|
||||
12 0.3333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3
|
||||
13 -1 -1 -1 -1 -1 -1 -1.0 -1.0000 0 00000 0 0 0 0 1997-08-07 08:07:06 1997-04-03 09:08:07 -1 -1 -1 -1
|
||||
14 -429496729 -128 -32768 -8388608 -2147483648 -4294967295 -4294967296.0 -4294967295.0000 0 00000 0 0 0 0 0000-00-00 00:00:00 0000-00-00 00:00:00 -4294967295 -4294967295 -4294967295 -4294967295
|
||||
15 4294967295 127 32767 8388607 2147483647 4294967295 4294967296.0 4294967295.0000 255 65535 16777215 4294967295 4294967295 0 0000-00-00 00:00:00 0000-00-00 00:00:00 4294967295 4294967295 4294967295 4294967295
|
||||
@ -182,7 +180,7 @@ Warning 1265 Data truncated for column 'new_field' at row 7
|
||||
select * from t2;
|
||||
auto string mediumblob_col new_field
|
||||
1 2 2 ne
|
||||
2 0.33333333 ne
|
||||
2 0.3333 ne
|
||||
3 -1 -1 ne
|
||||
4 -429496729 -4294967295 ne
|
||||
5 4294967295 4294967295 ne
|
||||
|
@ -177,6 +177,7 @@ my @DEFAULT_SUITES= qw(
|
||||
csv-
|
||||
compat/oracle-
|
||||
compat/mssql-
|
||||
compat/maxdb-
|
||||
encryption-
|
||||
federated-
|
||||
funcs_1-
|
||||
@ -687,8 +688,7 @@ sub run_test_server ($$$) {
|
||||
My::CoreDump->show($core_file, $exe_mysqld, $opt_parallel);
|
||||
|
||||
# Limit number of core files saved
|
||||
if ($opt_max_save_core > 0 &&
|
||||
$num_saved_cores >= $opt_max_save_core)
|
||||
if ($num_saved_cores >= $opt_max_save_core)
|
||||
{
|
||||
mtr_report(" - deleting it, already saved",
|
||||
"$opt_max_save_core");
|
||||
@ -704,8 +704,7 @@ sub run_test_server ($$$) {
|
||||
},
|
||||
$worker_savedir);
|
||||
|
||||
if ($opt_max_save_datadir > 0 &&
|
||||
$num_saved_datadir >= $opt_max_save_datadir)
|
||||
if ($num_saved_datadir >= $opt_max_save_datadir)
|
||||
{
|
||||
mtr_report(" - skipping '$worker_savedir/'");
|
||||
rmtree($worker_savedir);
|
||||
@ -714,9 +713,9 @@ sub run_test_server ($$$) {
|
||||
{
|
||||
mtr_report(" - saving '$worker_savedir/' to '$savedir/'");
|
||||
rename($worker_savedir, $savedir);
|
||||
$num_saved_datadir++;
|
||||
}
|
||||
resfile_print_test();
|
||||
$num_saved_datadir++;
|
||||
$num_failed_test++ unless ($result->{retries} ||
|
||||
$result->{exp_fail});
|
||||
|
||||
@ -1273,6 +1272,17 @@ sub command_line_setup {
|
||||
report_option('verbose', $opt_verbose);
|
||||
}
|
||||
|
||||
# Negative values aren't meaningful on integer options
|
||||
foreach(grep(/=i$/, keys %options))
|
||||
{
|
||||
if (defined ${$options{$_}} &&
|
||||
do { no warnings "numeric"; int ${$options{$_}} < 0})
|
||||
{
|
||||
my $v= (split /=/)[0];
|
||||
die("$v doesn't accept a negative value:");
|
||||
}
|
||||
}
|
||||
|
||||
# Find the absolute path to the test directory
|
||||
$glob_mysql_test_dir= cwd();
|
||||
if ($glob_mysql_test_dir =~ / /)
|
||||
@ -6421,12 +6431,12 @@ Options for debugging the product
|
||||
test(s)
|
||||
max-save-core Limit the number of core files saved (to avoid filling
|
||||
up disks for heavily crashing server). Defaults to
|
||||
$opt_max_save_core, set to 0 for no limit. Set
|
||||
it's default with MTR_MAX_SAVE_CORE
|
||||
$opt_max_save_core. Set its default with
|
||||
MTR_MAX_SAVE_CORE
|
||||
max-save-datadir Limit the number of datadir saved (to avoid filling
|
||||
up disks for heavily crashing server). Defaults to
|
||||
$opt_max_save_datadir, set to 0 for no limit. Set
|
||||
it's default with MTR_MAX_SAVE_DATADIR
|
||||
$opt_max_save_datadir. Set its default with
|
||||
MTR_MAX_SAVE_DATADIR
|
||||
max-test-fail Limit the number of test failures before aborting
|
||||
the current test run. Defaults to
|
||||
$opt_max_test_fail, set to 0 for no limit. Set
|
||||
|
65
mysql-test/suite/compat/maxdb/rpl_mariadb_timestamp.result
Normal file
@ -0,0 +1,65 @@
|
||||
include/master-slave.inc
|
||||
[connection master]
|
||||
#
|
||||
# MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... SELECT in ORACLE mode
|
||||
#
|
||||
SET timestamp=UNIX_TIMESTAMP('2001-01-01 10:00:00');
|
||||
SET sql_mode=DEFAULT;
|
||||
CREATE TABLE t1 (a TIMESTAMP);
|
||||
INSERT INTO t1 VALUES (NULL);
|
||||
INSERT INTO t1 VALUES ('2001-01-01 10:20:30');
|
||||
SET sql_mode=MAXDB;
|
||||
CREATE TABLE t2 SELECT * FROM t1;
|
||||
SET timestamp=DEFAULT;
|
||||
include/show_binlog_events.inc
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
master-bin.000001 # Gtid # # GTID #-#-#
|
||||
master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a TIMESTAMP)
|
||||
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
|
||||
master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (NULL)
|
||||
master-bin.000001 # Table_map # # table_id: # (test.t1)
|
||||
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
|
||||
master-bin.000001 # Query # # COMMIT
|
||||
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
|
||||
master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES ('2001-01-01 10:20:30')
|
||||
master-bin.000001 # Table_map # # table_id: # (test.t1)
|
||||
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
|
||||
master-bin.000001 # Query # # COMMIT
|
||||
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
|
||||
master-bin.000001 # Query # # use `test`; CREATE TABLE "t2" (
|
||||
"a" mariadb_schema.timestamp NOT NULL DEFAULT current_timestamp()
|
||||
)
|
||||
master-bin.000001 # Annotate_rows # # CREATE TABLE t2 SELECT * FROM t1
|
||||
master-bin.000001 # Table_map # # table_id: # (test.t2)
|
||||
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
|
||||
master-bin.000001 # Query # # COMMIT
|
||||
connection slave;
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
2001-01-01 10:00:00
|
||||
2001-01-01 10:20:30
|
||||
SET sql_mode=DEFAULT;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp()
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
SHOW CREATE TABLE t2;
|
||||
Table Create Table
|
||||
t2 CREATE TABLE `t2` (
|
||||
`a` timestamp NOT NULL DEFAULT current_timestamp()
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
SET sql_mode=MAXDB;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"a" mariadb_schema.timestamp NOT NULL DEFAULT current_timestamp()
|
||||
)
|
||||
SHOW CREATE TABLE t2;
|
||||
Table Create Table
|
||||
t2 CREATE TABLE "t2" (
|
||||
"a" mariadb_schema.timestamp NOT NULL DEFAULT current_timestamp()
|
||||
)
|
||||
connection master;
|
||||
DROP TABLE t1, t2;
|
||||
include/rpl_end.inc
|
34
mysql-test/suite/compat/maxdb/rpl_mariadb_timestamp.test
Normal file
@ -0,0 +1,34 @@
|
||||
--source include/have_binlog_format_row.inc
|
||||
--source include/master-slave.inc
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... SELECT in ORACLE mode
|
||||
--echo #
|
||||
|
||||
SET timestamp=UNIX_TIMESTAMP('2001-01-01 10:00:00');
|
||||
SET sql_mode=DEFAULT;
|
||||
CREATE TABLE t1 (a TIMESTAMP);
|
||||
INSERT INTO t1 VALUES (NULL);
|
||||
INSERT INTO t1 VALUES ('2001-01-01 10:20:30');
|
||||
SET sql_mode=MAXDB;
|
||||
CREATE TABLE t2 SELECT * FROM t1;
|
||||
SET timestamp=DEFAULT;
|
||||
|
||||
--let $binlog_file = LAST
|
||||
source include/show_binlog_events.inc;
|
||||
|
||||
|
||||
--sync_slave_with_master
|
||||
SELECT * FROM t1;
|
||||
SET sql_mode=DEFAULT;
|
||||
SHOW CREATE TABLE t1;
|
||||
SHOW CREATE TABLE t2;
|
||||
|
||||
SET sql_mode=MAXDB;
|
||||
SHOW CREATE TABLE t1;
|
||||
SHOW CREATE TABLE t2;
|
||||
|
||||
--connection master
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
--source include/rpl_end.inc
|
53
mysql-test/suite/compat/maxdb/type_timestamp.result
Normal file
@ -0,0 +1,53 @@
|
||||
#
|
||||
# MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... SELECT in ORACLE mode
|
||||
#
|
||||
SET sql_mode=DEFAULT;
|
||||
CREATE TABLE t1 (
|
||||
def_timestamp TIMESTAMP,
|
||||
mdb_timestamp mariadb_schema.TIMESTAMP,
|
||||
ora_timestamp oracle_schema.TIMESTAMP,
|
||||
max_timestamp maxdb_schema.TIMESTAMP
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`def_timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
|
||||
`mdb_timestamp` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
|
||||
`ora_timestamp` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
|
||||
`max_timestamp` datetime DEFAULT NULL
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
SET sql_mode=MAXDB;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"def_timestamp" mariadb_schema.timestamp NOT NULL DEFAULT current_timestamp(),
|
||||
"mdb_timestamp" mariadb_schema.timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
|
||||
"ora_timestamp" mariadb_schema.timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
|
||||
"max_timestamp" datetime DEFAULT NULL
|
||||
)
|
||||
DROP TABLE t1;
|
||||
SET sql_mode=MAXDB;
|
||||
CREATE TABLE t1 (
|
||||
def_timestamp TIMESTAMP,
|
||||
mdb_timestamp mariadb_schema.TIMESTAMP,
|
||||
ora_timestamp oracle_schema.TIMESTAMP,
|
||||
max_timestamp maxdb_schema.TIMESTAMP
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"def_timestamp" datetime DEFAULT NULL,
|
||||
"mdb_timestamp" mariadb_schema.timestamp NOT NULL DEFAULT current_timestamp(),
|
||||
"ora_timestamp" mariadb_schema.timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
|
||||
"max_timestamp" datetime DEFAULT NULL
|
||||
)
|
||||
SET sql_mode=DEFAULT;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`def_timestamp` datetime DEFAULT NULL,
|
||||
`mdb_timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
|
||||
`ora_timestamp` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
|
||||
`max_timestamp` datetime DEFAULT NULL
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
DROP TABLE t1;
|
29
mysql-test/suite/compat/maxdb/type_timestamp.test
Normal file
@ -0,0 +1,29 @@
|
||||
--echo #
|
||||
--echo # MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... SELECT in ORACLE mode
|
||||
--echo #
|
||||
|
||||
|
||||
SET sql_mode=DEFAULT;
|
||||
CREATE TABLE t1 (
|
||||
def_timestamp TIMESTAMP,
|
||||
mdb_timestamp mariadb_schema.TIMESTAMP,
|
||||
ora_timestamp oracle_schema.TIMESTAMP,
|
||||
max_timestamp maxdb_schema.TIMESTAMP
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
SET sql_mode=MAXDB;
|
||||
SHOW CREATE TABLE t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
|
||||
SET sql_mode=MAXDB;
|
||||
CREATE TABLE t1 (
|
||||
def_timestamp TIMESTAMP,
|
||||
mdb_timestamp mariadb_schema.TIMESTAMP,
|
||||
ora_timestamp oracle_schema.TIMESTAMP,
|
||||
max_timestamp maxdb_schema.TIMESTAMP
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
SET sql_mode=DEFAULT;
|
||||
SHOW CREATE TABLE t1;
|
||||
DROP TABLE t1;
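Note (not part of the patch): under sql_mode=MAXDB a plain TIMESTAMP column is created as DATETIME, so the qualified mariadb_schema.TIMESTAMP spelling is what preserves the native type, as the result file above records. A hypothetical sketch:
SET sql_mode=MAXDB;
CREATE TABLE demo (a TIMESTAMP, b mariadb_schema.TIMESTAMP);
SET sql_mode=DEFAULT;
SHOW CREATE TABLE demo;   # a becomes datetime, b stays timestamp
DROP TABLE demo;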
|
86
mysql-test/suite/compat/oracle/r/rpl_mariadb_date.result
Normal file
@ -0,0 +1,86 @@
|
||||
include/master-slave.inc
|
||||
[connection master]
|
||||
SET SQL_MODE=DEFAULT;
|
||||
CREATE TABLE t1 (a DATE);
|
||||
INSERT INTO t1 VALUES (NULL);
|
||||
INSERT INTO t1 VALUES ('2001-01-01');
|
||||
SET SQL_MODE= ORACLE;
|
||||
CREATE TABLE t2 SELECT * FROM t1;
|
||||
include/show_binlog_events.inc
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
master-bin.000001 # Gtid # # GTID #-#-#
|
||||
master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a DATE)
|
||||
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
|
||||
master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (NULL)
|
||||
master-bin.000001 # Table_map # # table_id: # (test.t1)
|
||||
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
|
||||
master-bin.000001 # Query # # COMMIT
|
||||
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
|
||||
master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES ('2001-01-01')
|
||||
master-bin.000001 # Table_map # # table_id: # (test.t1)
|
||||
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
|
||||
master-bin.000001 # Query # # COMMIT
|
||||
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
|
||||
master-bin.000001 # Query # # use `test`; CREATE TABLE "t2" (
|
||||
"a" mariadb_schema.date DEFAULT NULL
|
||||
)
|
||||
master-bin.000001 # Annotate_rows # # CREATE TABLE t2 SELECT * FROM t1
|
||||
master-bin.000001 # Table_map # # table_id: # (test.t2)
|
||||
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
|
||||
master-bin.000001 # Query # # COMMIT
|
||||
SET SQL_MODE= DEFAULT;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` date DEFAULT NULL
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
SHOW CREATE TABLE t2;
|
||||
Table Create Table
|
||||
t2 CREATE TABLE `t2` (
|
||||
`a` date DEFAULT NULL
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
SET SQL_MODE= ORACLE;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"a" mariadb_schema.date DEFAULT NULL
|
||||
)
|
||||
SHOW CREATE TABLE t2;
|
||||
Table Create Table
|
||||
t2 CREATE TABLE "t2" (
|
||||
"a" mariadb_schema.date DEFAULT NULL
|
||||
)
|
||||
connection slave;
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
NULL
|
||||
2001-01-01
|
||||
SELECT * FROM t2;
|
||||
a
|
||||
NULL
|
||||
2001-01-01
|
||||
SET SQL_MODE= DEFAULT;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` date DEFAULT NULL
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
SHOW CREATE TABLE t2;
|
||||
Table Create Table
|
||||
t2 CREATE TABLE `t2` (
|
||||
`a` date DEFAULT NULL
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
SET SQL_MODE= ORACLE;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"a" mariadb_schema.date DEFAULT NULL
|
||||
)
|
||||
SHOW CREATE TABLE t2;
|
||||
Table Create Table
|
||||
t2 CREATE TABLE "t2" (
|
||||
"a" mariadb_schema.date DEFAULT NULL
|
||||
)
|
||||
connection master;
|
||||
DROP TABLE t1, t2;
|
||||
include/rpl_end.inc
|
@ -6,3 +6,153 @@ t1 CREATE TABLE "t1" (
|
||||
"a" datetime DEFAULT NULL
|
||||
)
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... SELECT in ORACLE mode
|
||||
#
|
||||
SET sql_mode=DEFAULT;
|
||||
CREATE TABLE t1 (a unknown.DATE);
|
||||
ERROR HY000: Unknown data type: 'unknown.date'
|
||||
SET sql_mode=DEFAULT;
|
||||
CREATE TABLE t1 (
|
||||
def_date DATE,
|
||||
mdb_date mariadb_schema.DATE,
|
||||
ora_date oracle_schema.DATE,
|
||||
max_date maxdb_schema.DATE
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`def_date` date DEFAULT NULL,
|
||||
`mdb_date` date DEFAULT NULL,
|
||||
`ora_date` datetime DEFAULT NULL,
|
||||
`max_date` date DEFAULT NULL
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
SET sql_mode=ORACLE;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"def_date" mariadb_schema.date DEFAULT NULL,
|
||||
"mdb_date" mariadb_schema.date DEFAULT NULL,
|
||||
"ora_date" datetime DEFAULT NULL,
|
||||
"max_date" mariadb_schema.date DEFAULT NULL
|
||||
)
|
||||
DROP TABLE t1;
|
||||
SET sql_mode=ORACLE;
|
||||
CREATE TABLE t1 (
|
||||
def_date DATE,
|
||||
mdb_date mariadb_schema.DATE,
|
||||
ora_date oracle_schema.DATE,
|
||||
max_date maxdb_schema.DATE
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"def_date" datetime DEFAULT NULL,
|
||||
"mdb_date" mariadb_schema.date DEFAULT NULL,
|
||||
"ora_date" datetime DEFAULT NULL,
|
||||
"max_date" mariadb_schema.date DEFAULT NULL
|
||||
)
|
||||
SET sql_mode=DEFAULT;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`def_date` datetime DEFAULT NULL,
|
||||
`mdb_date` date DEFAULT NULL,
|
||||
`ora_date` datetime DEFAULT NULL,
|
||||
`max_date` date DEFAULT NULL
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# ALTER..MODIFY and ALTER..CHANGE understand qualifiers
|
||||
#
|
||||
SET sql_mode=DEFAULT;
|
||||
CREATE TABLE t1 (a DATE);
|
||||
INSERT INTO t1 VALUES ('2001-01-01');
|
||||
SET sql_mode=ORACLE;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"a" mariadb_schema.date DEFAULT NULL
|
||||
)
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
2001-01-01
|
||||
ALTER TABLE t1 MODIFY a DATE;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"a" datetime DEFAULT NULL
|
||||
)
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
2001-01-01 00:00:00
|
||||
ALTER TABLE t1 MODIFY a mariadb_schema.DATE;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"a" mariadb_schema.date DEFAULT NULL
|
||||
)
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
2001-01-01
|
||||
ALTER TABLE t1 MODIFY a oracle_schema.DATE;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"a" datetime DEFAULT NULL
|
||||
)
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
2001-01-01 00:00:00
|
||||
ALTER TABLE t1 CHANGE a b mariadb_schema.DATE;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"b" mariadb_schema.date DEFAULT NULL
|
||||
)
|
||||
SELECT * FROM t1;
|
||||
b
|
||||
2001-01-01
|
||||
ALTER TABLE t1 CHANGE b a oracle_schema.DATE;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE "t1" (
|
||||
"a" datetime DEFAULT NULL
|
||||
)
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
2001-01-01 00:00:00
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# Qualified syntax is not supported yet in SP
|
||||
# See MDEV-23353 Qualified data types in SP
|
||||
#
|
||||
SET sql_mode=ORACLE;
|
||||
CREATE FUNCTION f1() RETURN mariadb_schema.DATE AS
|
||||
BEGIN
|
||||
RETURN CURRENT_DATE;
|
||||
END;
|
||||
$$
|
||||
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'mariadb_schema.DATE AS
|
||||
BEGIN
|
||||
RETURN CURRENT_DATE;
|
||||
END' at line 1
|
||||
CREATE PROCEDURE p1(a mariadb_schema.DATE) AS
|
||||
BEGIN
|
||||
NULL;
|
||||
END;
|
||||
$$
|
||||
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') AS
|
||||
BEGIN
|
||||
NULL;
|
||||
END' at line 1
|
||||
CREATE PROCEDURE p1() AS
|
||||
a mariadb_schema.DATE;
|
||||
BEGIN
|
||||
NULL;
|
||||
END;
|
||||
$$
|
||||
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ';
|
||||
BEGIN
|
||||
NULL;
|
||||
END' at line 2
|
||||
|
38
mysql-test/suite/compat/oracle/t/rpl_mariadb_date.test
Normal file
@ -0,0 +1,38 @@
|
||||
--source include/have_binlog_format_row.inc
|
||||
--source include/master-slave.inc
|
||||
|
||||
SET SQL_MODE=DEFAULT;
|
||||
CREATE TABLE t1 (a DATE);
|
||||
INSERT INTO t1 VALUES (NULL);
|
||||
INSERT INTO t1 VALUES ('2001-01-01');
|
||||
|
||||
SET SQL_MODE= ORACLE;
|
||||
CREATE TABLE t2 SELECT * FROM t1;
|
||||
|
||||
--let $binlog_file = LAST
|
||||
source include/show_binlog_events.inc;
|
||||
|
||||
SET SQL_MODE= DEFAULT;
|
||||
SHOW CREATE TABLE t1;
|
||||
SHOW CREATE TABLE t2;
|
||||
|
||||
SET SQL_MODE= ORACLE;
|
||||
SHOW CREATE TABLE t1;
|
||||
SHOW CREATE TABLE t2;
|
||||
|
||||
--sync_slave_with_master
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t2;
|
||||
|
||||
SET SQL_MODE= DEFAULT;
|
||||
SHOW CREATE TABLE t1;
|
||||
SHOW CREATE TABLE t2;
|
||||
|
||||
SET SQL_MODE= ORACLE;
|
||||
SHOW CREATE TABLE t1;
|
||||
SHOW CREATE TABLE t2;
|
||||
|
||||
# Cleanup
|
||||
--connection master
|
||||
DROP TABLE t1, t2;
|
||||
--source include/rpl_end.inc
|
@ -2,3 +2,102 @@ SET sql_mode=ORACLE;
|
||||
CREATE TABLE t1 (a DATE);
|
||||
SHOW CREATE TABLE t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... SELECT in ORACLE mode
|
||||
--echo #
|
||||
|
||||
SET sql_mode=DEFAULT;
|
||||
--error ER_UNKNOWN_ERROR
|
||||
CREATE TABLE t1 (a unknown.DATE);
|
||||
|
||||
|
||||
SET sql_mode=DEFAULT;
|
||||
CREATE TABLE t1 (
|
||||
def_date DATE,
|
||||
mdb_date mariadb_schema.DATE,
|
||||
ora_date oracle_schema.DATE,
|
||||
max_date maxdb_schema.DATE
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
SET sql_mode=ORACLE;
|
||||
SHOW CREATE TABLE t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
|
||||
SET sql_mode=ORACLE;
|
||||
CREATE TABLE t1 (
|
||||
def_date DATE,
|
||||
mdb_date mariadb_schema.DATE,
|
||||
ora_date oracle_schema.DATE,
|
||||
max_date maxdb_schema.DATE
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
SET sql_mode=DEFAULT;
|
||||
SHOW CREATE TABLE t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # ALTER..MODIFY and ALTER..CHANGE understand qualifiers
|
||||
--echo #
|
||||
|
||||
SET sql_mode=DEFAULT;
|
||||
CREATE TABLE t1 (a DATE);
|
||||
INSERT INTO t1 VALUES ('2001-01-01');
|
||||
SET sql_mode=ORACLE;
|
||||
SHOW CREATE TABLE t1;
|
||||
SELECT * FROM t1;
|
||||
|
||||
ALTER TABLE t1 MODIFY a DATE;
|
||||
SHOW CREATE TABLE t1;
|
||||
SELECT * FROM t1;
|
||||
|
||||
ALTER TABLE t1 MODIFY a mariadb_schema.DATE;
|
||||
SHOW CREATE TABLE t1;
|
||||
SELECT * FROM t1;
|
||||
|
||||
ALTER TABLE t1 MODIFY a oracle_schema.DATE;
|
||||
SHOW CREATE TABLE t1;
|
||||
SELECT * FROM t1;
|
||||
|
||||
ALTER TABLE t1 CHANGE a b mariadb_schema.DATE;
|
||||
SHOW CREATE TABLE t1;
|
||||
SELECT * FROM t1;
|
||||
|
||||
ALTER TABLE t1 CHANGE b a oracle_schema.DATE;
|
||||
SHOW CREATE TABLE t1;
|
||||
SELECT * FROM t1;
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # Qualified syntax is not supported yet in SP
|
||||
--echo # See MDEV-23353 Qualified data types in SP
|
||||
--echo #
|
||||
|
||||
SET sql_mode=ORACLE;
|
||||
DELIMITER $$;
|
||||
# Change to this when merging to 10.5:
|
||||
#--error ER_UNKNOWN_DATA_TYPE
|
||||
--error ER_PARSE_ERROR
|
||||
CREATE FUNCTION f1() RETURN mariadb_schema.DATE AS
|
||||
BEGIN
|
||||
RETURN CURRENT_DATE;
|
||||
END;
|
||||
$$
|
||||
--error ER_PARSE_ERROR
|
||||
CREATE PROCEDURE p1(a mariadb_schema.DATE) AS
|
||||
BEGIN
|
||||
NULL;
|
||||
END;
|
||||
$$
|
||||
--error ER_PARSE_ERROR
|
||||
CREATE PROCEDURE p1() AS
|
||||
a mariadb_schema.DATE;
|
||||
BEGIN
|
||||
NULL;
|
||||
END;
|
||||
$$
|
||||
DELIMITER ;$$
|
||||
|
@ -1896,9 +1896,13 @@ Warnings:
|
||||
Warning 1264 Out of range value for column 'c1' at row 3
|
||||
INSERT IGNORE INTO t5 VALUES('1e+52','-1e+52','1e+52',5),('1e-52','-1e-52','1e-52',6);
|
||||
Warnings:
|
||||
Warning 1264 Out of range value for column 'c1' at row 1
|
||||
Warning 1264 Out of range value for column 'c2' at row 1
|
||||
Warning 1264 Out of range value for column 'c3' at row 1
|
||||
INSERT IGNORE INTO t5 VALUES('1e+53','-1e+53','1e+53',7),('1e-53','-1e-53','1e-53',8);
|
||||
Warnings:
|
||||
Warning 1264 Out of range value for column 'c1' at row 1
|
||||
Warning 1264 Out of range value for column 'c2' at row 1
|
||||
Warning 1264 Out of range value for column 'c3' at row 1
|
||||
SELECT * FROM t5;
|
||||
c1 c2 c3 c4
|
||||
|
@ -46,3 +46,4 @@ partition : MDEV-19958 Galera test failure on galera.partition
|
||||
query_cache: MDEV-15805 Test failure on galera.query_cache
|
||||
sql_log_bin : MDEV-21491 galera.sql_log_bin
|
||||
versioning_trx_id : MDEV-18590 galera.versioning_trx_id
|
||||
MW-328A : MDEV-22666?
|
||||
|
@ -33,6 +33,7 @@ a
|
||||
20
|
||||
UPDATE t SET a=3 WHERE a=1;
|
||||
# restart: --innodb-read-only
|
||||
SET GLOBAL innodb_status_output= @@GLOBAL.innodb_status_output;
|
||||
# Starting with MariaDB 10.2, innodb_read_only implies READ UNCOMMITTED.
|
||||
# In earlier versions, this would return the last committed version
|
||||
# (only a=3; no record for a=20)!
|
||||
|
@ -59,6 +59,7 @@ SELECT * FROM t;
|
||||
UPDATE t SET a=3 WHERE a=1;
|
||||
--let $restart_parameters= --innodb-read-only
|
||||
--source include/restart_mysqld.inc
|
||||
SET GLOBAL innodb_status_output= @@GLOBAL.innodb_status_output;
|
||||
--echo # Starting with MariaDB 10.2, innodb_read_only implies READ UNCOMMITTED.
|
||||
--echo # In earlier versions, this would return the last committed version
|
||||
--echo # (only a=3; no record for a=20)!
|
||||
|
15
mysql-test/suite/maria/encrypt-no-key.result
Normal file
@ -0,0 +1,15 @@
|
||||
call mtr.add_suppression('Unknown key id 1. Can''t continue');
|
||||
set global aria_encrypt_tables= 1;
|
||||
create table t1 (pk int primary key, a int, key(a)) engine=aria transactional=1;
|
||||
alter table t1 disable keys;
|
||||
insert into t1 values (1,1);
|
||||
alter table t1 enable keys;
|
||||
ERROR HY000: Unknown key id 1. Can't continue!
|
||||
repair table t1 use_frm;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 repair warning Number of rows changed from 0 to 1
|
||||
test.t1 repair Error Unknown key id 1. Can't continue!
|
||||
test.t1 repair Error Unknown key id 1. Can't continue!
|
||||
test.t1 repair status OK
|
||||
drop table t1;
|
||||
set global aria_encrypt_tables= default;
|
14
mysql-test/suite/maria/encrypt-no-key.test
Normal file
@ -0,0 +1,14 @@
|
||||
#
|
||||
# MDEV-18496 Crash when Aria encryption is enabled but plugin not available
|
||||
#
|
||||
call mtr.add_suppression('Unknown key id 1. Can''t continue');
|
||||
|
||||
set global aria_encrypt_tables= 1;
|
||||
create table t1 (pk int primary key, a int, key(a)) engine=aria transactional=1;
|
||||
alter table t1 disable keys;
|
||||
insert into t1 values (1,1);
|
||||
error 192;
|
||||
alter table t1 enable keys;
|
||||
repair table t1 use_frm;
|
||||
drop table t1;
|
||||
set global aria_encrypt_tables= default;
|
@ -1,3 +1,4 @@
|
||||
FLUSH TABLES;
|
||||
#
|
||||
# Bug#13737949: CRASH IN HA_PARTITION::INDEX_INIT
|
||||
# Bug#18694052: SERVER CRASH IN HA_PARTITION::INIT_RECORD_PRIORITY_QUEUE
|
||||
|
@ -8,6 +8,10 @@
|
||||
# Crash tests don't work with embedded
|
||||
--source include/not_embedded.inc
|
||||
|
||||
# Make sure system tables are not open, as the test will kill the server
|
||||
# and it will cause corruption errors in the log
|
||||
FLUSH TABLES;
|
||||
|
||||
# Partitioning test that require debug features
|
||||
|
||||
--echo #
|
||||
|
5
mysql-test/suite/roles/drop_current_role.result
Normal file
@ -0,0 +1,5 @@
|
||||
create role r;
|
||||
set role r;
|
||||
drop role r;
|
||||
revoke all on *.* from current_role;
|
||||
ERROR OP000: Invalid role specification `r`
|
9
mysql-test/suite/roles/drop_current_role.test
Normal file
@ -0,0 +1,9 @@
|
||||
--source include/not_embedded.inc
|
||||
#
|
||||
# MDEV-22521 Server crashes in traverse_role_graph_up or Assertion `user' fails in traverse_role_graph_impl
|
||||
#
|
||||
create role r;
|
||||
set role r;
|
||||
drop role r;
|
||||
error ER_INVALID_ROLE;
|
||||
revoke all on *.* from current_role;
|
@ -51,9 +51,9 @@ INSERT into t1(name, salary, income_tax) values('Record_2', 501, 501*2.5/1000);
|
||||
INSERT into t1(name, salary, income_tax) values('Record_3', 210, 210*2.5/1000);
|
||||
SELECT * from t1;
|
||||
id name salary income_tax
|
||||
1 Record_1 100011 250.027
|
||||
2 Record_2 501 1.2525
|
||||
3 Record_3 210 0.525
|
||||
1 Record_1 100011 250.03
|
||||
2 Record_2 501 1.25
|
||||
3 Record_3 210 0.53
|
||||
connect test_con2, localhost, root,,;
|
||||
connection test_con2;
|
||||
## Verifying session & global value of variable ##
|
||||
@ -69,11 +69,11 @@ INSERT into t1(name, salary, income_tax) values('Record_5', 501, 501*2.5/1000);
|
||||
INSERT into t1(name, salary, income_tax) values('Record_6', 210, 210*2.5/1000);
|
||||
SELECT * from t1;
|
||||
id name salary income_tax
|
||||
1 Record_1 100011 250.027
|
||||
2 Record_2 501 1.2525
|
||||
3 Record_3 210 0.525
|
||||
4 Record_4 100011 250.027
|
||||
5 Record_5 501 1.2525
|
||||
1 Record_1 100011 250.03
|
||||
2 Record_2 501 1.25
|
||||
3 Record_3 210 0.53
|
||||
4 Record_4 100011 250.028
|
||||
5 Record_5 501 1.253
|
||||
6 Record_6 210 0.525
|
||||
## Dropping table t1 ##
|
||||
drop table t1;
|
||||
|
@ -29,7 +29,7 @@ set time_zone='+1:00';
|
||||
flush tables;
|
||||
select * from t1;
|
||||
a b v
|
||||
1 2 0.3333333330000000000
|
||||
1 2 0.3333000000000000000
|
||||
select * from t8;
|
||||
a b v
|
||||
1234567890 2 2009-02-14 00:31:30
|
||||
|
@ -34,19 +34,20 @@
|
||||
#include <execinfo.h>
|
||||
#endif
|
||||
|
||||
#ifdef __linux__
|
||||
#define PTR_SANE(p) ((p) && (char*)(p) >= heap_start && (char*)(p) <= heap_end)
|
||||
|
||||
static char *heap_start;
|
||||
|
||||
#if(defined HAVE_BSS_START) && !(defined __linux__)
|
||||
extern char *__bss_start;
|
||||
#endif
|
||||
#else
|
||||
#define PTR_SANE(p) (p)
|
||||
#endif /* __linux */
|
||||
|
||||
|
||||
void my_init_stacktrace()
|
||||
{
|
||||
#if(defined HAVE_BSS_START) && !(defined __linux__)
|
||||
#ifdef __linux__
|
||||
heap_start = (char*) &__bss_start;
|
||||
#endif
|
||||
#endif /* __linux */
|
||||
}
|
||||
|
||||
#ifdef __linux__
|
||||
@ -149,15 +150,15 @@ static int safe_print_str(const char *addr, size_t max_len)
|
||||
|
||||
int my_safe_print_str(const char* val, size_t max_len)
|
||||
{
|
||||
#ifdef __linux__
|
||||
char *heap_end;
|
||||
|
||||
#ifdef __linux__
|
||||
// Try and make use of /proc filesystem to safely print memory contents.
|
||||
if (!safe_print_str(val, max_len))
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
heap_end= (char*) sbrk(0);
|
||||
#endif
|
||||
|
||||
if (!PTR_SANE(val))
|
||||
{
|
||||
|
@ -67,6 +67,7 @@ Usage: $0 [OPTIONS]
|
||||
--cross-bootstrap For internal use. Used when building the MariaDB system
|
||||
tables on a different host than the target.
|
||||
--datadir=path The path to the MariaDB data directory.
|
||||
--no-defaults Don't read default options from any option file.
|
||||
--defaults-extra-file=name
|
||||
Read this file after the global files are read.
|
||||
--defaults-file=name Only read default options from the given file name.
|
||||
@ -79,8 +80,6 @@ Usage: $0 [OPTIONS]
|
||||
--help Display this help and exit.
|
||||
--ldata=path The path to the MariaDB data directory. Same as
|
||||
--datadir.
|
||||
--no-defaults Don't read default options from any option file.
|
||||
--defaults-file=path Read only this configuration file.
|
||||
--rpm For internal use. This option is used by RPM files
|
||||
during the MariaDB installation process.
|
||||
--skip-name-resolve Use IP addresses rather than hostnames when creating
|
||||
|
@ -132,6 +132,7 @@ SET (SQL_SOURCE
|
||||
rpl_gtid.cc rpl_parallel.cc
|
||||
semisync.cc semisync_master.cc semisync_slave.cc
|
||||
semisync_master_ack_receiver.cc
|
||||
sql_schema.cc
|
||||
sql_type.cc sql_mode.cc sql_type_json.cc
|
||||
item_windowfunc.cc sql_window.cc
|
||||
sql_cte.cc
|
||||
|
@ -305,7 +305,7 @@ uint my_datetime_binary_length(uint dec)
|
||||
|
||||
/*
|
||||
On disk we store as unsigned number with DATETIMEF_INT_OFS offset,
|
||||
for HA_KETYPE_BINARY compatibilty purposes.
|
||||
for HA_KETYPE_BINARY compatibility purposes.
|
||||
*/
|
||||
#define DATETIMEF_INT_OFS 0x8000000000LL
|
||||
|
||||
|
@ -33,7 +33,7 @@
|
||||
/*
|
||||
Action to perform at a synchronization point.
|
||||
NOTE: This structure is moved around in memory by realloc(), qsort(),
|
||||
and memmove(). Do not add objects with non-trivial constuctors
|
||||
and memmove(). Do not add objects with non-trivial constructors
|
||||
or destructors, which might prevent moving of this structure
|
||||
with these functions.
|
||||
*/
|
||||
@ -542,7 +542,7 @@ static void debug_sync_reset(THD *thd)
|
||||
@description
|
||||
Removing an action mainly means to decrement the ds_active counter.
|
||||
But if the action is between other active action in the array, then
|
||||
the array needs to be shrinked. The active actions above the one to
|
||||
the array needs to be shrunk. The active actions above the one to
|
||||
be removed have to be moved down by one slot.
|
||||
*/
|
||||
|
||||
|
@ -236,7 +236,7 @@ static File open_error_msg_file(const char *file_name, const char *language,
|
||||
MYF(0))) < 0)
|
||||
{
|
||||
/*
|
||||
Trying pre-5.4 sematics of the --language parameter.
|
||||
Trying pre-5.4 semantics of the --language parameter.
|
||||
It included the language-specific part, e.g.:
|
||||
--language=/path/to/english/
|
||||
*/
|
||||
|
@ -78,8 +78,8 @@ int initialize_encryption_plugin(st_plugin_int *plugin)
|
||||
(struct st_mariadb_encryption*) plugin->plugin->info;
|
||||
|
||||
/*
|
||||
Copmiler on Spark doesn't like the '?' operator here as it
|
||||
belives the (uint (*)...) implies the C++ call model.
|
||||
Compiler on Spark doesn't like the '?' operator here as it
|
||||
believes the (uint (*)...) implies the C++ call model.
|
||||
*/
|
||||
if (handle->crypt_ctx_size)
|
||||
encryption_handler.encryption_ctx_size_func= handle->crypt_ctx_size;
|
||||
|
@ -161,7 +161,7 @@ Event_creation_ctx::load_from_db(THD *thd,
|
||||
/*************************************************************************/
|
||||
|
||||
/*
|
||||
Initiliazes dbname and name of an Event_queue_element_for_exec
|
||||
Initializes dbname and name of an Event_queue_element_for_exec
|
||||
object
|
||||
|
||||
SYNOPSIS
|
||||
|
@ -672,7 +672,7 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data,
|
||||
DBUG_PRINT("info", ("name: %.*s", (int) parse_data->name.length,
|
||||
parse_data->name.str));
|
||||
|
||||
DBUG_PRINT("info", ("check existance of an event with the same name"));
|
||||
DBUG_PRINT("info", ("check existence of an event with the same name"));
|
||||
if (!find_named_event(&parse_data->dbname, &parse_data->name, table))
|
||||
{
|
||||
if (thd->lex->create_info.or_replace())
|
||||
|
@ -97,7 +97,7 @@ Event_parse_data::init_name(THD *thd, sp_name *spn)
|
||||
ENDS or AT is in the past, we are trying to create an event that
|
||||
will never be executed. If it has ON COMPLETION NOT PRESERVE
|
||||
(default), then it would normally be dropped already, so on CREATE
|
||||
EVENT we give a warning, and do not create anyting. On ALTER EVENT
|
||||
EVENT we give a warning, and do not create anything. On ALTER EVENT
|
||||
we give a error, and do not change the event.
|
||||
|
||||
If the event has ON COMPLETION PRESERVE, then we see if the event is
|
||||
@ -362,7 +362,7 @@ wrong_value:
|
||||
EVERY 5 MINUTE STARTS "2004-12-12 10:00:00" means that
|
||||
the event will be executed every 5 minutes but this will
|
||||
start at the date shown above. Expressions are possible :
|
||||
DATE_ADD(NOW(), INTERVAL 1 DAY) -- start tommorow at
|
||||
DATE_ADD(NOW(), INTERVAL 1 DAY) -- start tomorrow at
|
||||
same time.
|
||||
|
||||
RETURN VALUE
|
||||
@ -417,7 +417,7 @@ wrong_value:
|
||||
EVERY 5 MINUTE ENDS "2004-12-12 10:00:00" means that
|
||||
the event will be executed every 5 minutes but this will
|
||||
end at the date shown above. Expressions are possible :
|
||||
DATE_ADD(NOW(), INTERVAL 1 DAY) -- end tommorow at
|
||||
DATE_ADD(NOW(), INTERVAL 1 DAY) -- end tomorrow at
|
||||
same time.
|
||||
|
||||
RETURN VALUE
|
||||
|
@ -360,7 +360,7 @@ Event_queue::drop_matching_events(THD *thd, const LEX_CSTRING *pattern,
|
||||
We don't call mysql_cond_broadcast(&COND_queue_state);
|
||||
If we remove the top event:
|
||||
1. The queue is empty. The scheduler will wake up at some time and
|
||||
realize that the queue is empty. If create_event() comes inbetween
|
||||
realize that the queue is empty. If create_event() comes in between
|
||||
it will signal the scheduler
|
||||
2. The queue is not empty, but the next event after the previous top,
|
||||
won't be executed any time sooner than the element we removed. Hence,
|
||||
|
@ -129,7 +129,7 @@ bool Events::check_if_system_tables_error()
|
||||
|
||||
/**
|
||||
Reconstructs interval expression from interval type and expression
|
||||
value that is in form of a value of the smalles entity:
|
||||
value that is in form of a value of the smallest entity:
|
||||
For
|
||||
YEAR_MONTH - expression is in months
|
||||
DAY_MINUTE - expression is in minutes
|
||||
|
118
sql/field.cc
@ -42,7 +42,7 @@
|
||||
#define MAX_EXPONENT 1024
|
||||
|
||||
/*****************************************************************************
|
||||
Instansiate templates and static variables
|
||||
Instantiate templates and static variables
|
||||
*****************************************************************************/
|
||||
|
||||
static const char *zero_timestamp="0000-00-00 00:00:00.000000";
|
||||
@ -88,7 +88,7 @@ inline bool Field::marked_for_write_or_computed() const
|
||||
/*
|
||||
Rules for merging different types of fields in UNION
|
||||
|
||||
NOTE: to avoid 256*256 table, gap in table types numeration is skiped
|
||||
NOTE: to avoid 256*256 table, gap in table types numeration is skipped
|
||||
following #defines describe that gap and how to canculate number of fields
|
||||
and index of field in this array.
|
||||
*/
|
||||
@ -1490,7 +1490,7 @@ Item *Field_num::get_equal_zerofill_const_item(THD *thd, const Context &ctx,
|
||||
|
||||
|
||||
/**
|
||||
Contruct warning parameters using thd->no_errors
|
||||
Construct warning parameters using thd->no_errors
|
||||
to determine whether to generate or suppress warnings.
|
||||
We can get here in a query like this:
|
||||
SELECT COUNT(@@basedir);
|
||||
@ -1538,7 +1538,7 @@ Value_source::Converter_string_to_number::check_edom_and_truncation(THD *thd,
|
||||
if (filter.want_warning_edom())
|
||||
{
|
||||
/*
|
||||
We can use err.ptr() here as ErrConvString is guranteed to put an
|
||||
We can use err.ptr() here as ErrConvString is guaranteed to put an
|
||||
end \0 here.
|
||||
*/
|
||||
THD *wthd= thd ? thd : current_thd;
|
||||
@ -1570,7 +1570,7 @@ Value_source::Converter_string_to_number::check_edom_and_truncation(THD *thd,
|
||||
- found garbage at the end of the string.
|
||||
|
||||
@param type Data type name (e.g. "decimal", "integer", "double")
|
||||
@param edom Indicates that the string-to-number routine retuned
|
||||
@param edom Indicates that the string-to-number routine returned
|
||||
an error code equivalent to EDOM (value out of domain),
|
||||
i.e. the string fully consisted of garbage and the
|
||||
conversion routine could not get any digits from it.
|
||||
@ -1633,7 +1633,7 @@ int Field_num::check_edom_and_truncation(const char *type, bool edom,
|
||||
|
||||
|
||||
/*
|
||||
Conver a string to an integer then check bounds.
|
||||
Convert a string to an integer then check bounds.
|
||||
|
||||
SYNOPSIS
|
||||
Field_num::get_int
|
||||
@ -2730,7 +2730,7 @@ int Field_decimal::store(const char *from_arg, size_t len, CHARSET_INFO *cs)
|
||||
We only have to generate warnings if count_cuted_fields is set.
|
||||
This is to avoid extra checks of the number when they are not needed.
|
||||
Even if this flag is not set, it's OK to increment warnings, if
|
||||
it makes the code easer to read.
|
||||
it makes the code easier to read.
|
||||
*/
|
||||
|
||||
if (get_thd()->count_cuted_fields > CHECK_FIELD_EXPRESSION)
|
||||
@ -2813,7 +2813,7 @@ int Field_decimal::store(const char *from_arg, size_t len, CHARSET_INFO *cs)
|
||||
}
|
||||
|
||||
/*
|
||||
Now write the formated number
|
||||
Now write the formatted number
|
||||
|
||||
First the digits of the int_% parts.
|
||||
Do we have enough room to write these digits ?
|
||||
@ -3333,7 +3333,7 @@ int Field_new_decimal::store(const char *from, size_t length,
|
||||
If check_decimal() failed because of EDOM-alike error,
|
||||
(e.g. E_DEC_BAD_NUM), we have to initialize decimal_value to zero.
|
||||
Note: if check_decimal() failed because of truncation,
|
||||
decimal_value is alreay properly initialized.
|
||||
decimal_value is already properly initialized.
|
||||
*/
|
||||
my_decimal_set_zero(&decimal_value);
|
||||
/*
|
||||
@ -4749,11 +4749,12 @@ int truncate_double(double *nr, uint field_length, uint dec,
|
||||
{
|
||||
uint order= field_length - dec;
|
||||
uint step= array_elements(log_10) - 1;
|
||||
max_value= 1.0;
|
||||
double max_value_by_dec= 1.0;
|
||||
for (; order > step; order-= step)
|
||||
max_value*= log_10[step];
|
||||
max_value*= log_10[order];
|
||||
max_value-= 1.0 / log_10[dec];
|
||||
max_value_by_dec*= log_10[step];
|
||||
max_value_by_dec*= log_10[order];
|
||||
max_value_by_dec-= 1.0 / log_10[dec];
|
||||
set_if_smaller(max_value, max_value_by_dec);
|
||||
|
||||
/* Check for infinity so we don't get NaN in calculations */
|
||||
if (!std::isinf(res))
|
||||
@ -5041,7 +5042,7 @@ Field_timestamp::Field_timestamp(uchar *ptr_arg, uint32 len_arg,
|
||||
{
|
||||
/*
|
||||
We mark the flag with TIMESTAMP_FLAG to indicate to the client that
|
||||
this field will be automaticly updated on insert.
|
||||
this field will be automatically updated on insert.
|
||||
*/
|
||||
flags|= TIMESTAMP_FLAG;
|
||||
if (unireg_check != TIMESTAMP_DN_FIELD)
|
||||
@ -7543,7 +7544,7 @@ Field_string::unpack(uchar *to, const uchar *from, const uchar *from_end,
|
||||
with the real type. Since all allowable types have 0xF as most
|
||||
significant bits of the metadata word, lengths <256 will not affect
|
||||
the real type at all, while all other values will result in a
|
||||
non-existant type in the range 17-244.
|
||||
non-existent type in the range 17-244.
|
||||
|
||||
@see Field_string::unpack
|
||||
|
||||
@ -7729,8 +7730,7 @@ void Field_varstring::mark_unused_memory_as_defined()
|
||||
#endif
|
||||
|
||||
|
||||
int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
|
||||
uint max_len)
|
||||
int Field_varstring::cmp(const uchar *a_ptr, const uchar *b_ptr)
|
||||
{
|
||||
uint a_length, b_length;
|
||||
int diff;
|
||||
@ -7745,8 +7745,8 @@ int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
|
||||
a_length= uint2korr(a_ptr);
|
||||
b_length= uint2korr(b_ptr);
|
||||
}
|
||||
set_if_smaller(a_length, max_len);
|
||||
set_if_smaller(b_length, max_len);
|
||||
set_if_smaller(a_length, field_length);
|
||||
set_if_smaller(b_length, field_length);
|
||||
diff= field_charset->coll->strnncollsp(field_charset,
|
||||
a_ptr+
|
||||
length_bytes,
|
||||
@ -7758,6 +7758,43 @@ int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
|
||||
}
|
||||
|
||||
|
||||
static int cmp_str_prefix(const uchar *ua, size_t alen, const uchar *ub,
|
||||
size_t blen, size_t prefix, CHARSET_INFO *cs)
|
||||
{
|
||||
const char *a= (char*)ua, *b= (char*)ub;
|
||||
MY_STRCOPY_STATUS status;
|
||||
prefix/= cs->mbmaxlen;
|
||||
alen= cs->cset->well_formed_char_length(cs, a, a + alen, prefix, &status);
|
||||
blen= cs->cset->well_formed_char_length(cs, b, b + blen, prefix, &status);
|
||||
return cs->coll->strnncollsp(cs, ua, alen, ub, blen);
|
||||
}
|
||||
|
||||
|
||||
|
||||
int Field_varstring::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr,
|
||||
size_t prefix_len)
|
||||
{
|
||||
/* avoid expensive well_formed_char_length if possible */
|
||||
if (prefix_len == table->field[field_index]->field_length)
|
||||
return Field_varstring::cmp(a_ptr, b_ptr);
|
||||
|
||||
size_t a_length, b_length;
|
||||
|
||||
if (length_bytes == 1)
|
||||
{
|
||||
a_length= *a_ptr;
|
||||
b_length= *b_ptr;
|
||||
}
|
||||
else
|
||||
{
|
||||
a_length= uint2korr(a_ptr);
|
||||
b_length= uint2korr(b_ptr);
|
||||
}
|
||||
return cmp_str_prefix(a_ptr+length_bytes, a_length, b_ptr+length_bytes,
|
||||
b_length, prefix_len, field_charset);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
@note
|
||||
varstring and blob keys are ALWAYS stored with a 2 byte length prefix
|
||||
@ -8262,8 +8299,7 @@ longlong Field_varstring_compressed::val_int(void)
|
||||
}
|
||||
|
||||
|
||||
int Field_varstring_compressed::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
|
||||
uint max_len)
|
||||
int Field_varstring_compressed::cmp(const uchar *a_ptr, const uchar *b_ptr)
|
||||
{
|
||||
String a, b;
|
||||
uint a_length, b_length;
|
||||
@ -8282,11 +8318,6 @@ int Field_varstring_compressed::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
|
||||
uncompress(&a, &a, a_ptr + length_bytes, a_length);
|
||||
uncompress(&b, &b, b_ptr + length_bytes, b_length);
|
||||
|
||||
if (a.length() > max_len)
|
||||
a.length(max_len);
|
||||
if (b.length() > max_len)
|
||||
b.length(max_len);
|
||||
|
||||
return sortcmp(&a, &b, field_charset);
|
||||
}
|
||||
|
||||
@ -8521,16 +8552,24 @@ int Field_blob::cmp(const uchar *a,uint32 a_length, const uchar *b,
|
||||
}
|
||||
|
||||
|
||||
int Field_blob::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
|
||||
uint max_length)
|
||||
int Field_blob::cmp(const uchar *a_ptr, const uchar *b_ptr)
|
||||
{
|
||||
uchar *blob1,*blob2;
|
||||
memcpy(&blob1, a_ptr+packlength, sizeof(char*));
|
||||
memcpy(&blob2, b_ptr+packlength, sizeof(char*));
|
||||
uint a_len= get_length(a_ptr), b_len= get_length(b_ptr);
|
||||
set_if_smaller(a_len, max_length);
|
||||
set_if_smaller(b_len, max_length);
|
||||
return Field_blob::cmp(blob1,a_len,blob2,b_len);
|
||||
size_t a_len= get_length(a_ptr), b_len= get_length(b_ptr);
|
||||
return cmp(blob1, (uint32)a_len, blob2, (uint32)b_len);
|
||||
}
|
||||
|
||||
|
||||
int Field_blob::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr,
|
||||
size_t prefix_len)
|
||||
{
|
||||
uchar *blob1,*blob2;
|
||||
memcpy(&blob1, a_ptr+packlength, sizeof(char*));
|
||||
memcpy(&blob2, b_ptr+packlength, sizeof(char*));
|
||||
size_t a_len= get_length(a_ptr), b_len= get_length(b_ptr);
|
||||
return cmp_str_prefix(blob1, a_len, blob2, b_len, prefix_len, field_charset);
|
||||
}
|
||||
|
||||
|
||||
@ -9997,7 +10036,7 @@ my_decimal *Field_bit::val_decimal(my_decimal *deciaml_value)
|
||||
The a and b pointer must be pointers to the field in a record
|
||||
(not the table->record[0] necessarily)
|
||||
*/
|
||||
int Field_bit::cmp_max(const uchar *a, const uchar *b, uint max_len)
|
||||
int Field_bit::cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len)
|
||||
{
|
||||
my_ptrdiff_t a_diff= a - ptr;
|
||||
my_ptrdiff_t b_diff= b - ptr;
|
||||
@ -10481,6 +10520,19 @@ void Column_definition::set_attributes(const Lex_field_type_st &type,
|
||||
set_handler(type.type_handler());
|
||||
charset= cs;
|
||||
|
||||
#if MYSQL_VERSION_ID > 100500
|
||||
#error When merging to 10.5, please move the code below to
|
||||
#error Type_handler_timestamp_common::Column_definition_set_attributes()
|
||||
#else
|
||||
/*
|
||||
Unlike other types TIMESTAMP fields are NOT NULL by default.
|
||||
Unless --explicit-defaults-for-timestamp is given.
|
||||
*/
|
||||
if (!opt_explicit_defaults_for_timestamp &&
|
||||
type.type_handler()->field_type() == MYSQL_TYPE_TIMESTAMP)
|
||||
flags|= NOT_NULL_FLAG;
|
||||
#endif
|
||||
|
||||
if (type.length())
|
||||
{
|
||||
int err;
|
||||
|
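The #if MYSQL_VERSION_ID > 100500 / #error block in the hunk above acts as a merge-time tripwire: the build fails as soon as the code is merged into the next series, forcing whoever does the merge to relocate it. A tiny self-contained sketch of the pattern; the version value below is made up for illustration, in the real tree MYSQL_VERSION_ID comes from the generated version header.

#include <cstdio>

#define MYSQL_VERSION_ID 100421          /* hypothetical value for this sketch */

#if MYSQL_VERSION_ID > 100500
#error Move this block to its new home as part of the merge
#else
static const int not_null_by_default= 1; /* placeholder for the code that must move later */
#endif

int main()
{
  std::printf("%d\n", not_null_by_default);
  return 0;
}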
30
sql/field.h
@ -280,7 +280,7 @@ protected:
|
||||
};
|
||||
|
||||
|
||||
// String-to-number convertion methods for the old code compatibility
|
||||
// String-to-number conversion methods for the old code compatibility
|
||||
longlong longlong_from_string_with_check(CHARSET_INFO *cs, const char *cptr,
|
||||
const char *end) const
|
||||
{
|
||||
@ -361,7 +361,7 @@ public:
|
||||
/*
|
||||
Item context attributes.
|
||||
Comparison functions pass their attributes to propagate_equal_fields().
|
||||
For exmple, for string comparison, the collation of the comparison
|
||||
For example, for string comparison, the collation of the comparison
|
||||
operation is important inside propagate_equal_fields().
|
||||
*/
|
||||
class Context
|
||||
@ -1089,9 +1089,13 @@ public:
|
||||
return type();
|
||||
}
|
||||
inline int cmp(const uchar *str) { return cmp(ptr,str); }
|
||||
virtual int cmp_max(const uchar *a, const uchar *b, uint max_len)
|
||||
{ return cmp(a, b); }
|
||||
virtual int cmp(const uchar *,const uchar *)=0;
|
||||
/*
|
||||
The following method is used for comparing prefix keys.
|
||||
Currently it's only used in partitioning.
|
||||
*/
|
||||
virtual int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len)
|
||||
{ return cmp(a, b); }
|
||||
virtual int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U)
|
||||
{ return memcmp(a,b,pack_length()); }
|
||||
virtual int cmp_offset(my_ptrdiff_t row_offset)
|
||||
@ -3702,11 +3706,8 @@ public:
|
||||
longlong val_int(void);
|
||||
String *val_str(String*,String *);
|
||||
my_decimal *val_decimal(my_decimal *);
|
||||
int cmp_max(const uchar *, const uchar *, uint max_length);
|
||||
int cmp(const uchar *a,const uchar *b)
|
||||
{
|
||||
return cmp_max(a, b, ~0U);
|
||||
}
|
||||
int cmp(const uchar *a,const uchar *b);
|
||||
int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len);
|
||||
void sort_string(uchar *buff,uint length);
|
||||
uint get_key_image(uchar *buff,uint length, imagetype type);
|
||||
void set_key_image(const uchar *buff,uint length);
|
||||
@ -3776,7 +3777,7 @@ private:
|
||||
{
|
||||
return (field_length - 1) / field_charset->mbmaxlen;
|
||||
}
|
||||
int cmp_max(const uchar *a_ptr, const uchar *b_ptr, uint max_len);
|
||||
int cmp(const uchar *a_ptr, const uchar *b_ptr);
|
||||
|
||||
/*
|
||||
Compressed fields can't have keys as two rows may have different
|
||||
@ -3944,9 +3945,8 @@ public:
|
||||
longlong val_int(void);
|
||||
String *val_str(String*,String *);
|
||||
my_decimal *val_decimal(my_decimal *);
|
||||
int cmp_max(const uchar *, const uchar *, uint max_length);
|
||||
int cmp(const uchar *a,const uchar *b)
|
||||
{ return cmp_max(a, b, ~0U); }
|
||||
int cmp(const uchar *a,const uchar *b);
|
||||
int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len);
|
||||
int cmp(const uchar *a, uint32 a_length, const uchar *b, uint32 b_length);
|
||||
int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U);
|
||||
int key_cmp(const uchar *,const uchar*);
|
||||
@ -4391,7 +4391,7 @@ private:
|
||||
This is the reason:
|
||||
- Field_bit::cmp_binary() is only implemented in the base class
|
||||
(Field::cmp_binary()).
|
||||
- Field::cmp_binary() currenly use pack_length() to calculate how
|
||||
- Field::cmp_binary() currently uses pack_length() to calculate how
|
||||
long the data is.
|
||||
- pack_length() includes size of the bits stored in the NULL bytes
|
||||
of the record.
|
||||
@ -4450,7 +4450,7 @@ public:
|
||||
}
|
||||
int cmp_binary_offset(uint row_offset)
|
||||
{ return cmp_offset(row_offset); }
|
||||
int cmp_max(const uchar *a, const uchar *b, uint max_length);
|
||||
int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len);
|
||||
int key_cmp(const uchar *a, const uchar *b)
|
||||
{ return cmp_binary((uchar *) a, (uchar *) b); }
|
||||
int key_cmp(const uchar *str, uint length);
|
||||
|
@ -230,7 +230,7 @@ static void do_skip(Copy_field *copy __attribute__((unused)))
|
||||
|
||||
note: if the record we're copying from is NULL-complemetned (i.e.
|
||||
from_field->table->null_row==1), it will also have all NULLable columns to be
|
||||
set to NULLs, so we dont need to check table->null_row here.
|
||||
set to NULLs, so we don't need to check table->null_row here.
|
||||
*/
|
||||
|
||||
static void do_copy_null(Copy_field *copy)
|
||||
|
@ -850,12 +850,12 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
|
||||
}
|
||||
if (!quick_select)
|
||||
{
|
||||
(void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */
|
||||
(void) file->extra(HA_EXTRA_NO_CACHE); /* End caching of records */
|
||||
if (!next_pos)
|
||||
file->ha_rnd_end();
|
||||
}
|
||||
|
||||
/* Signal we should use orignal column read and write maps */
|
||||
/* Signal we should use original column read and write maps */
|
||||
sort_form->column_bitmaps_set(save_read_set, save_write_set);
|
||||
|
||||
if (unlikely(thd->is_error()))
|
||||
|
@ -1877,7 +1877,7 @@ int Gcalc_scan_iterator::add_eq_node(Gcalc_heap::Info *node, point *sp)
|
||||
if (!en)
|
||||
GCALC_DBUG_RETURN(1);
|
||||
|
||||
/* eq_node iserted after teh equal point. */
|
||||
/* eq_node inserted after the equal point. */
|
||||
en->next= node->get_next();
|
||||
node->next= en;
|
||||
|
||||
|
@ -362,9 +362,9 @@ enum Gcalc_scan_events
|
||||
|
||||
|
||||
/*
|
||||
Gcalc_scan_iterator incapsulates the slisescan algorithm.
|
||||
It takes filled Gcalc_heap as an datasource. Then can be
|
||||
iterated trought the vertexes and intersection points with
|
||||
Gcalc_scan_iterator incapsulates the slicescan algorithm.
|
||||
It takes filled Gcalc_heap as a datasource. Then can be
|
||||
iterated through the vertexes and intersection points with
|
||||
the step() method. After the 'step()' one usually observes
|
||||
the current 'slice' to do the necessary calculations, like
|
||||
looking for intersections, calculating the area, whatever.
|
||||
|
@ -1184,14 +1184,14 @@ int Gcalc_operation_reducer::connect_threads(
|
||||
{
|
||||
rp0->outer_poly= prev_range->thread_start;
|
||||
tb->thread_start= prev_range->thread_start;
|
||||
/* Chack if needed */
|
||||
/* Check if needed */
|
||||
ta->thread_start= prev_range->thread_start;
|
||||
}
|
||||
else
|
||||
{
|
||||
rp0->outer_poly= 0;
|
||||
ta->thread_start= rp0;
|
||||
/* Chack if needed */
|
||||
/* Check if needed */
|
||||
tb->thread_start= rp0;
|
||||
}
|
||||
GCALC_DBUG_RETURN(0);
|
||||
|
@ -1480,7 +1480,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
|
||||
|
||||
|
||||
/**
|
||||
@brief Check and repair the table if neccesary
|
||||
@brief Check and repair the table if necessary
|
||||
|
||||
@param thd Thread object
|
||||
|
||||
@ -2969,7 +2969,7 @@ error:
|
||||
/**
|
||||
Read the .par file to get the partitions engines and names
|
||||
|
||||
@param name Name of table file (without extention)
|
||||
@param name Name of table file (without extension)
|
||||
|
||||
@return Operation status
|
||||
@retval true Failure
|
||||
@ -3199,7 +3199,7 @@ static uchar *get_part_name(PART_NAME_DEF *part, size_t *length,
|
||||
|
||||
@return Operation status
|
||||
@retval true Failure
|
||||
@retval false Sucess
|
||||
@retval false Success
|
||||
*/
|
||||
|
||||
bool ha_partition::insert_partition_name_in_hash(const char *name, uint part_id,
|
||||
@ -3325,7 +3325,7 @@ err:
|
||||
|
||||
@return Operation status
|
||||
@retval true Failure
|
||||
@retval false Sucess
|
||||
@retval false Success
|
||||
*/
|
||||
|
||||
bool ha_partition::set_ha_share_ref(Handler_share **ha_share_arg)
|
||||
@ -4298,7 +4298,7 @@ int ha_partition::write_row(const uchar * buf)
|
||||
/*
|
||||
If we have failed to set the auto-increment value for this row,
|
||||
it is highly likely that we will not be able to insert it into
|
||||
the correct partition. We must check and fail if neccessary.
|
||||
the correct partition. We must check and fail if necessary.
|
||||
*/
|
||||
if (unlikely(error))
|
||||
goto exit;
|
||||
@ -4369,7 +4369,7 @@ exit:
|
||||
have the previous row record in it, while new_data will have the newest
|
||||
data in it.
|
||||
Keep in mind that the server can do updates based on ordering if an
|
||||
ORDER BY clause was used. Consecutive ordering is not guarenteed.
|
||||
ORDER BY clause was used. Consecutive ordering is not guaranteed.
|
||||
|
||||
Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
|
||||
new_data is always record[0]
|
||||
@ -4502,7 +4502,7 @@ exit:
|
||||
(from either a previous rnd_xxx() or index_xxx() call).
|
||||
If you keep a pointer to the last row or can access a primary key it will
|
||||
make doing the deletion quite a bit easier.
|
||||
Keep in mind that the server does no guarentee consecutive deletions.
|
||||
Keep in mind that the server does no guarantee consecutive deletions.
|
||||
ORDER BY clauses can be used.
|
||||
|
||||
Called in sql_acl.cc and sql_udf.cc to manage internal table information.
|
||||
@ -4921,7 +4921,7 @@ int ha_partition::end_bulk_insert()
|
||||
|
||||
When scan is used we will scan one handler partition at a time.
|
||||
When preparing for rnd_pos we will init all handler partitions.
|
||||
No extra cache handling is needed when scannning is not performed.
|
||||
No extra cache handling is needed when scanning is not performed.
|
||||
|
||||
Before initialising we will call rnd_end to ensure that we clean up from
|
||||
any previous incarnation of a table scan.
|
||||
@ -8630,7 +8630,7 @@ static int end_keyread_cb(handler* h, void *unused)
|
||||
function after completing a query.
|
||||
3) It is called when deleting the QUICK_RANGE_SELECT object if the
|
||||
QUICK_RANGE_SELECT object had its own handler object. It is called
|
||||
immediatley before close of this local handler object.
|
||||
immediately before close of this local handler object.
|
||||
HA_EXTRA_KEYREAD:
|
||||
HA_EXTRA_NO_KEYREAD:
|
||||
These parameters are used to provide an optimisation hint to the handler.
|
||||
@ -8667,7 +8667,7 @@ static int end_keyread_cb(handler* h, void *unused)
|
||||
HA_EXTRA_IGNORE_DUP_KEY:
|
||||
HA_EXTRA_NO_IGNORE_DUP_KEY:
|
||||
Informs the handler to we will not stop the transaction if we get an
|
||||
duplicate key errors during insert/upate.
|
||||
duplicate key errors during insert/update.
|
||||
Always called in pair, triggered by INSERT IGNORE and other similar
|
||||
SQL constructs.
|
||||
Not used by MyISAM.
|
||||
@ -10158,7 +10158,7 @@ bool ha_partition::prepare_inplace_alter_table(TABLE *altered_table,
|
||||
|
||||
/*
|
||||
Changing to similar partitioning, only update metadata.
|
||||
Non allowed changes would be catched in prep_alter_part_table().
|
||||
Non allowed changes would be caught in prep_alter_part_table().

|
||||
*/
|
||||
if (ha_alter_info->alter_info->partition_flags == ALTER_PARTITION_INFO)
|
||||
{
|
||||
@ -10194,7 +10194,7 @@ bool ha_partition::inplace_alter_table(TABLE *altered_table,
|
||||
|
||||
/*
|
||||
Changing to similar partitioning, only update metadata.
|
||||
Non allowed changes would be catched in prep_alter_part_table().
|
||||
Non allowed changes would be caught in prep_alter_part_table().
|
||||
*/
|
||||
if (ha_alter_info->alter_info->partition_flags == ALTER_PARTITION_INFO)
|
||||
{
|
||||
@ -10242,7 +10242,7 @@ bool ha_partition::commit_inplace_alter_table(TABLE *altered_table,
|
||||
|
||||
/*
|
||||
Changing to similar partitioning, only update metadata.
|
||||
Non allowed changes would be catched in prep_alter_part_table().
|
||||
Non allowed changes would be caught in prep_alter_part_table().
|
||||
*/
|
||||
if (ha_alter_info->alter_info->partition_flags == ALTER_PARTITION_INFO)
|
||||
{
|
||||
|
@ -512,7 +512,7 @@ public:
|
||||
-------------------------------------------------------------------------
|
||||
MODULE create/delete handler object
|
||||
-------------------------------------------------------------------------
|
||||
Object create/delete methode. The normal called when a table object
|
||||
Object create/delete method. Normally called when a table object
|
||||
exists. There is also a method to create the handler object with only
|
||||
partition information. This is used from mysql_create_table when the
|
||||
table is to be created and the engine type is deduced to be the
|
||||
@ -826,7 +826,7 @@ public:
|
||||
|
||||
/**
|
||||
@breif
|
||||
Positions an index cursor to the index specified in the hanlde. Fetches the
|
||||
Positions an index cursor to the index specified in the handle. Fetches the
|
||||
row if available. If the key value is null, begin at first key of the
|
||||
index.
|
||||
*/
|
||||
@ -1124,7 +1124,7 @@ public:
|
||||
|
||||
HA_REC_NOT_IN_SEQ:
|
||||
This flag is set for handlers that cannot guarantee that the rows are
|
||||
returned accroding to incremental positions (0, 1, 2, 3...).
|
||||
returned according to incremental positions (0, 1, 2, 3...).
|
||||
This also means that rnd_next() should return HA_ERR_RECORD_DELETED
|
||||
if it finds a deleted row.
|
||||
(MyISAM (not fixed length row), HEAP, InnoDB)
|
||||
|
@ -745,7 +745,7 @@ int ha_end()
|
||||
DBUG_ENTER("ha_end");
|
||||
|
||||
/*
|
||||
This should be eventualy based on the graceful shutdown flag.
|
||||
This should be eventually based on the graceful shutdown flag.
|
||||
So if flag is equal to HA_PANIC_CLOSE, the deallocate
|
||||
the errors.
|
||||
*/
|
||||
@ -1406,8 +1406,8 @@ int ha_commit_trans(THD *thd, bool all)
|
||||
THD_TRANS *trans= all ? &thd->transaction.all : &thd->transaction.stmt;
|
||||
/*
|
||||
"real" is a nick name for a transaction for which a commit will
|
||||
make persistent changes. E.g. a 'stmt' transaction inside a 'all'
|
||||
transation is not 'real': even though it's possible to commit it,
|
||||
make persistent changes. E.g. a 'stmt' transaction inside an 'all'
|
||||
transaction is not 'real': even though it's possible to commit it,
|
||||
the changes are not durable as they might be rolled back if the
|
||||
enclosing 'all' transaction is rolled back.
|
||||
*/
|
||||
@ -2672,7 +2672,7 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root)
|
||||
|
||||
/*
|
||||
TODO: Implement a more efficient way to have more than one index open for
|
||||
the same table instance. The ha_open call is not cachable for clone.
|
||||
the same table instance. The ha_open call is not cacheable for clone.
|
||||
|
||||
This is not critical as the engines already have the table open
|
||||
and should be able to use the original instance of the table.
|
||||
@ -3529,7 +3529,7 @@ int handler::update_auto_increment()
|
||||
index_init() or rnd_init() and in any column_bitmaps_signal() call after
|
||||
this.
|
||||
|
||||
The handler is allowd to do changes to the bitmap after a index_init or
|
||||
The handler is allowed to do changes to the bitmap after a index_init or
|
||||
rnd_init() call is made as after this, MySQL will not use the bitmap
|
||||
for any program logic checking.
|
||||
*/
|
||||
@ -3592,7 +3592,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
|
||||
{ // Autoincrement at key-start
|
||||
error= ha_index_last(table->record[1]);
|
||||
/*
|
||||
MySQL implicitely assumes such method does locking (as MySQL decides to
|
||||
MySQL implicitly assumes such method does locking (as MySQL decides to
|
||||
use nr+increment without checking again with the handler, in
|
||||
handler::update_auto_increment()), so reserves to infinite.
|
||||
*/
|
||||
@ -6615,7 +6615,7 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h,
|
||||
DBUG_ASSERT(fnc->arguments()[0]->type() == Item::FIELD_ITEM);
|
||||
t_field= static_cast<Item_field *>(fnc->arguments()[0])->field;
|
||||
uint length= (uint)fnc->arguments()[1]->val_int();
|
||||
if (t_field->cmp_max(t_field->ptr, t_field->ptr + diff, length))
|
||||
if (t_field->cmp_prefix(t_field->ptr, t_field->ptr + diff, length))
|
||||
is_same= false;
|
||||
}
|
||||
}
|
||||
|
@ -231,7 +231,7 @@ enum enum_alter_inplace_result {
|
||||
this flag must implement start_read_removal() and end_read_removal().
|
||||
The handler may return "fake" rows constructed from the key of the row
|
||||
asked for. This is used to optimize UPDATE and DELETE by reducing the
|
||||
numer of roundtrips between handler and storage engine.
|
||||
number of roundtrips between handler and storage engine.
|
||||
|
||||
Example:
|
||||
UPDATE a=1 WHERE pk IN (<keys>)
|
||||
@ -560,7 +560,7 @@ enum enum_binlog_command {
|
||||
|
||||
/* Bits in used_fields */
|
||||
#define HA_CREATE_USED_AUTO (1UL << 0)
|
||||
#define HA_CREATE_USED_RAID (1UL << 1) //RAID is no longer availble
|
||||
#define HA_CREATE_USED_RAID (1UL << 1) //RAID is no longer available
|
||||
#define HA_CREATE_USED_UNION (1UL << 2)
|
||||
#define HA_CREATE_USED_INSERT_METHOD (1UL << 3)
|
||||
#define HA_CREATE_USED_MIN_ROWS (1UL << 4)
|
||||
@ -1221,7 +1221,7 @@ struct handler_iterator {
|
||||
/*
|
||||
Pointer to buffer for the iterator to use.
|
||||
Should be allocated by function which created the iterator and
|
||||
destroied by freed by above "destroy" call
|
||||
destroyed by freed by above "destroy" call
|
||||
*/
|
||||
void *buffer;
|
||||
};
|
||||
@ -1439,7 +1439,7 @@ struct handlerton
|
||||
"cookie".
|
||||
|
||||
The flush and call of commit_checkpoint_notify_ha() need not happen
|
||||
immediately - it can be scheduled and performed asynchroneously (ie. as
|
||||
immediately - it can be scheduled and performed asynchronously (ie. as
|
||||
part of next prepare(), or sync every second, or whatever), but should
|
||||
not be postponed indefinitely. It is however also permissible to do it
|
||||
immediately, before returning from commit_checkpoint_request().
|
||||
@ -1529,7 +1529,7 @@ struct handlerton
|
||||
file extention. This is implied by the open_table_error()
|
||||
and the default discovery implementation.
|
||||
|
||||
Second element - data file extention. This is implied
|
||||
Second element - data file extension. This is implied
|
||||
assumed by REPAIR TABLE ... USE_FRM implementation.
|
||||
*/
|
||||
const char **tablefile_extensions; // by default - empty list
|
||||
@ -2223,7 +2223,7 @@ struct HA_CREATE_INFO: public Table_scope_and_contents_source_st,
|
||||
CONVERT TO CHARACTER SET DEFAULT
|
||||
to
|
||||
CONVERT TO CHARACTER SET <character-set-of-the-current-database>
|
||||
TODO: Should't we postpone resolution of DEFAULT until the
|
||||
TODO: Shouldn't we postpone resolution of DEFAULT until the
|
||||
character set of the table owner database is loaded from its db.opt?
|
||||
*/
|
||||
DBUG_ASSERT(cs);
|
||||
@ -3017,7 +3017,7 @@ public:
|
||||
ha_statistics stats;
|
||||
|
||||
/** MultiRangeRead-related members: */
|
||||
range_seq_t mrr_iter; /* Interator to traverse the range sequence */
|
||||
range_seq_t mrr_iter; /* Iterator to traverse the range sequence */
|
||||
RANGE_SEQ_IF mrr_funcs; /* Range sequence traversal functions */
|
||||
HANDLER_BUFFER *multi_range_buffer; /* MRR buffer info */
|
||||
uint ranges_in_seq; /* Total number of ranges in the traversed sequence */
|
||||
@ -4022,7 +4022,7 @@ public:
|
||||
This method offers the storage engine, the possibility to store a reference
|
||||
to a table name which is going to be used with query cache.
|
||||
The method is called each time a statement is written to the cache and can
|
||||
be used to verify if a specific statement is cachable. It also offers
|
||||
be used to verify if a specific statement is cacheable. It also offers
|
||||
the possibility to register a generic (but static) call back function which
|
||||
is called each time a statement is matched against the query cache.
|
||||
|
||||
|
12
sql/item.cc
@ -5011,7 +5011,7 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
|
||||
|
||||
@note
|
||||
We have to mark all items between current_sel (including) and
|
||||
last_select (excluding) as dependend (select before last_select should
|
||||
last_select (excluding) as dependent (select before last_select should
|
||||
be marked with actual table mask used by resolved item, all other with
|
||||
OUTER_REF_TABLE_BIT) and also write dependence information to Item of
|
||||
resolved identifier.
|
||||
@ -5387,7 +5387,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
|
||||
bool upward_lookup= FALSE;
|
||||
TABLE_LIST *table_list;
|
||||
|
||||
/* Calulate the TABLE_LIST for the table */
|
||||
/* Calculate the TABLE_LIST for the table */
|
||||
table_list= (cached_table ? cached_table :
|
||||
field_found && (*from_field) != view_ref_found ?
|
||||
(*from_field)->table->pos_in_table_list : 0);
|
||||
@ -6123,7 +6123,7 @@ Item *Item_field::propagate_equal_fields(THD *thd,
|
||||
but failed to create a valid DATE literal from the given string literal.
|
||||
|
||||
Do not do constant propagation in such cases and unlink
|
||||
"this" from the found Item_equal (as this equality not usefull).
|
||||
"this" from the found Item_equal (as this equality not useful).
|
||||
*/
|
||||
item_equal= NULL;
|
||||
return this;
|
||||
@ -7844,7 +7844,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
|
||||
/*
|
||||
Due to cache, find_field_in_tables() can return field which
|
||||
doesn't belong to provided outer_context. In this case we have
|
||||
to find proper field context in order to fix field correcly.
|
||||
to find proper field context in order to fix field correctly.
|
||||
*/
|
||||
do
|
||||
{
|
||||
@ -8029,9 +8029,9 @@ Item* Item_ref::transform(THD *thd, Item_transformer transformer, uchar *arg)
|
||||
callback functions.
|
||||
|
||||
First the function applies the analyzer to the Item_ref object. Then
|
||||
if the analizer succeeeds we first applies the compile method to the
|
||||
if the analyzer succeeds we first apply the compile method to the
|
||||
object the Item_ref object is referencing. If this returns a new
|
||||
item the old item is substituted for a new one. After this the
|
||||
item the old item is substituted for a new one. After this the
|
||||
transformer is applied to the Item_ref object itself.
|
||||
The compile function is not called if the analyzer returns NULL
|
||||
in the parameter arg_p.
|
||||
|
17
sql/item.h
@ -164,7 +164,7 @@ void dummy_error_processor(THD *thd, void *data);
|
||||
void view_error_processor(THD *thd, void *data);
|
||||
|
||||
/*
|
||||
Instances of Name_resolution_context store the information necesary for
|
||||
Instances of Name_resolution_context store the information necessary for
|
||||
name resolution of Items and other context analysis of a query made in
|
||||
fix_fields().
|
||||
|
||||
@ -344,7 +344,7 @@ public:
|
||||
Monotonicity is defined only for Item* trees that represent table
|
||||
partitioning expressions (i.e. have no subselects/user vars/PS parameters
|
||||
etc etc). An Item* tree is assumed to have the same monotonicity properties
|
||||
as its correspoinding function F:
|
||||
as its corresponding function F:
|
||||
|
||||
[signed] longlong F(field1, field2, ...) {
|
||||
put values of field_i into table record buffer;
|
||||
@ -1131,7 +1131,7 @@ public:
|
||||
/*
|
||||
real_type() is the type of base item. This is same as type() for
|
||||
most items, except Item_ref() and Item_cache_wrapper() where it
|
||||
shows the type for the underlaying item.
|
||||
shows the type for the underlying item.
|
||||
*/
|
||||
virtual enum Type real_type() const { return type(); }
|
||||
|
||||
@ -1277,7 +1277,7 @@ public:
|
||||
The caller can modify the returned String, if it's not marked
|
||||
"const" (with the String::mark_as_const() method). That means that
|
||||
if the item returns its own internal buffer (e.g. tmp_value), it
|
||||
*must* be marked "const" [1]. So normally it's preferrable to
|
||||
*must* be marked "const" [1]. So normally it's preferable to
|
||||
return the result value in the String, that was passed as an
|
||||
argument. But, for example, SUBSTR() returns a String that simply
|
||||
points into the buffer of SUBSTR()'s args[0]->val_str(). Such a
|
||||
@ -1755,7 +1755,7 @@ public:
|
||||
@param cond_ptr[OUT] Store a replacement item here if the condition
|
||||
can be simplified, e.g.:
|
||||
WHERE part1 OR part2 OR part3
|
||||
with one of the partN evalutating to SEL_TREE::ALWAYS.
|
||||
with one of the partN evaluating to SEL_TREE::ALWAYS.
|
||||
*/
|
||||
virtual SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr);
|
||||
/*
|
||||
@ -2292,8 +2292,9 @@ public:
|
||||
virtual bool is_outer_field() const { DBUG_ASSERT(is_fixed()); return FALSE; }
|
||||
|
||||
/**
|
||||
Checks if this item or any of its decendents contains a subquery. This is a
|
||||
replacement of the former Item::has_subquery() and Item::with_subselect.
|
||||
Checks if this item or any of its descendents contains a subquery.
|
||||
This is a replacement of the former Item::has_subquery() and
|
||||
Item::with_subselect.
|
||||
*/
|
||||
virtual bool with_subquery() const { DBUG_ASSERT(is_fixed()); return false; }
|
||||
|
||||
@ -6055,7 +6056,7 @@ public:
|
||||
|
||||
This is the method that updates the cached value.
|
||||
It must be explicitly called by the user of this class to store the value
|
||||
of the orginal item in the cache.
|
||||
of the original item in the cache.
|
||||
*/
|
||||
virtual void copy() = 0;
|
||||
|
||||
|
@ -192,7 +192,7 @@ bool Cached_item_field::cmp(void)
|
||||
|
||||
/*
|
||||
If value is not null and value changed (from null to not null or
|
||||
becasue of value change), then copy the new value to buffer.
|
||||
because of value change), then copy the new value to buffer.
|
||||
*/
|
||||
if (! null_value && (tmp || (tmp= (field->cmp(buff) != 0))))
|
||||
field->get_image(buff,length,field->charset());
|
||||
|
@ -1396,7 +1396,7 @@ bool Item_in_optimizer::fix_fields(THD *thd, Item **ref)
|
||||
@note
|
||||
Item_in_optimizer should work as pass-through for
|
||||
- subqueries that were processed by ALL/ANY->MIN/MAX rewrite
|
||||
- subqueries taht were originally EXISTS subqueries (and were coverted by
|
||||
- subqueries that were originally EXISTS subqueries (and were converted by
|
||||
the EXISTS->IN rewrite)
|
||||
|
||||
When Item_in_optimizer is not not working as a pass-through, it
|
||||
@ -1986,8 +1986,8 @@ longlong Item_func_interval::val_int()
|
||||
interval_range *range= intervals + mid;
|
||||
my_bool cmp_result;
|
||||
/*
|
||||
The values in the range intervall may have different types,
|
||||
Only do a decimal comparision of the first argument is a decimal
|
||||
The values in the range interval may have different types,
|
||||
Only do a decimal comparison if the first argument is a decimal
|
||||
and we are comparing against a decimal
|
||||
*/
|
||||
if (dec && range->type == DECIMAL_RESULT)
|
||||
@ -2619,7 +2619,7 @@ Item_func_nullif::fix_length_and_dec()
|
||||
Some examples of what NULLIF can end up with after argument
|
||||
substitution (we don't mention args[1] in some cases for simplicity):
|
||||
|
||||
1. l_expr is not an aggragate function:
|
||||
1. l_expr is not an aggregate function:
|
||||
|
||||
a. No conversion happened.
|
||||
args[0] and args[2] were not replaced to something else
|
||||
@ -2743,7 +2743,7 @@ Item_func_nullif::fix_length_and_dec()
|
||||
In this case we remember and reuse m_arg0 during EXECUTE time as args[2].
|
||||
|
||||
QQ: How to make sure that m_args0 does not point
|
||||
to something temporary which will be destoyed between PREPARE and EXECUTE.
|
||||
to something temporary which will be destroyed between PREPARE and EXECUTE.
|
||||
The condition below should probably be more strict and somehow check that:
|
||||
- change_item_tree() was called for the new args[0]
|
||||
- m_args0 is referenced from inside args[0], e.g. as a function argument,
|
||||
@ -7154,7 +7154,7 @@ Item* Item_equal::get_first(JOIN_TAB *context, Item *field_item)
|
||||
and not ot2.col.
|
||||
|
||||
eliminate_item_equal() also has code that deals with equality substitution
|
||||
in presense of SJM nests.
|
||||
in presence of SJM nests.
|
||||
*/
|
||||
|
||||
TABLE_LIST *emb_nest;
|
||||
|
@ -180,7 +180,7 @@ protected:
|
||||
/*
|
||||
Return the full select tree for "field_item" and "value":
|
||||
- a single SEL_TREE if the field is not in a multiple equality, or
|
||||
- a conjuction of all SEL_TREEs for all fields from
|
||||
- a conjunction of all SEL_TREEs for all fields from
|
||||
the same multiple equality with "field_item".
|
||||
*/
|
||||
SEL_TREE *get_full_func_mm_tree(RANGE_OPT_PARAM *param,
|
||||
|
@ -506,7 +506,7 @@ Item *Item_func::transform(THD *thd, Item_transformer transformer, uchar *argume
|
||||
callback functions.
|
||||
|
||||
First the function applies the analyzer to the root node of
|
||||
the Item_func object. Then if the analizer succeeeds (returns TRUE)
|
||||
the Item_func object. Then if the analyzer succeeds (returns TRUE)
|
||||
the function recursively applies the compile method to each argument
|
||||
of the Item_func node.
|
||||
If the call of the method for an argument item returns a new item
|
||||
@ -1482,13 +1482,14 @@ double Item_func_div::real_op()
|
||||
my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value)
|
||||
{
|
||||
int err;
|
||||
my_decimal tmp;
|
||||
VDec2_lazy val(args[0], args[1]);
|
||||
if ((null_value= val.has_null()))
|
||||
return 0;
|
||||
if ((err= check_decimal_overflow(my_decimal_div(E_DEC_FATAL_ERROR &
|
||||
~E_DEC_OVERFLOW &
|
||||
~E_DEC_DIV_ZERO,
|
||||
decimal_value,
|
||||
&tmp,
|
||||
val.m_a.ptr(), val.m_b.ptr(),
|
||||
prec_increment))) > 3)
|
||||
{
|
||||
@ -1497,6 +1498,7 @@ my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value)
|
||||
null_value= 1;
|
||||
return 0;
|
||||
}
|
||||
tmp.round_to(decimal_value, decimals, HALF_UP);
|
||||
return decimal_value;
|
||||
}
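The added tmp.round_to(decimal_value, decimals, HALF_UP) call appears to be what changes division results such as 250.0275 -> 250.028 and 1.2525 -> 1.253 in the test results earlier in this diff, where the extra digit was previously truncated. A minimal scaled-integer sketch of half-up rounding, since my_decimal stores digits exactly and a double-based illustration would misround these exact values; the helper name is illustrative, not MariaDB's.

#include <cstdint>
#include <cstdio>

/* Drop 'digits' trailing decimal digits from a non-negative scaled integer,
   rounding half-up, e.g. 2500275 (250.0275) -> 250028 (250.028). */
static int64_t round_half_up(int64_t scaled, int digits)
{
  int64_t p= 1;
  for (int i= 0; i < digits; i++)
    p*= 10;
  return (scaled + p / 2) / p;
}

int main()
{
  std::printf("%lld\n", (long long) round_half_up(2500275, 1)); /* 250028 */
  std::printf("%lld\n", (long long) round_half_up(12525, 1));   /* 1253 */
  return 0;
}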
|
||||
|
||||
@ -1553,7 +1555,7 @@ bool Item_func_div::fix_length_and_dec()
|
||||
DBUG_ENTER("Item_func_div::fix_length_and_dec");
|
||||
DBUG_PRINT("info", ("name %s", func_name()));
|
||||
prec_increment= current_thd->variables.div_precincrement;
|
||||
maybe_null= 1; // devision by zero
|
||||
maybe_null= 1; // division by zero
|
||||
|
||||
const Type_aggregator *aggregator= &type_handler_data->m_type_aggregator_for_div;
|
||||
DBUG_EXECUTE_IF("num_op", aggregator= &type_handler_data->m_type_aggregator_non_commutative_test;);
|
||||
@ -4685,7 +4687,7 @@ bool Item_func_set_user_var::register_field_in_bitmap(void *arg)
|
||||
@param type type of new value
|
||||
@param cs charset info for new value
|
||||
@param dv derivation for new value
|
||||
@param unsigned_arg indiates if a value of type INT_RESULT is unsigned
|
||||
@param unsigned_arg indicates if a value of type INT_RESULT is unsigned
|
||||
|
||||
@note Sets error and fatal error if allocation fails.
|
||||
|
||||
@ -6585,7 +6587,7 @@ Item_func_sp::fix_fields(THD *thd, Item **ref)
|
||||
/*
|
||||
Here we check privileges of the stored routine only during view
|
||||
creation, in order to validate the view. A runtime check is
|
||||
perfomed in Item_func_sp::execute(), and this method is not
|
||||
performed in Item_func_sp::execute(), and this method is not
|
||||
called during context analysis. Notice, that during view
|
||||
creation we do not infer into stored routine bodies and do not
|
||||
check privileges of its statements, which would probably be a
|
||||
|
@ -372,7 +372,7 @@ bool Inet6::make_from_item(Item *item)
|
||||
|
||||
@return Completion status.
|
||||
@retval true - error, the given string does not represent an IPv4-address.
|
||||
@retval false - ok, the string has been converted sucessfully.
|
||||
@retval false - ok, the string has been converted successfully.
|
||||
|
||||
@note The problem with inet_pton() is that it treats leading zeros in
|
||||
IPv4-part differently on different platforms.
|
||||
@ -496,7 +496,7 @@ bool Inet4::ascii_to_ipv4(const char *str, size_t str_length)
|
||||
|
||||
@return Completion status.
|
||||
@retval true - error, the given string does not represent an IPv6-address.
|
||||
@retval false - ok, the string has been converted sucessfully.
|
||||
@retval false - ok, the string has been converted successfully.
|
||||
|
||||
@note The problem with inet_pton() is that it treats leading zeros in
|
||||
IPv4-part differently on different platforms.
|
||||
@ -851,7 +851,7 @@ size_t Inet6::to_string(char *dst, size_t dstsize) const
|
||||
|
||||
@return Completion status.
|
||||
@retval NULL Given string does not represent an IP-address.
|
||||
@retval !NULL The string has been converted sucessfully.
|
||||
@retval !NULL The string has been converted successfully.
|
||||
*/
|
||||
|
||||
String *Item_func_inet6_aton::val_str(String *buffer)
|
||||
|
@ -989,7 +989,7 @@ String *Item_func_concat_ws::val_str(String *str)
|
||||
goto null; // Must be a blob
|
||||
}
|
||||
else if (res2 == &tmp_value)
|
||||
{ // This can happend only 1 time
|
||||
{ // This can happen only 1 time
|
||||
if (tmp_value.replace(0,0,*sep_str) || tmp_value.replace(0,0,*res))
|
||||
goto null;
|
||||
res= &tmp_value;
|
||||
@ -1139,7 +1139,7 @@ bool Item_func_reverse::fix_length_and_dec()
|
||||
}
|
||||
|
||||
/**
|
||||
Replace all occurences of string2 in string1 with string3.
|
||||
Replace all occurrences of string2 in string1 with string3.
|
||||
|
||||
Don't reallocate val_str() if not needed.
|
||||
|
||||
@ -3999,7 +3999,7 @@ bool Item_func_export_set::fix_length_and_dec()
|
||||
using in a SQL statement.
|
||||
|
||||
Adds a \\ before all characters that needs to be escaped in a SQL string.
|
||||
We also escape '^Z' (END-OF-FILE in windows) to avoid probelms when
|
||||
We also escape '^Z' (END-OF-FILE in windows) to avoid problems when
|
||||
running commands from a file in windows.
|
||||
|
||||
This function is very useful when you want to generate SQL statements.
|
||||
|
@ -1135,12 +1135,12 @@ void Item_singlerow_subselect::reset()
|
||||
|
||||
/**
|
||||
@todo
|
||||
- We cant change name of Item_field or Item_ref, because it will
|
||||
prevent it's correct resolving, but we should save name of
|
||||
- We can't change name of Item_field or Item_ref, because it will
|
||||
prevent its correct resolving, but we should save name of
|
||||
removed item => we do not make optimization if top item of
|
||||
list is field or reference.
|
||||
- switch off this optimization for prepare statement,
|
||||
because we do not rollback this changes.
|
||||
because we do not rollback these changes.
|
||||
Make rollback for it, or special name resolving mode in 5.0.
|
||||
|
||||
@param join Join object of the subquery (i.e. 'child' join).
|
||||
@ -1163,8 +1163,8 @@ Item_singlerow_subselect::select_transformer(JOIN *join)
|
||||
select_lex->item_list.elements == 1 &&
|
||||
!select_lex->item_list.head()->with_sum_func() &&
|
||||
/*
|
||||
We cant change name of Item_field or Item_ref, because it will
|
||||
prevent it's correct resolving, but we should save name of
|
||||
We can't change name of Item_field or Item_ref, because it will
|
||||
prevent its correct resolving, but we should save name of
|
||||
removed item => we do not make optimization if top item of
|
||||
list is field or reference.
|
||||
TODO: solve above problem
|
||||
@ -1662,7 +1662,7 @@ longlong Item_exists_subselect::val_int()
|
||||
Return the result of EXISTS as a string value
|
||||
|
||||
Converts the true/false result into a string value.
|
||||
Note that currently this cannot be NULL, so if the query exection fails
|
||||
Note that currently this cannot be NULL, so if the query execution fails
|
||||
it will return 0.
|
||||
|
||||
@param decimal_value[out] buffer to hold the resulting string value
|
||||
@ -1685,7 +1685,7 @@ String *Item_exists_subselect::val_str(String *str)
|
||||
Return the result of EXISTS as a decimal value
|
||||
|
||||
Converts the true/false result into a decimal value.
|
||||
Note that currently this cannot be NULL, so if the query exection fails
|
||||
Note that currently this cannot be NULL, so if the query execution fails
|
||||
it will return 0.
|
||||
|
||||
@param decimal_value[out] Buffer to hold the resulting decimal value
|
||||
@ -2383,7 +2383,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
|
||||
is_not_null_test(v3))
|
||||
where is_not_null_test registers NULLs values but reject rows.
|
||||
|
||||
in case when we do not need correct NULL, we have simplier construction:
|
||||
in case when we do not need correct NULL, we have simpler construction:
|
||||
EXISTS (SELECT ... WHERE where and
|
||||
(l1 = v1) and
|
||||
(l2 = v2) and
|
||||
@ -2786,6 +2786,8 @@ bool Item_exists_subselect::select_prepare_to_be_in()
|
||||
Check if 'func' is an equality in form "inner_table.column = outer_expr"
|
||||
|
||||
@param func Expression to check
|
||||
@param allow_subselect If true, the outer_expr part can have a subquery
|
||||
If false, it cannot.
|
||||
@param local_field OUT Return "inner_table.column" here
|
||||
@param outer_expr OUT Return outer_expr here
|
||||
|
||||
@ -2793,6 +2795,7 @@ bool Item_exists_subselect::select_prepare_to_be_in()
|
||||
*/
|
||||
|
||||
static bool check_equality_for_exist2in(Item_func *func,
|
||||
bool allow_subselect,
|
||||
Item_ident **local_field,
|
||||
Item **outer_exp)
|
||||
{
|
||||
@ -2803,7 +2806,8 @@ static bool check_equality_for_exist2in(Item_func *func,
|
||||
args= func->arguments();
|
||||
if (args[0]->real_type() == Item::FIELD_ITEM &&
|
||||
args[0]->all_used_tables() != OUTER_REF_TABLE_BIT &&
|
||||
args[1]->all_used_tables() == OUTER_REF_TABLE_BIT)
|
||||
args[1]->all_used_tables() == OUTER_REF_TABLE_BIT &&
|
||||
(allow_subselect || !args[1]->with_subquery()))
|
||||
{
|
||||
/* It is Item_field or Item_direct_view_ref) */
|
||||
DBUG_ASSERT(args[0]->type() == Item::FIELD_ITEM ||
|
||||
@ -2814,7 +2818,8 @@ static bool check_equality_for_exist2in(Item_func *func,
|
||||
}
|
||||
else if (args[1]->real_type() == Item::FIELD_ITEM &&
|
||||
args[1]->all_used_tables() != OUTER_REF_TABLE_BIT &&
|
||||
args[0]->all_used_tables() == OUTER_REF_TABLE_BIT)
|
||||
args[0]->all_used_tables() == OUTER_REF_TABLE_BIT &&
|
||||
(allow_subselect || !args[0]->with_subquery()))
|
||||
{
|
||||
/* It is Item_field or Item_direct_view_ref) */
|
||||
DBUG_ASSERT(args[1]->type() == Item::FIELD_ITEM ||
|
||||
@ -2843,6 +2848,13 @@ typedef struct st_eq_field_outer
|
||||
|
||||
outer1=inner_tbl1.col1 AND ... AND outer2=inner_tbl1.col2 AND remainder_cond
|
||||
|
||||
if there is just one outer_expr=inner_expr pair, then outer_expr can have a
|
||||
subselect in it. If there are many such pairs, then none of outer_expr can
|
||||
have a subselect in it. If we allow this, the query will fail with an error:
|
||||
|
||||
This version of MariaDB doesn't yet support 'SUBQUERY in ROW in left
|
||||
expression of IN/ALL/ANY'
|
||||
|
||||
@param conds Condition to be checked
|
||||
@parm result Array to collect EQ_FIELD_OUTER elements describing
|
||||
inner-vs-outer equalities the function has found.
|
||||
@ -2860,14 +2872,17 @@ static bool find_inner_outer_equalities(Item **conds,
|
||||
{
|
||||
List_iterator<Item> li(*((Item_cond*)*conds)->argument_list());
|
||||
Item *item;
|
||||
bool allow_subselect= true;
|
||||
while ((item= li++))
|
||||
{
|
||||
if (item->type() == Item::FUNC_ITEM &&
|
||||
check_equality_for_exist2in((Item_func *)item,
|
||||
allow_subselect,
|
||||
&element.local_field,
|
||||
&element.outer_exp))
|
||||
{
|
||||
found= TRUE;
|
||||
allow_subselect= false;
|
||||
element.eq_ref= li.ref();
|
||||
if (result.append(element))
|
||||
goto alloc_err;
|
||||
@ -2876,6 +2891,7 @@ static bool find_inner_outer_equalities(Item **conds,
|
||||
}
|
||||
else if ((*conds)->type() == Item::FUNC_ITEM &&
|
||||
check_equality_for_exist2in((Item_func *)*conds,
|
||||
true,
|
||||
&element.local_field,
|
||||
&element.outer_exp))
|
||||
{
|
||||
@ -3242,7 +3258,7 @@ Item_in_subselect::select_in_like_transformer(JOIN *join)
|
||||
/*
|
||||
In some optimisation cases we will not need this Item_in_optimizer
|
||||
object, but we can't know it here, but here we need address correct
|
||||
reference on left expresion.
|
||||
reference on left expression.
|
||||
|
||||
note: we won't need Item_in_optimizer when handling degenerate cases
|
||||
like "... IN (SELECT 1)"
|
||||
@ -3274,7 +3290,7 @@ Item_in_subselect::select_in_like_transformer(JOIN *join)
|
||||
and all that items do not make permanent changes in current item arena
|
||||
which allow to us call them with changed arena (if we do not know nature
|
||||
of Item, we have to call fix_fields() for it only with original arena to
|
||||
avoid memory leack)
|
||||
avoid memory leak)
|
||||
*/
|
||||
if (left_expr->cols() == 1)
|
||||
trans_res= single_value_transformer(join);
|
||||
@ -3437,7 +3453,7 @@ bool Item_in_subselect::setup_mat_engine()
|
||||
|
||||
/*
|
||||
The select_engine (that executes transformed IN=>EXISTS subselects) is
|
||||
pre-created at parse time, and is stored in statment memory (preserved
|
||||
pre-created at parse time, and is stored in statement memory (preserved
|
||||
across PS executions).
|
||||
*/
|
||||
DBUG_ASSERT(engine->engine_type() == subselect_engine::SINGLE_SELECT_ENGINE);
|
||||
@ -3906,7 +3922,7 @@ int subselect_single_select_engine::exec()
|
||||
For at least one of the pushed predicates the following is true:
|
||||
We should not apply optimizations based on the condition that was
|
||||
pushed down into the subquery. Those optimizations are ref[_or_null]
|
||||
acceses. Change them to be full table scans.
|
||||
accesses. Change them to be full table scans.
|
||||
*/
|
||||
JOIN_TAB *tab;
|
||||
for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
|
||||
@ -6135,7 +6151,7 @@ int subselect_partial_match_engine::exec()
|
||||
if (has_covering_null_row)
|
||||
{
|
||||
/*
|
||||
If there is a NULL-only row that coveres all columns the result of IN
|
||||
If there is a NULL-only row that covers all columns the result of IN
|
||||
is UNKNOWN.
|
||||
*/
|
||||
item_in->value= 0;
|
||||
@ -6330,7 +6346,7 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts,
|
||||
for (uint i= (non_null_key ? 1 : 0); i < merge_keys_count; i++)
|
||||
{
|
||||
/*
|
||||
Check if the first and only indexed column contains NULL in the curent
|
||||
Check if the first and only indexed column contains NULL in the current
|
||||
row, and add the row number to the corresponding key.
|
||||
*/
|
||||
if (merge_keys[i]->get_field(0)->is_null())
|
||||
@ -6542,7 +6558,7 @@ bool subselect_rowid_merge_engine::partial_match()
|
||||
}
|
||||
|
||||
/*
|
||||
If all nullable columns contain only NULLs, then there is a guranteed
|
||||
If all nullable columns contain only NULLs, then there is a guaranteed
|
||||
partial match, and we don't need to search for a matching row.
|
||||
*/
|
||||
if (has_covering_null_columns)
|
||||
|
@ -568,7 +568,7 @@ public:
|
||||
bool jtbm_const_row_found;
|
||||
|
||||
/*
|
||||
TRUE<=>this is a flattenable semi-join, false overwise.
|
||||
TRUE<=>this is a flattenable semi-join, false otherwise.
|
||||
*/
|
||||
bool is_flattenable_semijoin;
|
||||
|
||||
@ -993,7 +993,7 @@ class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine
|
||||
/* FALSE for 'ref', TRUE for 'ref-or-null'. */
|
||||
bool check_null;
|
||||
/*
|
||||
The "having" clause. This clause (further reffered to as "artificial
|
||||
The "having" clause. This clause (further referred to as "artificial
|
||||
having") was inserted by subquery transformation code. It contains
|
||||
Item(s) that have a side-effect: they record whether the subquery has
|
||||
produced a row with NULL certain components. We need to use it for cases
|
||||
@ -1014,7 +1014,7 @@ class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine
|
||||
However, subqueries like the above are currently not handled by index
|
||||
lookup-based subquery engines, the engine applicability check misses
|
||||
them: it doesn't switch the engine for case of artificial having and
|
||||
[eq_]ref access (only for artifical having + ref_or_null or no having).
|
||||
[eq_]ref access (only for artificial having + ref_or_null or no having).
|
||||
The above example subquery is handled as a full-blown SELECT with eq_ref
|
||||
access to one table.
|
||||
|
||||
@ -1085,7 +1085,7 @@ public:
|
||||
*/
|
||||
JOIN *materialize_join;
|
||||
/*
|
||||
A conjunction of all the equality condtions between all pairs of expressions
|
||||
A conjunction of all the equality conditions between all pairs of expressions
|
||||
that are arguments of an IN predicate. We need these to post-filter some
|
||||
IN results because index lookups sometimes match values that are actually
|
||||
not equal to the search key in SQL terms.
|
||||
|
@ -704,7 +704,7 @@ int Aggregator_distinct::composite_key_cmp(void* arg, uchar* key1, uchar* key2)
|
||||
|
||||
C_MODE_START
|
||||
|
||||
/* Declarations for auxilary C-callbacks */
|
||||
/* Declarations for auxiliary C-callbacks */
|
||||
|
||||
int simple_raw_key_cmp(void* arg, const void* key1, const void* key2)
|
||||
{
|
||||
@ -736,7 +736,7 @@ C_MODE_END
|
||||
@param thd Thread descriptor
|
||||
@return status
|
||||
@retval FALSE success
|
||||
@retval TRUE faliure
|
||||
@retval TRUE failure
|
||||
|
||||
Prepares Aggregator_distinct to process the incoming stream.
|
||||
Creates the temporary table and the Unique class if needed.
|
||||
@ -1940,7 +1940,7 @@ void Item_sum_count::cleanup()
|
||||
|
||||
|
||||
/*
|
||||
Avgerage
|
||||
Average
|
||||
*/
|
||||
|
||||
void Item_sum_avg::fix_length_and_dec_decimal()
|
||||
@ -2206,7 +2206,7 @@ bool Item_sum_variance::fix_length_and_dec()
|
||||
/*
|
||||
According to the SQL2003 standard (Part 2, Foundations; sec 10.9,
|
||||
aggregate function; paragraph 7h of Syntax Rules), "the declared
|
||||
type of the result is an implementation-defined aproximate numeric
|
||||
type of the result is an implementation-defined approximate numeric
|
||||
type.
|
||||
*/
|
||||
if (args[0]->type_handler()->Item_sum_variance_fix_length_and_dec(this))
|
||||
@ -2279,7 +2279,7 @@ double Item_sum_variance::val_real()
|
||||
is one or zero. If it's zero, i.e. a population variance, then we only
|
||||
set nullness when the count is zero.
|
||||
|
||||
Another way to read it is that 'sample' is the numerical threshhold, at and
|
||||
Another way to read it is that 'sample' is the numerical threshold, at and
|
||||
below which a 'count' number of items is called NULL.
|
||||
*/
|
||||
DBUG_ASSERT((sample == 0) || (sample == 1));
|
||||
@ -4181,7 +4181,7 @@ bool Item_func_group_concat::setup(THD *thd)
|
||||
{
|
||||
/*
|
||||
Force the create_tmp_table() to convert BIT columns to INT
|
||||
as we cannot compare two table records containg BIT fields
|
||||
as we cannot compare two table records containing BIT fields
|
||||
stored in the the tree used for distinct/order by.
|
||||
Moreover we don't even save in the tree record null bits
|
||||
where BIT fields store parts of their data.
|
||||
|
@ -251,7 +251,7 @@ class Window_spec;
|
||||
The field 'aggr_level' is to contain the nest level of the subquery
|
||||
where the set function is aggregated.
|
||||
|
||||
The field 'max_arg_level' is for the maximun of the nest levels of the
|
||||
The field 'max_arg_level' is for the maximum of the nest levels of the
|
||||
unbound column references occurred in the set function. A column reference
|
||||
is unbound within a set function if it is not bound by any subquery
|
||||
used as a subexpression in this function. A column reference is bound by
|
||||
|
@ -475,7 +475,7 @@ err:
|
||||
|
||||
|
||||
/**
|
||||
Create a formated date/time value in a string.
|
||||
Create a formatted date/time value in a string.
|
||||
*/
|
||||
|
||||
static bool make_date_time(const LEX_CSTRING &format, MYSQL_TIME *l_time,
|
||||
@ -1052,7 +1052,7 @@ uint week_mode(uint mode)
|
||||
a date at start of january) In this case one can get 53 for the
|
||||
first week of next year. This flag ensures that the week is
|
||||
relevant for the given year. Note that this flag is only
|
||||
releveant if WEEK_JANUARY is not set.
|
||||
relevant if WEEK_JANUARY is not set.
|
||||
|
||||
If set Week is in range 1-53.
|
||||
|
||||
@ -1354,7 +1354,7 @@ bool get_interval_value(THD *thd, Item *args,
|
||||
if (!(res= args->val_str_ascii(&str_value)))
|
||||
return (1);
|
||||
|
||||
/* record negative intervalls in interval->neg */
|
||||
/* record negative intervals in interval->neg */
|
||||
str=res->ptr();
|
||||
cs= res->charset();
|
||||
const char *end=str+res->length();
|
||||
@ -1497,7 +1497,7 @@ bool Item_func_from_days::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzz
|
||||
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for local
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for local
|
||||
time zone. Defines time zone (local) used for whole CURDATE function.
|
||||
*/
|
||||
void Item_func_curdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1508,7 +1508,7 @@ void Item_func_curdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for UTC
|
||||
time zone. Defines time zone (UTC) used for whole UTC_DATE function.
|
||||
*/
|
||||
void Item_func_curdate_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1586,7 +1586,7 @@ static void set_sec_part(ulong sec_part, MYSQL_TIME *ltime, Item *item)
|
||||
}
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for local
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for local
|
||||
time zone. Defines time zone (local) used for whole CURTIME function.
|
||||
*/
|
||||
void Item_func_curtime_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1600,7 +1600,7 @@ void Item_func_curtime_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for UTC
|
||||
time zone. Defines time zone (UTC) used for whole UTC_TIME function.
|
||||
*/
|
||||
void Item_func_curtime_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1654,7 +1654,7 @@ int Item_func_now_local::save_in_field(Field *field, bool no_conversions)
|
||||
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for local
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for local
|
||||
time zone. Defines time zone (local) used for whole NOW function.
|
||||
*/
|
||||
void Item_func_now_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1666,7 +1666,7 @@ void Item_func_now_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for UTC
|
||||
time zone. Defines time zone (UTC) used for whole UTC_TIMESTAMP function.
|
||||
*/
|
||||
void Item_func_now_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1696,7 +1696,7 @@ bool Item_func_now::get_date(THD *thd, MYSQL_TIME *res,
|
||||
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for local
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for local
|
||||
time zone. Defines time zone (local) used for whole SYSDATE function.
|
||||
*/
|
||||
void Item_func_sysdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1982,7 +1982,7 @@ bool Item_func_convert_tz::get_date(THD *thd, MYSQL_TIME *ltime,
|
||||
uint not_used;
|
||||
my_time_tmp= from_tz->TIME_to_gmt_sec(ltime, ¬_used);
|
||||
ulong sec_part= ltime->second_part;
|
||||
/* my_time_tmp is guranteed to be in the allowed range */
|
||||
/* my_time_tmp is guaranteed to be in the allowed range */
|
||||
if (my_time_tmp)
|
||||
to_tz->gmt_sec_to_TIME(ltime, my_time_tmp);
|
||||
/* we rely on the fact that no timezone conversion can change sec_part */
|
||||
@ -2403,7 +2403,7 @@ void Item_char_typecast::fix_length_and_dec_internal(CHARSET_INFO *from_cs)
|
||||
uint32 char_length;
|
||||
/*
|
||||
We always force character set conversion if cast_cs
|
||||
is a multi-byte character set. It garantees that the
|
||||
is a multi-byte character set. It guarantees that the
|
||||
result of CAST is a well-formed string.
|
||||
For single-byte character sets we allow just to copy
|
||||
from the argument. A single-byte character sets string
|
||||
|
@ -62,7 +62,7 @@ typedef struct my_xml_node_st
|
||||
} MY_XML_NODE;
|
||||
|
||||
|
||||
/* Lexical analizer token */
|
||||
/* Lexical analyzer token */
|
||||
typedef struct my_xpath_lex_st
|
||||
{
|
||||
int term; /* token type, see MY_XPATH_LEX_XXXXX below */
|
||||
@ -1121,7 +1121,7 @@ static Item* nametestfunc(MY_XPATH *xpath,
|
||||
|
||||
|
||||
/*
|
||||
Tokens consisting of one character, for faster lexical analizer.
|
||||
Tokens consisting of one character, for faster lexical analyzer.
|
||||
*/
|
||||
static char simpletok[128]=
|
||||
{
|
||||
@ -1441,7 +1441,7 @@ my_xpath_function(const char *beg, const char *end)
|
||||
}
|
||||
|
||||
|
||||
/* Initialize a lex analizer token */
|
||||
/* Initialize a lex analyzer token */
|
||||
static void
|
||||
my_xpath_lex_init(MY_XPATH_LEX *lex,
|
||||
const char *str, const char *strend)
|
||||
@ -1472,7 +1472,7 @@ my_xdigit(int c)
|
||||
SYNOPSYS
|
||||
Scan the next token from the input.
|
||||
lex->term is set to the scanned token type.
|
||||
lex->beg and lex->end are set to the beginnig
|
||||
lex->beg and lex->end are set to the beginning
|
||||
and to the end of the token.
|
||||
RETURN
|
||||
N/A
|
||||
@ -1498,7 +1498,7 @@ my_xpath_lex_scan(MY_XPATH *xpath,
|
||||
(const uchar*) end)) > 0 &&
|
||||
((ctype & (_MY_L | _MY_U)) || *beg == '_'))
|
||||
{
|
||||
// scan untill the end of the idenfitier
|
||||
// scan until the end of the identifier
|
||||
for (beg+= length;
|
||||
(length= xpath->cs->cset->ctype(xpath->cs, &ctype,
|
||||
(const uchar*) beg,
|
||||
@ -1627,7 +1627,7 @@ static int my_xpath_parse_AxisName(MY_XPATH *xpath)
|
||||
** Grammar rules, according to http://www.w3.org/TR/xpath
|
||||
** Implemented using recursive descendant method.
|
||||
** All the following grammar processing functions accept
|
||||
** a signle "xpath" argument and return 1 on success and 0 on error.
|
||||
** a single "xpath" argument and return 1 on success and 0 on error.
|
||||
** They also modify "xpath" argument by creating new items.
|
||||
*/
|
||||
|
||||
@ -2523,7 +2523,7 @@ public:
|
||||
as it is in conflict with abbreviated step.
|
||||
1 + .123 does not work,
|
||||
1 + 0.123 does.
|
||||
Perhaps it is better to move this code into lex analizer.
|
||||
Perhaps it is better to move this code into lex analyzer.
|
||||
|
||||
RETURN
|
||||
1 - success
|
||||
@ -2878,7 +2878,7 @@ append_node(String *str, MY_XML_NODE *node)
|
||||
SYNOPSYS
|
||||
|
||||
A call-back function executed when XML parser
|
||||
is entering a tag or an attribue.
|
||||
is entering a tag or an attribute.
|
||||
Appends the new node into data->pxml.
|
||||
Increments data->level.
|
||||
|
||||
@ -2914,7 +2914,7 @@ int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len)
|
||||
SYNOPSYS
|
||||
|
||||
A call-back function executed when XML parser
|
||||
is entering into a tag or an attribue textual value.
|
||||
is entering into a tag or an attribute textual value.
|
||||
The value is appended into data->pxml.
|
||||
|
||||
RETURN
|
||||
@ -2942,7 +2942,7 @@ int xml_value(MY_XML_PARSER *st,const char *attr, size_t len)
|
||||
SYNOPSYS
|
||||
|
||||
A call-back function executed when XML parser
|
||||
is leaving a tag or an attribue.
|
||||
is leaving a tag or an attribute.
|
||||
Decrements data->level.
|
||||
|
||||
RETURN
|
||||
|
@ -228,7 +228,7 @@ void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info,
|
||||
{
|
||||
/*
|
||||
This in fact never happens, as we have only partial BLOB
|
||||
keys yet anyway, so it's difficult to find any sence to
|
||||
keys yet anyway, so it's difficult to find any sense to
|
||||
restore the part of a record.
|
||||
Maybe this branch is to be removed, but now we
|
||||
have to ignore GCov compaining.
|
||||
@ -612,8 +612,8 @@ int key_rec_cmp(void *key_p, uchar *first_rec, uchar *second_rec)
|
||||
max length. The exceptions are the BLOB and VARCHAR field types
|
||||
that take the max length into account.
|
||||
*/
|
||||
if ((result= field->cmp_max(field->ptr+first_diff, field->ptr+sec_diff,
|
||||
key_part->length)))
|
||||
if ((result= field->cmp_prefix(field->ptr+first_diff, field->ptr+sec_diff,
|
||||
key_part->length)))
|
||||
DBUG_RETURN(result);
|
||||
next_loop:
|
||||
key_part++;
|
||||
|
@ -1144,7 +1144,7 @@ bool Global_read_lock::make_global_read_lock_block_commit(THD *thd)
|
||||
{
|
||||
DBUG_ENTER("make_global_read_lock_block_commit");
|
||||
/*
|
||||
If we didn't succeed lock_global_read_lock(), or if we already suceeded
|
||||
If we didn't succeed lock_global_read_lock(), or if we already succeeded
|
||||
make_global_read_lock_block_commit(), do nothing.
|
||||
*/
|
||||
|
||||
|
sql/log.cc
@ -162,7 +162,7 @@ void setup_log_handling()
|
||||
|
||||
/**
|
||||
purge logs, master and slave sides both, related error code
|
||||
convertor.
|
||||
converter.
|
||||
Called from @c purge_error_message(), @c MYSQL_BIN_LOG::reset_logs()
|
||||
|
||||
@param res an internal to purging routines error code
|
||||
@ -381,7 +381,7 @@ public:
|
||||
never zero.
|
||||
|
||||
This is done while calling the constructor binlog_cache_mngr.
|
||||
We cannot set informaton in the constructor binlog_cache_data
|
||||
We cannot set information in the constructor binlog_cache_data
|
||||
because the space for binlog_cache_mngr is allocated through
|
||||
a placement new.
|
||||
|
||||
@ -3031,7 +3031,7 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
|
||||
|
||||
mysql_mutex_lock(&LOCK_log);
|
||||
if (is_open())
|
||||
{ // Safety agains reopen
|
||||
{ // Safety against reopen
|
||||
char buff[80], *end;
|
||||
char query_time_buff[22+7], lock_time_buff[22+7];
|
||||
size_t buff_len;
|
||||
@ -3330,7 +3330,7 @@ void MYSQL_BIN_LOG::cleanup()
|
||||
|
||||
/*
|
||||
Free data for global binlog state.
|
||||
We can't do that automaticly as we need to do this before
|
||||
We can't do that automatically as we need to do this before
|
||||
safemalloc is shut down
|
||||
*/
|
||||
if (!is_relay_log)
|
||||
@ -4105,7 +4105,7 @@ err:
|
||||
|
||||
|
||||
/**
|
||||
Delete all logs refered to in the index file.
|
||||
Delete all logs referred to in the index file.
|
||||
|
||||
The new index file will only contain this file.
|
||||
|
||||
@ -5685,7 +5685,7 @@ binlog_cache_mngr *THD::binlog_setup_trx_data()
|
||||
|
||||
- Start a statement transaction to allow us to truncate the cache.
|
||||
|
||||
- Save the currrent binlog position so that we can roll back the
|
||||
- Save the current binlog position so that we can roll back the
|
||||
statement by truncating the cache.
|
||||
|
||||
We only update the saved position if the old one was undefined,
|
||||
@ -6870,7 +6870,7 @@ static const char* get_first_binlog(char* buf_arg)
|
||||
}
|
||||
if (normalize_binlog_name(buf_arg, fname, false))
|
||||
{
|
||||
errmsg= "cound not normalize the first file name in the binlog index";
|
||||
errmsg= "could not normalize the first file name in the binlog index";
|
||||
goto end;
|
||||
}
|
||||
end:
|
||||
@ -9863,7 +9863,7 @@ TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint)
|
||||
than compare all found against each other to find the one pointing to the
|
||||
most recent binlog.
|
||||
|
||||
Note also that we need to first release LOCK_xid_list, then aquire
|
||||
Note also that we need to first release LOCK_xid_list, then acquire
|
||||
LOCK_log, then re-aquire LOCK_xid_list. If we were to take LOCK_log while
|
||||
holding LOCK_xid_list, we might deadlock with other threads that take the
|
||||
locks in the opposite order.
|
||||
@ -9948,7 +9948,7 @@ TC_LOG_BINLOG::commit_checkpoint_notify(void *cookie)
|
||||
necessary stuff.
|
||||
|
||||
In the future, this thread could also be used to do log rotation in the
|
||||
background, which could elimiate all stalls around binlog rotations.
|
||||
background, which could eliminate all stalls around binlog rotations.
|
||||
*/
|
||||
pthread_handler_t
|
||||
binlog_background_thread(void *arg __attribute__((unused)))
|
||||
|
@ -4533,7 +4533,7 @@ get_str_len_and_pointer(const Log_event::Byte **src,
|
||||
const Log_event::Byte *end)
|
||||
{
|
||||
if (*src >= end)
|
||||
return -1; // Will be UINT_MAX in two-complement arithmetics
|
||||
return -1; // Will be UINT_MAX in two-complement arithmetic
|
||||
uint length= **src;
|
||||
if (length > 0)
|
||||
{
|
||||
@ -4921,7 +4921,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
|
||||
|
||||
/* A 2nd variable part; this is common to all versions */
|
||||
memcpy((char*) start, end, data_len); // Copy db and query
|
||||
start[data_len]= '\0'; // End query with \0 (For safetly)
|
||||
start[data_len]= '\0'; // End query with \0 (For safety)
|
||||
db= (char *)start;
|
||||
query= (char *)(start + db_len + 1);
|
||||
q_len= data_len - db_len -1;
|
||||
@ -6624,7 +6624,7 @@ int Format_description_log_event::do_update_pos(rpl_group_info *rgi)
|
||||
If we do not skip stepping the group log position (and the
|
||||
server id was changed when restarting the server), it might well
|
||||
be that we start executing at a position that is invalid, e.g.,
|
||||
at a Rows_log_event or a Query_log_event preceeded by a
|
||||
at a Rows_log_event or a Query_log_event preceded by a
|
||||
Intvar_log_event instead of starting at a Table_map_log_event or
|
||||
the Intvar_log_event respectively.
|
||||
*/
|
||||
@ -6735,7 +6735,7 @@ Format_description_log_event::is_version_before_checksum(const master_version_sp
|
||||
|
||||
@return the version-safe checksum alg descriptor where zero
|
||||
designates no checksum, 255 - the orginator is
|
||||
checksum-unaware (effectively no checksum) and the actuall
|
||||
checksum-unaware (effectively no checksum) and the actual
|
||||
[1-254] range alg descriptor.
|
||||
*/
|
||||
enum enum_binlog_checksum_alg get_checksum_alg(const char* buf, ulong len)
|
||||
@ -7461,7 +7461,7 @@ int Load_log_event::do_apply_event(NET* net, rpl_group_info *rgi,
|
||||
/*
|
||||
When replication is running fine, if it was DUP_ERROR on the
|
||||
master then we could choose IGNORE here, because if DUP_ERROR
|
||||
suceeded on master, and data is identical on the master and slave,
|
||||
succeeded on master, and data is identical on the master and slave,
|
||||
then there should be no uniqueness errors on slave, so IGNORE is
|
||||
the same as DUP_ERROR. But in the unlikely case of uniqueness errors
|
||||
(because the data on the master and slave happen to be different
|
||||
@ -8008,7 +8008,7 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg,
|
||||
|
||||
/*
|
||||
Used to record GTID while sending binlog to slave, without having to
|
||||
fully contruct every Gtid_log_event() needlessly.
|
||||
fully construct every Gtid_log_event() needlessly.
|
||||
*/
|
||||
bool
|
||||
Gtid_log_event::peek(const char *event_start, size_t event_len,
|
||||
@ -8574,7 +8574,7 @@ err:
|
||||
|
||||
/*
|
||||
Used to record gtid_list event while sending binlog to slave, without having to
|
||||
fully contruct the event object.
|
||||
fully construct the event object.
|
||||
*/
|
||||
bool
|
||||
Gtid_list_log_event::peek(const char *event_start, size_t event_len,
|
||||
@ -8654,7 +8654,7 @@ Intvar_log_event::Intvar_log_event(const char* buf,
|
||||
const Format_description_log_event* description_event)
|
||||
:Log_event(buf, description_event)
|
||||
{
|
||||
/* The Post-Header is empty. The Varible Data part begins immediately. */
|
||||
/* The Post-Header is empty. The Variable Data part begins immediately. */
|
||||
buf+= description_event->common_header_len +
|
||||
description_event->post_header_len[INTVAR_EVENT-1];
|
||||
type= buf[I_TYPE_OFFSET];
|
||||
@ -9944,7 +9944,7 @@ void Create_file_log_event::pack_info(Protocol *protocol)
|
||||
|
||||
/**
|
||||
Create_file_log_event::do_apply_event()
|
||||
Constructor for Create_file_log_event to intantiate an event
|
||||
Constructor for Create_file_log_event to instantiate an event
|
||||
from the relay log on the slave.
|
||||
|
||||
@retval
|
||||
@ -11018,7 +11018,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
/* if my_bitmap_init fails, catched in is_valid() */
|
||||
/* if my_bitmap_init fails, caught in is_valid() */
|
||||
if (likely(!my_bitmap_init(&m_cols,
|
||||
m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
|
||||
m_width,
|
||||
@ -11435,7 +11435,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
|
||||
*/
|
||||
|
||||
{
|
||||
DBUG_PRINT("debug", ("Checking compability of tables to lock - tables_to_lock: %p",
|
||||
DBUG_PRINT("debug", ("Checking compatibility of tables to lock - tables_to_lock: %p",
|
||||
rgi->tables_to_lock));
|
||||
|
||||
/**
|
||||
@ -11490,7 +11490,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
|
||||
ptr->table->s->table_name.str));
|
||||
/*
|
||||
We should not honour --slave-skip-errors at this point as we are
|
||||
having severe errors which should not be skiped.
|
||||
having severe errors which should not be skipped.
|
||||
*/
|
||||
thd->is_slave_error= 1;
|
||||
/* remove trigger's tables */
|
||||
@ -11879,7 +11879,7 @@ static int rows_event_stmt_cleanup(rpl_group_info *rgi, THD * thd)
|
||||
/**
|
||||
The method either increments the relay log position or
|
||||
commits the current statement and increments the master group
|
||||
possition if the event is STMT_END_F flagged and
|
||||
position if the event is STMT_END_F flagged and
|
||||
the statement corresponds to the autocommit query (i.e replicated
|
||||
without wrapping in BEGIN/COMMIT)
|
||||
|
||||
@ -12091,7 +12091,7 @@ err:
|
||||
|
||||
/**
|
||||
Print an event "body" cache to @c file possibly in two fragments.
|
||||
Each fragement is optionally per @c do_wrap to produce an SQL statement.
|
||||
Each fragment is optionally per @c do_wrap to produce an SQL statement.
|
||||
|
||||
@param file a file to print to
|
||||
@param body the "body" IO_CACHE of event
|
||||
@ -13904,7 +13904,7 @@ record_compare_exit:
|
||||
Find the best key to use when locating the row in @c find_row().
|
||||
|
||||
A primary key is preferred if it exists; otherwise a unique index is
|
||||
preferred. Else we pick the index with the smalles rec_per_key value.
|
||||
preferred. Else we pick the index with the smallest rec_per_key value.
|
||||
|
||||
If a suitable key is found, set @c m_key, @c m_key_nr and @c m_key_info
|
||||
member fields appropriately.
|
||||
@ -14038,7 +14038,7 @@ static int row_not_found_error(rpl_group_info *rgi)
|
||||
Locate the current row in event's table.
|
||||
|
||||
The current row is pointed by @c m_curr_row. Member @c m_width tells
|
||||
how many columns are there in the row (this can be differnet from
|
||||
how many columns are there in the row (this can be different from
|
||||
the number of columns in the table). It is assumed that event's
|
||||
table is already open and pointed by @c m_table.
|
||||
|
||||
@ -14079,7 +14079,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
|
||||
rpl_row_tabledefs.test specifies that
|
||||
if the extra field on the slave does not have a default value
|
||||
and this is okay with Delete or Update events.
|
||||
Todo: fix wl3228 hld that requires defauls for all types of events
|
||||
Todo: fix wl3228 hld that requires defaults for all types of events
|
||||
*/
|
||||
|
||||
prepare_record(table, m_width, FALSE);
|
||||
@ -14332,7 +14332,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
|
||||
while (record_compare(table));
|
||||
|
||||
/*
|
||||
Note: above record_compare will take into accout all record fields
|
||||
Note: above record_compare will take into account all record fields
|
||||
which might be incorrect in case a partial row was given in the event
|
||||
*/
|
||||
|
||||
|
@ -455,7 +455,7 @@ class String;
|
||||
/**
|
||||
@def LOG_EVENT_ARTIFICIAL_F
|
||||
|
||||
Artificial events are created arbitarily and not written to binary
|
||||
Artificial events are created arbitrarily and not written to binary
|
||||
log
|
||||
|
||||
These events should not update the master log position when slave
|
||||
@ -962,13 +962,13 @@ private:
|
||||
};
|
||||
|
||||
/**
|
||||
the struct aggregates two paramenters that identify an event
|
||||
the struct aggregates two parameters that identify an event
|
||||
uniquely in scope of communication of a particular master and slave couple.
|
||||
I.e there can not be 2 events from the same staying connected master which
|
||||
have the same coordinates.
|
||||
@note
|
||||
Such identifier is not yet unique generally as the event originating master
|
||||
is resetable. Also the crashed master can be replaced with some other.
|
||||
is resettable. Also the crashed master can be replaced with some other.
|
||||
*/
|
||||
typedef struct event_coordinates
|
||||
{
|
||||
@ -2792,7 +2792,7 @@ public:
|
||||
uint8 number_of_event_types;
|
||||
/*
|
||||
The list of post-headers' lengths followed
|
||||
by the checksum alg decription byte
|
||||
by the checksum alg description byte
|
||||
*/
|
||||
uint8 *post_header_len;
|
||||
class master_version_split: public Version {
|
||||
@ -3131,7 +3131,7 @@ public:
|
||||
*/
|
||||
bool is_deferred() { return deferred; }
|
||||
/*
|
||||
In case of the deffered applying the variable instance is flagged
|
||||
In case of the deferred applying the variable instance is flagged
|
||||
and the parsing time query id is stored to be used at applying time.
|
||||
*/
|
||||
void set_deferred(query_id_t qid) { deferred= true; query_id= qid; }
|
||||
@ -5019,7 +5019,7 @@ private:
|
||||
/**
|
||||
@class Incident_log_event
|
||||
|
||||
Class representing an incident, an occurance out of the ordinary,
|
||||
Class representing an incident, an occurence out of the ordinary,
|
||||
that happened on the master.
|
||||
|
||||
The event is used to inform the slave that something out of the
|
||||
@ -5063,7 +5063,7 @@ public:
|
||||
m_message.str= NULL; /* Just as a precaution */
|
||||
m_message.length= 0;
|
||||
set_direct_logging();
|
||||
/* Replicate the incident irregardless of @@skip_replication. */
|
||||
/* Replicate the incident regardless of @@skip_replication. */
|
||||
flags&= ~LOG_EVENT_SKIP_REPLICATION_F;
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
@ -5084,7 +5084,7 @@ public:
|
||||
strmake(m_message.str, msg->str, msg->length);
|
||||
m_message.length= msg->length;
|
||||
set_direct_logging();
|
||||
/* Replicate the incident irregardless of @@skip_replication. */
|
||||
/* Replicate the incident regardless of @@skip_replication. */
|
||||
flags&= ~LOG_EVENT_SKIP_REPLICATION_F;
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
@ -1227,7 +1227,7 @@ Old_rows_log_event::Old_rows_log_event(const char *buf, uint event_len,
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
/* if my_bitmap_init fails, catched in is_valid() */
|
||||
/* if my_bitmap_init fails, caught in is_valid() */
|
||||
if (likely(!my_bitmap_init(&m_cols,
|
||||
m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
|
||||
m_width,
|
||||
|
@ -4398,7 +4398,11 @@ static int replace_user_table(THD *thd, const User_table &user_table,
|
||||
{
|
||||
if (revoke_grant)
|
||||
{
|
||||
my_error(ER_NONEXISTING_GRANT, MYF(0), combo->user.str, combo->host.str);
|
||||
if (combo->host.length)
|
||||
my_error(ER_NONEXISTING_GRANT, MYF(0), combo->user.str,
|
||||
combo->host.str);
|
||||
else
|
||||
my_error(ER_INVALID_ROLE, MYF(0), combo->user.str);
|
||||
goto end;
|
||||
}
|
||||
/*
|
||||
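A hedged sketch of the user-visible effect of the replace_user_table change above; error texts are approximate and not verified against this exact build. When a REVOKE names a grantee with an empty host part, i.e. a role, and no matching grant row exists, the error now reports an invalid role instead of a nonexistent grant on host ''.

-- Illustrative only.
REVOKE SELECT ON *.* FROM no_such_role;
-- before: "There is no such grant defined for user 'no_such_role' on host ''"
-- after:  "Invalid role specification `no_such_role`"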
@ -5922,6 +5926,8 @@ static void propagate_role_grants(ACL_ROLE *role,
|
||||
enum PRIVS_TO_MERGE::what what,
|
||||
const char *db= 0, const char *name= 0)
|
||||
{
|
||||
if (!role)
|
||||
return;
|
||||
|
||||
mysql_mutex_assert_owner(&acl_cache->lock);
|
||||
PRIVS_TO_MERGE data= { what, db, name };
|
||||
@ -8107,6 +8113,21 @@ err:
|
||||
}
|
||||
|
||||
|
||||
static void check_grant_column_int(GRANT_TABLE *grant_table, const char *name,
|
||||
uint length, ulong *want_access)
|
||||
{
|
||||
if (grant_table)
|
||||
{
|
||||
*want_access&= ~grant_table->privs;
|
||||
if (*want_access & grant_table->cols)
|
||||
{
|
||||
GRANT_COLUMN *grant_column= column_hash_search(grant_table, name, length);
|
||||
if (grant_column)
|
||||
*want_access&= ~grant_column->rights;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Check column rights in given security context
|
||||
|
||||
@ -8129,9 +8150,6 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant,
|
||||
const char *db_name, const char *table_name,
|
||||
const char *name, size_t length, Security_context *sctx)
|
||||
{
|
||||
GRANT_TABLE *grant_table;
|
||||
GRANT_TABLE *grant_table_role;
|
||||
GRANT_COLUMN *grant_column;
|
||||
ulong want_access= grant->want_privilege & ~grant->privilege;
|
||||
DBUG_ENTER("check_grant_column");
|
||||
DBUG_PRINT("enter", ("table: %s want_access: %lu", table_name, want_access));
|
||||
@ -8156,45 +8174,20 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant,
|
||||
grant->version= grant_version; /* purecov: inspected */
|
||||
}
|
||||
|
||||
grant_table= grant->grant_table_user;
|
||||
grant_table_role= grant->grant_table_role;
|
||||
check_grant_column_int(grant->grant_table_user, name, (uint)length,
|
||||
&want_access);
|
||||
check_grant_column_int(grant->grant_table_role, name, (uint)length,
|
||||
&want_access);
|
||||
|
||||
if (!grant_table && !grant_table_role)
|
||||
goto err;
|
||||
|
||||
if (grant_table)
|
||||
{
|
||||
grant_column= column_hash_search(grant_table, name, length);
|
||||
if (grant_column)
|
||||
{
|
||||
want_access&= ~grant_column->rights;
|
||||
}
|
||||
}
|
||||
if (grant_table_role)
|
||||
{
|
||||
grant_column= column_hash_search(grant_table_role, name, length);
|
||||
if (grant_column)
|
||||
{
|
||||
want_access&= ~grant_column->rights;
|
||||
}
|
||||
}
|
||||
if (!want_access)
|
||||
{
|
||||
mysql_rwlock_unlock(&LOCK_grant);
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
err:
|
||||
mysql_rwlock_unlock(&LOCK_grant);
|
||||
if (!want_access)
|
||||
DBUG_RETURN(0);
|
||||
|
||||
char command[128];
|
||||
get_privilege_desc(command, sizeof(command), want_access);
|
||||
/* TODO perhaps error should print current rolename aswell */
|
||||
my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0),
|
||||
command,
|
||||
sctx->priv_user,
|
||||
sctx->host_or_ip,
|
||||
name,
|
||||
table_name);
|
||||
my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), command, sctx->priv_user,
|
||||
sctx->host_or_ip, name, table_name);
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
|
||||
|
@ -768,8 +768,18 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
|
||||
{
|
||||
compl_result_code= result_code= HA_ADMIN_INVALID;
|
||||
}
|
||||
|
||||
/*
|
||||
The check for ALTER_PARTITION_ADMIN implements this logic:
|
||||
do not collect EITS STATS for this syntax:
|
||||
ALTER TABLE ... ANALYZE PARTITION p
|
||||
EITS statistics is global (not per-partition). Collecting global stats
|
||||
is much more expensive processing just one partition, so the most
|
||||
appropriate action is to just not collect EITS stats for this command.
|
||||
*/
|
||||
collect_eis=
|
||||
(table->table->s->table_category == TABLE_CATEGORY_USER &&
|
||||
!(lex->alter_info.flags & ALTER_PARTITION_ADMIN) &&
|
||||
(check_eits_collection_allowed(thd) ||
|
||||
lex->with_persistent_for_clause));
|
||||
}
|
||||
|
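The intent of the collect_eis guard above, expressed as SQL; a sketch only, since whether EITS statistics are actually collected also depends on use_stat_tables and related settings.

-- Illustrative only.
ALTER TABLE t1 ANALYZE PARTITION p1;   -- per-partition admin command:
                                       -- global EITS stats are no longer collected here
ANALYZE TABLE t1 PERSISTENT FOR ALL;   -- still collects EITS statistics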
@ -1419,7 +1419,7 @@ bool wait_while_table_is_used(THD *thd, TABLE *table,
|
||||
FALSE);
|
||||
/* extra() call must come only after all instances above are closed */
|
||||
if (function != HA_EXTRA_NOT_USED)
|
||||
(void) table->file->extra(function);
|
||||
DBUG_RETURN(table->file->extra(function));
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
||||
@ -7944,15 +7944,11 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
|
||||
FALSE ok; In this case *map will include the chosen index
|
||||
TRUE error
|
||||
*/
|
||||
bool setup_tables_and_check_access(THD *thd,
|
||||
Name_resolution_context *context,
|
||||
bool setup_tables_and_check_access(THD *thd, Name_resolution_context *context,
|
||||
List<TABLE_LIST> *from_clause,
|
||||
TABLE_LIST *tables,
|
||||
List<TABLE_LIST> &leaves,
|
||||
bool select_insert,
|
||||
ulong want_access_first,
|
||||
ulong want_access,
|
||||
bool full_table_list)
|
||||
TABLE_LIST *tables, List<TABLE_LIST> &leaves,
|
||||
bool select_insert, ulong want_access_first,
|
||||
ulong want_access, bool full_table_list)
|
||||
{
|
||||
DBUG_ENTER("setup_tables_and_check_access");
|
||||
|
||||
|
@ -1205,10 +1205,8 @@ extern "C" my_thread_id next_thread_id_noinline()
|
||||
#endif
|
||||
|
||||
|
||||
const Type_handler *THD::type_handler_for_date() const
|
||||
const Type_handler *THD::type_handler_for_datetime() const
|
||||
{
|
||||
if (!(variables.sql_mode & MODE_ORACLE))
|
||||
return &type_handler_newdate;
|
||||
if (opt_mysql56_temporal_format)
|
||||
return &type_handler_datetime2;
|
||||
return &type_handler_datetime;
|
||||
|
@ -3418,7 +3418,7 @@ public:
|
||||
{
|
||||
return !MY_TEST(variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES);
|
||||
}
|
||||
const Type_handler *type_handler_for_date() const;
|
||||
const Type_handler *type_handler_for_datetime() const;
|
||||
bool timestamp_to_TIME(MYSQL_TIME *ltime, my_time_t ts,
|
||||
ulong sec_part, date_mode_t fuzzydate);
|
||||
inline my_time_t query_start() { return start_time; }
|
||||
|
@ -240,6 +240,7 @@ void
|
||||
st_parsing_options::reset()
|
||||
{
|
||||
allows_variable= TRUE;
|
||||
lookup_keywords_after_qualifier= false;
|
||||
}
|
||||
|
||||
|
||||
@ -1612,7 +1613,10 @@ int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd)
|
||||
yylval->lex_str.str= (char*) get_ptr();
|
||||
yylval->lex_str.length= 1;
|
||||
c= yyGet(); // should be '.'
|
||||
next_state= MY_LEX_IDENT_START; // Next is ident (not keyword)
|
||||
if (lex->parsing_options.lookup_keywords_after_qualifier)
|
||||
next_state= MY_LEX_IDENT_OR_KEYWORD;
|
||||
else
|
||||
next_state= MY_LEX_IDENT_START; // Next is ident (not keyword)
|
||||
if (!ident_map[(uchar) yyPeek()]) // Probably ` or "
|
||||
next_state= MY_LEX_START;
|
||||
return((int) c);
|
||||
@ -4197,7 +4201,8 @@ bool st_select_lex::optimize_unflattened_subqueries(bool const_only)
|
||||
sl->options|= SELECT_DESCRIBE;
|
||||
inner_join->select_options|= SELECT_DESCRIBE;
|
||||
}
|
||||
res= inner_join->optimize();
|
||||
if ((res= inner_join->optimize()))
|
||||
return TRUE;
|
||||
if (!inner_join->cleaned)
|
||||
sl->update_used_tables();
|
||||
sl->update_correlated_cache();
|
||||
@ -10418,3 +10423,31 @@ Spvar_definition *LEX::row_field_name(THD *thd, const Lex_ident_sys_st &name)
|
||||
init_last_field(res, &name, thd->variables.collation_database);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
bool LEX::map_data_type(const Lex_ident_sys_st &schema_name,
|
||||
Lex_field_type_st *type) const
|
||||
{
|
||||
const Schema *schema= schema_name.str ?
|
||||
Schema::find_by_name(schema_name) :
|
||||
Schema::find_implied(thd);
|
||||
if (!schema)
|
||||
{
|
||||
char buf[128];
|
||||
const Name type_name= type->type_handler()->name();
|
||||
my_snprintf(buf, sizeof(buf), "%.*s.%.*s",
|
||||
(int) schema_name.length, schema_name.str,
|
||||
(int) type_name.length(), type_name.ptr());
|
||||
#if MYSQL_VERSION_ID > 100500
|
||||
#error Please remove the old code
|
||||
my_error(ER_UNKNOWN_DATA_TYPE, MYF(0), buf);
|
||||
#else
|
||||
my_printf_error(ER_UNKNOWN_ERROR, "Unknown data type: '%-.64s'",
|
||||
MYF(0), buf);
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
const Type_handler *mapped= schema->map_data_type(thd, type->type_handler());
|
||||
type->set_handler(mapped);
|
||||
return false;
|
||||
}
|
||||
|
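LEX::map_data_type above, together with the lookup_keywords_after_qualifier lexer flag added in the same file, is what allows a data type in DDL to be qualified by a schema name. A hedged sketch of the syntax this enables; the unknown-qualifier message follows the my_printf_error fallback shown in this hunk, and foo_schema is a hypothetical name.

-- Illustrative only.
CREATE TABLE t1 (d mariadb_schema.DATE);  -- explicitly the traditional MariaDB DATE
CREATE TABLE t2 (d oracle_schema.DATE);   -- Oracle-style DATE mapping
CREATE TABLE t3 (d foo_schema.DATE);      -- hypothetical unknown qualifier:
-- ERROR (HY000): Unknown data type: 'foo_schema.date'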
@ -32,6 +32,7 @@
|
||||
#include "sp.h" // enum stored_procedure_type
|
||||
#include "sql_tvc.h"
|
||||
#include "item.h"
|
||||
#include "sql_schema.h"
|
||||
|
||||
/* Used for flags of nesting constructs */
|
||||
#define SELECT_NESTING_MAP_SIZE 64
|
||||
@ -2301,6 +2302,7 @@ private:
|
||||
struct st_parsing_options
|
||||
{
|
||||
bool allows_variable;
|
||||
bool lookup_keywords_after_qualifier;
|
||||
|
||||
st_parsing_options() { reset(); }
|
||||
void reset();
|
||||
@ -4524,6 +4526,9 @@ public:
|
||||
const LEX_CSTRING &soname);
|
||||
Spvar_definition *row_field_name(THD *thd, const Lex_ident_sys_st &name);
|
||||
|
||||
bool map_data_type(const Lex_ident_sys_st &schema,
|
||||
Lex_field_type_st *type) const;
|
||||
|
||||
void mark_first_table_as_inserting();
|
||||
};
|
||||
|
||||
|
sql/sql_schema.cc (new file)
@ -0,0 +1,80 @@
|
||||
/*
|
||||
Copyright (c) 2020, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; version 2 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
|
||||
|
||||
#include "mariadb.h"
|
||||
#include "sql_type.h"
|
||||
#include "sql_schema.h"
|
||||
#include "sql_class.h"
|
||||
|
||||
class Schema_oracle: public Schema
|
||||
{
|
||||
public:
|
||||
Schema_oracle(const LEX_CSTRING &name)
|
||||
:Schema(name)
|
||||
{ }
|
||||
const Type_handler *map_data_type(THD *thd, const Type_handler *src)
|
||||
const
|
||||
{
|
||||
if (src == &type_handler_newdate)
|
||||
return thd->type_handler_for_datetime();
|
||||
return src;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
class Schema_maxdb: public Schema
|
||||
{
|
||||
public:
|
||||
Schema_maxdb(const LEX_CSTRING &name)
|
||||
:Schema(name)
|
||||
{ }
|
||||
const Type_handler *map_data_type(THD *thd, const Type_handler *src)
|
||||
const
|
||||
{
|
||||
if (src == &type_handler_timestamp ||
|
||||
src == &type_handler_timestamp2)
|
||||
return thd->type_handler_for_datetime();
|
||||
return src;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
Schema mariadb_schema(Lex_cstring(STRING_WITH_LEN("mariadb_schema")));
|
||||
Schema_oracle oracle_schema(Lex_cstring(STRING_WITH_LEN("oracle_schema")));
|
||||
Schema_maxdb maxdb_schema(Lex_cstring(STRING_WITH_LEN("maxdb_schema")));
|
||||
|
||||
|
||||
Schema *Schema::find_by_name(const LEX_CSTRING &name)
|
||||
{
|
||||
DBUG_ASSERT(name.str);
|
||||
if (mariadb_schema.eq_name(name))
|
||||
return &mariadb_schema;
|
||||
if (oracle_schema.eq_name(name))
|
||||
return &oracle_schema;
|
||||
if (maxdb_schema.eq_name(name))
|
||||
return &maxdb_schema;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
Schema *Schema::find_implied(THD *thd)
|
||||
{
|
||||
if (thd->variables.sql_mode & MODE_ORACLE)
|
||||
return &oracle_schema;
|
||||
if (thd->variables.sql_mode & MODE_MAXDB)
|
||||
return &maxdb_schema;
|
||||
return &mariadb_schema;
|
||||
}
|
sql/sql_schema.h (new file)
@ -0,0 +1,70 @@
|
||||
#ifndef SQL_SCHEMA_H_INCLUDED
|
||||
#define SQL_SCHEMA_H_INCLUDED
|
||||
/*
|
||||
Copyright (c) 2020, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; version 2 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
|
||||
|
||||
#include "mysqld.h"
|
||||
#include "lex_string.h"
|
||||
|
||||
class Schema
|
||||
{
|
||||
LEX_CSTRING m_name;
|
||||
public:
|
||||
Schema(const LEX_CSTRING &name)
|
||||
:m_name(name)
|
||||
{ }
|
||||
virtual ~Schema() { }
|
||||
const LEX_CSTRING &name() const { return m_name; }
|
||||
virtual const Type_handler *map_data_type(THD *thd, const Type_handler *src)
|
||||
const
|
||||
{
|
||||
return src;
|
||||
}
|
||||
/*
|
||||
For now we have *hard-coded* compatibility schemas:
|
||||
schema_mariadb, schema_oracle, schema_maxdb.
|
||||
But eventually we'll turn then into real databases on disk.
|
||||
So the code below compares names according to the filesystem
|
||||
case sensitivity, like it is done for regular databases.
|
||||
|
||||
Note, this is different to information_schema, whose name
|
||||
is always case insensitive. This is intentional!
|
||||
The assymetry will be gone when we'll implement SQL standard
|
||||
regular and delimited identifiers.
|
||||
*/
|
||||
bool eq_name(const LEX_CSTRING &name) const
|
||||
{
|
||||
#if MYSQL_VERSION_ID > 100500
|
||||
#error Remove the old code
|
||||
return !table_alias_charset->strnncoll(m_name.str, m_name.length,
|
||||
name.str, name.length);
|
||||
#else
|
||||
// Please remove this when merging to 10.5
|
||||
return !table_alias_charset->coll->strnncoll(table_alias_charset,
|
||||
(const uchar *) m_name.str,
|
||||
m_name.length,
|
||||
(const uchar *) name.str,
|
||||
name.length, FALSE);
|
||||
#endif
|
||||
}
|
||||
static Schema *find_by_name(const LEX_CSTRING &name);
|
||||
static Schema *find_implied(THD *thd);
|
||||
};
|
||||
|
||||
|
||||
extern Schema mariadb_schema;
|
||||
|
||||
#endif // SQL_SCHEMA_H_INCLUDED
|
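Taken together, the two new files give the parser a per-dialect type-mapping hook: Schema::find_implied() picks a schema from sql_mode, and map_data_type() redirects a few handlers. A rough behavioral sketch of what that means in SQL, not verified against this exact build:

-- Illustrative only.
SET sql_mode=DEFAULT;
CREATE TABLE t1 (d DATE);       -- plain MariaDB DATE
SET sql_mode=ORACLE;
CREATE TABLE t2 (d DATE);       -- Schema_oracle maps DATE to the datetime handler
                                -- (Oracle-style DATE with a time part)
SET sql_mode=MAXDB;
CREATE TABLE t3 (t TIMESTAMP);  -- Schema_maxdb maps TIMESTAMP to DATETIME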
@ -1547,7 +1547,6 @@ err:
|
||||
bool JOIN::build_explain()
|
||||
{
|
||||
DBUG_ENTER("JOIN::build_explain");
|
||||
create_explain_query_if_not_exists(thd->lex, thd->mem_root);
|
||||
have_query_plan= QEP_AVAILABLE;
|
||||
|
||||
/*
|
||||
@ -1589,6 +1588,7 @@ bool JOIN::build_explain()
|
||||
int JOIN::optimize()
|
||||
{
|
||||
int res= 0;
|
||||
create_explain_query_if_not_exists(thd->lex, thd->mem_root);
|
||||
join_optimization_state init_state= optimization_state;
|
||||
if (select_lex->pushdown_select)
|
||||
{
|
||||
|
@ -2232,6 +2232,13 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
|
||||
append_identifier(thd, packet, &field->field_name);
|
||||
packet->append(' ');
|
||||
|
||||
const Type_handler *th= field->type_handler();
|
||||
const Schema *implied_schema= Schema::find_implied(thd);
|
||||
if (th != implied_schema->map_data_type(thd, th))
|
||||
{
|
||||
packet->append(th->schema()->name(), system_charset_info);
|
||||
packet->append(STRING_WITH_LEN("."), system_charset_info);
|
||||
}
|
||||
type.set(tmp, sizeof(tmp), system_charset_info);
|
||||
field->sql_type(type);
|
||||
packet->append(type.ptr(), type.length(), system_charset_info);
|
||||
|
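The show_create_table hunk above emits a schema qualifier only when the column's handler differs from what the implied schema would map the written type to, which keeps dumps round-trippable across sql_mode settings. A hedged sketch of the expected output; exact formatting is not verified here.

-- Illustrative only.
SET sql_mode=ORACLE;
CREATE TABLE t1 (d mariadb_schema.DATE);
SHOW CREATE TABLE t1;
-- the column definition is expected to read roughly:
--   "d" mariadb_schema.date DEFAULT NULL
-- while a DATE column created under the default sql_mode shows no qualifier.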
@ -912,6 +912,10 @@ public:
|
||||
|
||||
// Append with optional character set conversion from cs to charset()
|
||||
bool append(const char *s, size_t arg_length, CHARSET_INFO *cs);
|
||||
bool append(const LEX_CSTRING &s, CHARSET_INFO *cs)
|
||||
{
|
||||
return append(s.str, s.length, cs);
|
||||
}
|
||||
|
||||
void strip_sp();
|
||||
friend int sortcmp(const String *a,const String *b, CHARSET_INFO *cs);
|
||||
|
Some files were not shown because too many files have changed in this diff.