commit c32f71af7e
Merge branch '10.2' into 10.3
@@ -31,7 +31,6 @@ SET(HAVE_BACKTRACE_SYMBOLS CACHE INTERNAL "")
 SET(HAVE_BACKTRACE_SYMBOLS_FD CACHE INTERNAL "")
 SET(HAVE_BFILL CACHE INTERNAL "")
 SET(HAVE_BSD_SIGNALS CACHE INTERNAL "")
-SET(HAVE_BSS_START CACHE INTERNAL "")
 SET(HAVE_CLOCK_GETTIME CACHE INTERNAL "")
 SET(HAVE_COMPRESS CACHE INTERNAL "")
 SET(HAVE_CRYPT CACHE INTERNAL "")
@@ -243,7 +243,6 @@
 /* Symbols we may use */
 #cmakedefine HAVE_SYS_ERRLIST 1
 /* used by stacktrace functions */
-#cmakedefine HAVE_BSS_START 1
 #cmakedefine HAVE_BACKTRACE 1
 #cmakedefine HAVE_BACKTRACE_SYMBOLS 1
 #cmakedefine HAVE_BACKTRACE_SYMBOLS_FD 1
@@ -802,14 +802,6 @@ CHECK_CXX_SOURCE_COMPILES("
 HAVE_ABI_CXA_DEMANGLE)
 ENDIF()
 
-CHECK_C_SOURCE_COMPILES("
-int main(int argc, char **argv)
-{
-extern char *__bss_start;
-return __bss_start ? 1 : 0;
-}"
-HAVE_BSS_START)
-
 CHECK_C_SOURCE_COMPILES("
 int main()
 {
@@ -1 +1 @@
-Subproject commit ce74fd0c4009ed9f4bcbdb4a01e96c823e961dc3
+Subproject commit a746c3af449a8754e78ad7971e59e79af7957cdb
@@ -1,6 +1,6 @@
 '\" t
 .\"
-.TH "\FBMYSQL_UPGRADE\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_UPGRADE\FR" "1" "20 July 2020" "MariaDB 10\&.3" "MariaDB Database System"
 .\" -----------------------------------------------------------------
 .\" * set default formatting
 .\" -----------------------------------------------------------------
@@ -165,6 +165,8 @@ in the data directory\&. This is used to quickly check whether all tables have b
 \fB\-\-force\fR
 option\&.
 .PP
+For this reason, \fBmysql_upgrade\fR needs to be run as a user with write access to the data directory\&.
+.PP
 If you install MariaDB from RPM packages on Linux, you must install the server and client RPMs\&.
 \fBmysql_upgrade\fR
 is included in the server RPM but requires the client RPM because the latter includes
@@ -1186,13 +1186,13 @@ i count(*) std(e1/e2)
 3 4 0.00000000
 select std(s1/s2) from bug22555;
 std(s1/s2)
-0.21325764
+0.21328517
 select std(o1/o2) from bug22555;
 std(o1/o2)
 0.2132576358664934
 select std(e1/e2) from bug22555;
 std(e1/e2)
-0.21325764
+0.21328517
 set @saved_div_precision_increment=@@div_precision_increment;
 set div_precision_increment=19;
 select i, count(*), std(s1/s2) from bug22555 group by i order by i;
@@ -25,6 +25,19 @@ ERROR HY000: Table 'user' was not locked with LOCK TABLES
 REVOKE PROCESS ON *.* FROM u;
 ERROR HY000: Table 'user' was not locked with LOCK TABLES
 DROP TABLE t1;
+create database mysqltest1;
+use mysqltest1;
+create table t1(id int);
+insert t1 values(2);
+create user u1@localhost;
+grant select on mysqltest1.t1 to u1@localhost;
+grant update on mysqltest1.* to u1@localhost;
+connect u1, localhost, u1;
+update mysqltest1.t1 set id=1 where id=2;
+connection default;
+disconnect u1;
+drop user u1@localhost;
+drop database mysqltest1;
 #
 # MDEV-20076: SHOW GRANTS does not quote role names properly
 #
@@ -34,6 +34,27 @@ REVOKE EXECUTE ON PROCEDURE sp FROM u;
 REVOKE PROCESS ON *.* FROM u;
 DROP TABLE t1;
 
+#
+# MDEV-23010 UPDATE privilege at Database and Table level fail to update with SELECT command denied to user
+#
+create database mysqltest1;
+use mysqltest1;
+create table t1(id int);
+insert t1 values(2);
+create user u1@localhost;
+grant select on mysqltest1.t1 to u1@localhost;
+grant update on mysqltest1.* to u1@localhost;
+connect u1, localhost, u1;
+update mysqltest1.t1 set id=1 where id=2;
+connection default;
+disconnect u1;
+drop user u1@localhost;
+drop database mysqltest1;
+
+#
+# End of 10.1 tests
+#
+
 --echo #
 --echo # MDEV-20076: SHOW GRANTS does not quote role names properly
 --echo #
@@ -619,7 +619,7 @@ select 4 - 3 * 2, (4 - 3) * 2, 4 - (3 * 2);
 Testing that / is left associative
 select 15 / 5 / 3, (15 / 5) / 3, 15 / (5 / 3);
 15 / 5 / 3 (15 / 5) / 3 15 / (5 / 3)
-1.00000000 1.00000000 9.0000
+1.00000000 1.00000000 8.9998
 Testing that / has precedence over |
 select 105 / 5 | 2, (105 / 5) | 2, 105 / (5 | 2);
 105 / 5 | 2 (105 / 5) | 2 105 / (5 | 2)
@@ -2759,5 +2759,45 @@ SELECT 1 FROM t1 WHERE a XOR 'a';
 1
 DROP TABLE t1;
 #
+# Bug #25207522: INCORRECT ORDER-BY BEHAVIOR ON A PARTITIONED TABLE
+# WITH A COMPOSITE PREFIX INDEX
+#
+create table t1(id int unsigned not null,
+data varchar(2) default null,
+key data_idx (data(1),id)
+) default charset=utf8
+partition by range (id) (
+partition p10 values less than (10),
+partition p20 values less than (20)
+);
+insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ;
+select id from t1 where data = 'ab' order by id;
+id
+4
+5
+6
+14
+15
+16
+drop table t1;
+create table t1(id int unsigned not null,
+data text default null,
+key data_idx (data(1),id)
+) default charset=utf8
+partition by range (id) (
+partition p10 values less than (10),
+partition p20 values less than (20)
+);
+insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ;
+select id from t1 where data = 'ab' order by id;
+id
+4
+5
+6
+14
+15
+16
+drop table t1;
+#
 # End of 10.1 tests
 #
@@ -2973,6 +2973,34 @@ CREATE TABLE t1(a BINARY(80)) PARTITION BY KEY(a) PARTITIONS 3;
 SELECT 1 FROM t1 WHERE a XOR 'a';
 DROP TABLE t1;
 
+--echo #
+--echo # Bug #25207522: INCORRECT ORDER-BY BEHAVIOR ON A PARTITIONED TABLE
+--echo # WITH A COMPOSITE PREFIX INDEX
+--echo #
+create table t1(id int unsigned not null,
+data varchar(2) default null,
+key data_idx (data(1),id)
+) default charset=utf8
+partition by range (id) (
+partition p10 values less than (10),
+partition p20 values less than (20)
+);
+insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ;
+select id from t1 where data = 'ab' order by id;
+drop table t1;
+
+create table t1(id int unsigned not null,
+data text default null,
+key data_idx (data(1),id)
+) default charset=utf8
+partition by range (id) (
+partition p10 values less than (10),
+partition p20 values less than (20)
+);
+insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ;
+select id from t1 where data = 'ab' order by id;
+drop table t1;
+
 --echo #
 --echo # End of 10.1 tests
 --echo #
@@ -9,5 +9,38 @@ ANALYZE TABLE t1;
 Table Op Msg_type Msg_text
 test.t1 analyze status Engine-independent statistics collected
 test.t1 analyze status OK
-SET use_stat_tables = DEFAULT;
 DROP TABLE t1;
+#
+# MDEV-21472: ALTER TABLE ... ANALYZE PARTITION ... with EITS reads and locks all rows
+#
+CREATE TABLE t1 (
+id int(11) auto_increment primary key,
+c1 int(11) DEFAULT NULL
+) PARTITION BY RANGE (id) (
+PARTITION p0 VALUES LESS THAN (4),
+PARTITION p1 VALUES LESS THAN MAXVALUE
+);
+insert into t1(c1) values (1),(1),(1),(1), (1),(1),(1),(1);
+insert into t1(c1) select c1 from t1;
+insert into t1(c1) select c1 from t1;
+select count(*) from t1;
+count(*)
+32
+select count(*) from t1 where id <4;
+count(*)
+3
+flush status;
+set session use_stat_tables='preferably';
+# Must NOT show "Engine-independent statistics collected":
+alter table t1 analyze partition p0;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+# Should not have Handler_read_rnd_next=34
+show session status like 'Handler_read_rnd%';
+Variable_name Value
+Handler_read_rnd 0
+Handler_read_rnd_deleted 0
+Handler_read_rnd_next 34
+drop table t1;
+SET use_stat_tables = DEFAULT;
@@ -11,7 +11,33 @@ CREATE TABLE t1 (pk int PRIMARY KEY, a bit(1), INDEX idx(a)
 INSERT INTO t1 VALUES (1,1),(2,0),(3,0),(4,1);
 
 ANALYZE TABLE t1;
+DROP TABLE t1;
 
+--echo #
+--echo # MDEV-21472: ALTER TABLE ... ANALYZE PARTITION ... with EITS reads and locks all rows
+--echo #
+CREATE TABLE t1 (
+id int(11) auto_increment primary key,
+c1 int(11) DEFAULT NULL
+) PARTITION BY RANGE (id) (
+PARTITION p0 VALUES LESS THAN (4),
+PARTITION p1 VALUES LESS THAN MAXVALUE
+);
+
+insert into t1(c1) values (1),(1),(1),(1), (1),(1),(1),(1);
+insert into t1(c1) select c1 from t1;
+insert into t1(c1) select c1 from t1;
+
+select count(*) from t1;
+select count(*) from t1 where id <4;
+flush status;
+set session use_stat_tables='preferably';
+
+--echo # Must NOT show "Engine-independent statistics collected":
+alter table t1 analyze partition p0;
+
+--echo # Should not have Handler_read_rnd_next=34
+show session status like 'Handler_read_rnd%';
+drop table t1;
+
 SET use_stat_tables = DEFAULT;
-
-DROP TABLE t1;
@@ -2585,6 +2585,30 @@ e 2
 o 6
 DROP TABLE t1, t2;
 #
+# MDEV-19232: Floating point precision / value comparison problem
+#
+CREATE TABLE t1 (region varchar(60), area decimal(10,0), population decimal(11,0));
+INSERT INTO t1 VALUES ('Central America and the Caribbean',91,11797);
+INSERT INTO t1 VALUES ('Central America and the Caribbean',442,66422);
+SET @save_optimizer_switch=@@optimizer_switch;
+SET optimizer_switch='subquery_cache=on';
+SELECT
+population, area, population/area,
+cast(population/area as DECIMAL(20,9)) FROM t1 LIMIT 1;
+population area population/area cast(population/area as DECIMAL(20,9))
+11797 91 129.6374 129.637400000
+SELECT * FROM t1 A
+WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region);
+region area population
+Central America and the Caribbean 442 66422
+SET optimizer_switch='subquery_cache=off';
+SELECT * FROM t1 A
+WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region);
+region area population
+Central America and the Caribbean 442 66422
+SET @@optimizer_switch= @save_optimizer_switch;
+DROP TABLE t1;
+#
 # MDEV-22852: SIGSEGV in sortlength (optimized builds)
 #
 SET @save_optimizer_switch=@@optimizer_switch;
@@ -2115,6 +2115,32 @@ EXPLAIN EXTENDED SELECT * FROM t1 where ( t1.l1 < ANY (SELECT MAX(t2.v1) FROM t
 SELECT * FROM t1 where ( t1.l1 < ANY (SELECT MAX(t2.v1) FROM t2));
 DROP TABLE t1, t2;
 
+--echo #
+--echo # MDEV-19232: Floating point precision / value comparison problem
+--echo #
+
+CREATE TABLE t1 (region varchar(60), area decimal(10,0), population decimal(11,0));
+INSERT INTO t1 VALUES ('Central America and the Caribbean',91,11797);
+INSERT INTO t1 VALUES ('Central America and the Caribbean',442,66422);
+
+SET @save_optimizer_switch=@@optimizer_switch;
+SET optimizer_switch='subquery_cache=on';
+
+SELECT
+population, area, population/area,
+cast(population/area as DECIMAL(20,9)) FROM t1 LIMIT 1;
+
+SELECT * FROM t1 A
+WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region);
+
+SET optimizer_switch='subquery_cache=off';
+SELECT * FROM t1 A
+WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region);
+
+SET @@optimizer_switch= @save_optimizer_switch;
+
+DROP TABLE t1;
+
 --echo #
 --echo # MDEV-22852: SIGSEGV in sortlength (optimized builds)
 --echo #
@@ -972,4 +972,131 @@ id
 DROP PROCEDURE p1;
 DROP TABLE t1;
 # End of 10.0 tests
+#
+# MDEV-23221: A subquery causes crash
+#
+create table t1 (
+location_code varchar(10),
+country_id varchar(10)
+);
+insert into t1 values ('HKG', 'HK');
+insert into t1 values ('NYC', 'US');
+insert into t1 values ('LAX', 'US');
+create table t2 (
+container_id varchar(10),
+cntr_activity_type varchar(10),
+cntr_dest varchar(10)
+);
+insert into t2 values ('AAAA1111', 'VSL', 'NYC');
+insert into t2 values ('AAAA1111', 'CUV', 'NYC');
+insert into t2 values ('BBBB2222', 'VSL', 'LAX');
+insert into t2 values ('BBBB2222', 'XYZ', 'LAX');
+# Must not crash or return an error:
+select
+(select country_id from t1 where location_code = cl1.cntr_dest) as dest_cntry,
+(select
+max(container_id)
+from t2 as cl2
+where
+cl2.container_id = cl1.container_id and
+cl2.cntr_activity_type = 'CUV' and
+exists (select location_code
+from t1
+where
+location_code = cl2.cntr_dest and
+country_id = dest_cntry)
+) as CUV
+from
+t2 cl1;
+dest_cntry CUV
+US AAAA1111
+US AAAA1111
+US NULL
+US NULL
+prepare s from "select
+(select country_id from t1 where location_code = cl1.cntr_dest) as dest_cntry,
+(select
+max(container_id)
+from t2 as cl2
+where
+cl2.container_id = cl1.container_id and
+cl2.cntr_activity_type = 'CUV' and
+exists (select location_code
+from t1
+where
+location_code = cl2.cntr_dest and
+country_id = dest_cntry)
+) as CUV
+from
+t2 cl1";
+execute s;
+dest_cntry CUV
+US AAAA1111
+US AAAA1111
+US NULL
+US NULL
+execute s;
+dest_cntry CUV
+US AAAA1111
+US AAAA1111
+US NULL
+US NULL
+drop table t1,t2;
+#
+# MDEV-20557: SQL query with duplicate table aliases consistently crashes server
+# (Just a testcase)
+#
+create table t1 (id int, id2 int);
+create table t2 (id int, id2 int, a int);
+create table t3 (id int);
+create table t4 (id int);
+select (select 1 from t1 where (exists
+(select 1 from t2
+where t2.a = (select t4.id from t4 where t4.id = t3.id) and t2.id2 = t1.id2))) dt
+from t3;
+ERROR 42000: This version of MariaDB doesn't yet support 'SUBQUERY in ROW in left expression of IN/ALL/ANY'
+drop table t1,t2,t3,t4;
+#
+# MDEV-21649: Crash when using nested EXISTS
+# (Just a testcase)
+#
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id));
+CREATE TABLE t2 (id INT NOT NULL AUTO_INCREMENT, ip_id INT, PRIMARY KEY(id));
+CREATE TABLE t3 (id INT NOT NULL AUTO_INCREMENT, storage_method_id INT, storage_target_id INT, PRIMARY KEY(id));
+SELECT
+W0.`id`
+FROM
+`t1` W0
+WHERE (
+EXISTS(
+SELECT
+V0.`id`
+FROM
+`t2` V0
+WHERE (
+EXISTS(
+SELECT
+U0.`id`
+FROM
+`t2` U0
+INNER JOIN `t3` U4 ON (U0.`id` = U4.`storage_target_id`)
+WHERE (
+U0.`ip_id` = V0.`ip_id`
+AND U4.`storage_method_id` = (
+SELECT
+U5.`storage_method_id`
+FROM
+`t3` U5
+WHERE
+U5.`storage_target_id` = V0.`id`
+LIMIT
+1
+)
+)
+)
+)
+)
+);
+id
+drop table t1,t2,t3;
 set optimizer_switch=default;
@@ -829,5 +829,117 @@ DROP TABLE t1;
 
 --echo # End of 10.0 tests
+
+--echo #
+--echo # MDEV-23221: A subquery causes crash
+--echo #
+create table t1 (
+location_code varchar(10),
+country_id varchar(10)
+);
+insert into t1 values ('HKG', 'HK');
+insert into t1 values ('NYC', 'US');
+insert into t1 values ('LAX', 'US');
+
+create table t2 (
+container_id varchar(10),
+cntr_activity_type varchar(10),
+cntr_dest varchar(10)
+);
+insert into t2 values ('AAAA1111', 'VSL', 'NYC');
+insert into t2 values ('AAAA1111', 'CUV', 'NYC');
+insert into t2 values ('BBBB2222', 'VSL', 'LAX');
+insert into t2 values ('BBBB2222', 'XYZ', 'LAX');
+
+let $query=
+select
+(select country_id from t1 where location_code = cl1.cntr_dest) as dest_cntry,
+(select
+max(container_id)
+from t2 as cl2
+where
+cl2.container_id = cl1.container_id and
+cl2.cntr_activity_type = 'CUV' and
+exists (select location_code
+from t1
+where
+location_code = cl2.cntr_dest and
+country_id = dest_cntry)
+) as CUV
+from
+t2 cl1;
+
+--echo # Must not crash or return an error:
+eval $query;
+
+eval prepare s from "$query";
+execute s;
+execute s;
+
+drop table t1,t2;
+
+--echo #
+--echo # MDEV-20557: SQL query with duplicate table aliases consistently crashes server
+--echo # (Just a testcase)
+--echo #
+
+create table t1 (id int, id2 int);
+create table t2 (id int, id2 int, a int);
+create table t3 (id int);
+create table t4 (id int);
+
+--error ER_NOT_SUPPORTED_YET
+select (select 1 from t1 where (exists
+(select 1 from t2
+where t2.a = (select t4.id from t4 where t4.id = t3.id) and t2.id2 = t1.id2))) dt
+from t3;
+
+drop table t1,t2,t3,t4;
+
+
+--echo #
+--echo # MDEV-21649: Crash when using nested EXISTS
+--echo # (Just a testcase)
+--echo #
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id));
+CREATE TABLE t2 (id INT NOT NULL AUTO_INCREMENT, ip_id INT, PRIMARY KEY(id));
+CREATE TABLE t3 (id INT NOT NULL AUTO_INCREMENT, storage_method_id INT, storage_target_id INT, PRIMARY KEY(id));
+
+SELECT
+W0.`id`
+FROM
+`t1` W0
+WHERE (
+EXISTS(
+SELECT
+V0.`id`
+FROM
+`t2` V0
+WHERE (
+EXISTS(
+SELECT
+U0.`id`
+FROM
+`t2` U0
+INNER JOIN `t3` U4 ON (U0.`id` = U4.`storage_target_id`)
+WHERE (
+U0.`ip_id` = V0.`ip_id`
+AND U4.`storage_method_id` = (
+SELECT
+U5.`storage_method_id`
+FROM
+`t3` U5
+WHERE
+U5.`storage_target_id` = V0.`id`
+LIMIT
+1
+)
+)
+)
+)
+)
+);
+
+drop table t1,t2,t3;
+
 #restore defaults
 set optimizer_switch=default;
@@ -676,6 +676,27 @@ Warnings:
 Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2010e0
 DROP TABLE t1;
 #
+# MDEV-23282 FLOAT(53,0) badly handles out-of-range values
+#
+CREATE OR REPLACE TABLE t1 (c1 FLOAT NOT NULL, c2 FLOAT NOT NULL);
+INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40);
+Warnings:
+Warning 1264 Out of range value for column 'c1' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+SELECT c1, c2 FROM t1;
+c1 c2
+3.40282e38 -3.40282e38
+DROP TABLE t1;
+CREATE OR REPLACE TABLE t1 (c1 FLOAT(53,0) NOT NULL, c2 FLOAT(53,0) NOT NULL);
+INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40);
+Warnings:
+Warning 1264 Out of range value for column 'c1' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+SELECT c1, c2 FROM t1;
+c1 c2
+340282346638528860000000000000000000000 -340282346638528860000000000000000000000
+DROP TABLE t1;
+#
 # End of 10.1 tests
 #
 #
@@ -485,6 +485,20 @@ EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2010e0 AND a>=2010e0;
 DROP TABLE t1;
 
+
+--echo #
+--echo # MDEV-23282 FLOAT(53,0) badly handles out-of-range values
+--echo #
+
+CREATE OR REPLACE TABLE t1 (c1 FLOAT NOT NULL, c2 FLOAT NOT NULL);
+INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40);
+SELECT c1, c2 FROM t1;
+DROP TABLE t1;
+
+CREATE OR REPLACE TABLE t1 (c1 FLOAT(53,0) NOT NULL, c2 FLOAT(53,0) NOT NULL);
+INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40);
+SELECT c1, c2 FROM t1;
+DROP TABLE t1;
 
 --echo #
 --echo # End of 10.1 tests
 --echo #
@@ -1532,11 +1532,8 @@ select (1.20396873 * 0.89550000 * 0.68000000 * 1.08721696 * 0.99500000 *
 1.01500000 * 1.01500000 * 0.99500000)
 0.81298807395367312459230693948000000000
 create table t1 as select 5.05 / 0.014;
-Warnings:
-Note 1265 Data truncated for column '5.05 / 0.014' at row 1
 show warnings;
 Level Code Message
-Note 1265 Data truncated for column '5.05 / 0.014' at row 1
 show create table t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
@@ -1651,8 +1648,6 @@ my_col
 0.12345678912345678912345678912345678912
 DROP TABLE t1;
 CREATE TABLE t1 SELECT 1 / .123456789123456789123456789123456789123456789123456789123456789123456789123456789 AS my_col;
-Warnings:
-Note 1265 Data truncated for column 'my_col' at row 1
 DESCRIBE t1;
 Field Type Null Key Default Extra
 my_col decimal(65,4) YES NULL
@@ -91,8 +91,6 @@ DROP INDEX test ON t1;
 insert into t1 values (10, 1,1,1,1,1,1,1,1,1,1,1,1,1,NULL,0,0,0,1,1,1,1,'one','one');
 insert into t1 values (NULL,2,2,2,2,2,2,2,2,2,2,2,2,2,NULL,NULL,NULL,NULL,NULL,NULL,2,2,'two','two,one');
 insert ignore into t1 values (0,1/3,3,3,3,3,3,3,3,3,3,3,3,3,NULL,'19970303','10:10:10','19970303101010','','','','3',3,3);
-Warnings:
-Warning 1265 Data truncated for column 'string' at row 1
 insert ignore into t1 values (0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,NULL,19970807,080706,19970403090807,-1,-1,-1,'-1',-1,-1);
 Warnings:
 Warning 1264 Out of range value for column 'utiny' at row 1
@@ -130,7 +128,7 @@ select auto,string,tiny,short,medium,long_int,longlong,real_float,real_double,ut
 auto string tiny short medium long_int longlong real_float real_double utiny ushort umedium ulong ulonglong mod(floor(time_stamp/1000000),1000000)-mod(curdate(),1000000) date_field time_field date_time blob_col tinyblob_col mediumblob_col longblob_col
 10 1 1 1 1 1 1 1.0 1.0000 1 00001 1 1 1 0 0000-00-00 00:00:00 0000-00-00 00:00:00 1 1 1 1
 11 2 2 2 2 2 2 2.0 2.0000 2 00002 2 2 2 0 NULL NULL NULL NULL NULL 2 2
-12 0.33333333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3
+12 0.3333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3
 13 -1 -1 -1 -1 -1 -1 -1.0 -1.0000 0 00000 0 0 0 0 1997-08-07 08:07:06 1997-04-03 09:08:07 -1 -1 -1 -1
 14 -429496729 -128 -32768 -8388608 -2147483648 -4294967295 -4294967296.0 -4294967295.0000 0 00000 0 0 0 0 0000-00-00 00:00:00 0000-00-00 00:00:00 -4294967295 -4294967295 -4294967295 -4294967295
 15 4294967295 127 32767 8388607 2147483647 4294967295 4294967296.0 4294967295.0000 255 65535 16777215 4294967295 4294967295 0 0000-00-00 00:00:00 0000-00-00 00:00:00 4294967295 4294967295 4294967295 4294967295
@@ -182,7 +180,7 @@ Warning 1265 Data truncated for column 'new_field' at row 7
 select * from t2;
 auto string mediumblob_col new_field
 1 2 2 ne
-2 0.33333333 ne
+2 0.3333 ne
 3 -1 -1 ne
 4 -429496729 -4294967295 ne
 5 4294967295 4294967295 ne
@@ -854,8 +854,7 @@ sub run_test_server ($$$) {
 My::CoreDump->show($core_file, $exe_mysqld, $opt_parallel);
 
 # Limit number of core files saved
-if ($opt_max_save_core > 0 &&
-$num_saved_cores >= $opt_max_save_core)
+if ($num_saved_cores >= $opt_max_save_core)
 {
 mtr_report(" - deleting it, already saved",
 "$opt_max_save_core");
@@ -871,8 +870,7 @@ sub run_test_server ($$$) {
 },
 $worker_savedir);
 
-if ($opt_max_save_datadir > 0 &&
-$num_saved_datadir >= $opt_max_save_datadir)
+if ($num_saved_datadir >= $opt_max_save_datadir)
 {
 mtr_report(" - skipping '$worker_savedir/'");
 rmtree($worker_savedir);
@@ -881,9 +879,9 @@ sub run_test_server ($$$) {
 {
 mtr_report(" - saving '$worker_savedir/' to '$savedir/'");
 rename($worker_savedir, $savedir);
+$num_saved_datadir++;
 }
 resfile_print_test();
-$num_saved_datadir++;
 $num_failed_test++ unless ($result->{retries} ||
 $result->{exp_fail});
 
|
|||||||
report_option('verbose', $opt_verbose);
|
report_option('verbose', $opt_verbose);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Negative values aren't meaningful on integer options
|
||||||
|
foreach(grep(/=i$/, keys %options))
|
||||||
|
{
|
||||||
|
if (defined ${$options{$_}} &&
|
||||||
|
do { no warnings "numeric"; int ${$options{$_}} < 0})
|
||||||
|
{
|
||||||
|
my $v= (split /=/)[0];
|
||||||
|
die("$v doesn't accept a negative value:");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
# Find the absolute path to the test directory
|
# Find the absolute path to the test directory
|
||||||
$glob_mysql_test_dir= cwd();
|
$glob_mysql_test_dir= cwd();
|
||||||
if ($glob_mysql_test_dir =~ / /)
|
if ($glob_mysql_test_dir =~ / /)
|
||||||
@@ -6557,12 +6566,12 @@ Options for debugging the product
 test(s)
 max-save-core Limit the number of core files saved (to avoid filling
 up disks for heavily crashing server). Defaults to
-$opt_max_save_core, set to 0 for no limit. Set
-it's default with MTR_MAX_SAVE_CORE
+$opt_max_save_core. Set its default with
+MTR_MAX_SAVE_CORE
 max-save-datadir Limit the number of datadir saved (to avoid filling
 up disks for heavily crashing server). Defaults to
-$opt_max_save_datadir, set to 0 for no limit. Set
-it's default with MTR_MAX_SAVE_DATADIR
+$opt_max_save_datadir. Set its default with
+MTR_MAX_SAVE_DATADIR
 max-test-fail Limit the number of test failures before aborting
 the current test run. Defaults to
 $opt_max_test_fail, set to 0 for no limit. Set
@@ -1896,9 +1896,13 @@ Warnings:
 Warning 1264 Out of range value for column 'c1' at row 3
 INSERT IGNORE INTO t5 VALUES('1e+52','-1e+52','1e+52',5),('1e-52','-1e-52','1e-52',6);
 Warnings:
+Warning 1264 Out of range value for column 'c1' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
 Warning 1264 Out of range value for column 'c3' at row 1
 INSERT IGNORE INTO t5 VALUES('1e+53','-1e+53','1e+53',7),('1e-53','-1e-53','1e-53',8);
 Warnings:
+Warning 1264 Out of range value for column 'c1' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
 Warning 1264 Out of range value for column 'c3' at row 1
 SELECT * FROM t5;
 c1 c2 c3 c4
@@ -31,6 +31,7 @@ a
 1
 20
 UPDATE t SET a=3 WHERE a=1;
+SET GLOBAL innodb_status_output= @@GLOBAL.innodb_status_output;
 # Starting with MariaDB 10.2, innodb_read_only implies READ UNCOMMITTED.
 # In earlier versions, this would return the last committed version
 # (only a=3; no record for a=20)!
@@ -59,6 +59,7 @@ SELECT * FROM t;
 UPDATE t SET a=3 WHERE a=1;
 --let $restart_parameters= --innodb-read-only
 --source include/restart_mysqld.inc
+SET GLOBAL innodb_status_output= @@GLOBAL.innodb_status_output;
 --echo # Starting with MariaDB 10.2, innodb_read_only implies READ UNCOMMITTED.
 --echo # In earlier versions, this would return the last committed version
 --echo # (only a=3; no record for a=20)!
mysql-test/suite/maria/encrypt-no-key.result (new file)
@@ -0,0 +1,15 @@
+call mtr.add_suppression('Unknown key id 1. Can''t continue');
+set global aria_encrypt_tables= 1;
+create table t1 (pk int primary key, a int, key(a)) engine=aria transactional=1;
+alter table t1 disable keys;
+insert into t1 values (1,1);
+alter table t1 enable keys;
+ERROR HY000: Unknown key id 1. Can't continue!
+repair table t1 use_frm;
+Table Op Msg_type Msg_text
+test.t1 repair warning Number of rows changed from 0 to 1
+test.t1 repair Error Unknown key id 1. Can't continue!
+test.t1 repair Error Unknown key id 1. Can't continue!
+test.t1 repair status OK
+drop table t1;
+set global aria_encrypt_tables= default;
mysql-test/suite/maria/encrypt-no-key.test (new file)
@@ -0,0 +1,14 @@
+#
+# MDEV-18496 Crash when Aria encryption is enabled but plugin not available
+#
+call mtr.add_suppression('Unknown key id 1. Can''t continue');
+
+set global aria_encrypt_tables= 1;
+create table t1 (pk int primary key, a int, key(a)) engine=aria transactional=1;
+alter table t1 disable keys;
+insert into t1 values (1,1);
+error 192;
+alter table t1 enable keys;
+repair table t1 use_frm;
+drop table t1;
+set global aria_encrypt_tables= default;
@@ -1,3 +1,4 @@
+FLUSH TABLES;
 #
 # Bug#13737949: CRASH IN HA_PARTITION::INDEX_INIT
 # Bug#18694052: SERVER CRASH IN HA_PARTITION::INIT_RECORD_PRIORITY_QUEUE
@@ -8,6 +8,10 @@
 # Crash tests don't work with embedded
 --source include/not_embedded.inc
 
+# Make sure system tables are not open, as the test will kill the server
+# and it will cause corruption errors in the log
+FLUSH TABLES;
+
 # Partitioning test that require debug features
 
 --echo #
mysql-test/suite/roles/drop_current_role.result (new file)
@@ -0,0 +1,5 @@
+create role r;
+set role r;
+drop role r;
+revoke all on *.* from current_role;
+ERROR OP000: Invalid role specification `r`
mysql-test/suite/roles/drop_current_role.test (new file)
@@ -0,0 +1,9 @@
+--source include/not_embedded.inc
+#
+# MDEV-22521 Server crashes in traverse_role_graph_up or Assertion `user' fails in traverse_role_graph_impl
+#
+create role r;
+set role r;
+drop role r;
+error ER_INVALID_ROLE;
+revoke all on *.* from current_role;
@@ -51,9 +51,9 @@ INSERT into t1(name, salary, income_tax) values('Record_2', 501, 501*2.5/1000);
 INSERT into t1(name, salary, income_tax) values('Record_3', 210, 210*2.5/1000);
 SELECT * from t1;
 id name salary income_tax
-1 Record_1 100011 250.027
-2 Record_2 501 1.2525
-3 Record_3 210 0.525
+1 Record_1 100011 250.03
+2 Record_2 501 1.25
+3 Record_3 210 0.53
 connect test_con2, localhost, root,,;
 connection test_con2;
 ## Verifying session & global value of variable ##
@@ -69,11 +69,11 @@ INSERT into t1(name, salary, income_tax) values('Record_5', 501, 501*2.5/1000);
 INSERT into t1(name, salary, income_tax) values('Record_6', 210, 210*2.5/1000);
 SELECT * from t1;
 id name salary income_tax
-1 Record_1 100011 250.027
-2 Record_2 501 1.2525
-3 Record_3 210 0.525
-4 Record_4 100011 250.027
-5 Record_5 501 1.2525
+1 Record_1 100011 250.03
+2 Record_2 501 1.25
+3 Record_3 210 0.53
+4 Record_4 100011 250.028
+5 Record_5 501 1.253
 6 Record_6 210 0.525
 ## Dropping table t1 ##
 drop table t1;
@@ -29,7 +29,7 @@ set time_zone='+1:00';
 flush tables;
 select * from t1;
 a b v
-1 2 0.3333333330000000000
+1 2 0.3333000000000000000
 select * from t8;
 a b v
 1234567890 2 2009-02-14 00:31:30
@@ -34,19 +34,20 @@
 #include <execinfo.h>
 #endif
 
+#ifdef __linux__
 #define PTR_SANE(p) ((p) && (char*)(p) >= heap_start && (char*)(p) <= heap_end)
 
 static char *heap_start;
-
-#if(defined HAVE_BSS_START) && !(defined __linux__)
 extern char *__bss_start;
-#endif
+#else
+#define PTR_SANE(p) (p)
+#endif /* __linux */
 
 void my_init_stacktrace()
 {
-#if(defined HAVE_BSS_START) && !(defined __linux__)
+#ifdef __linux__
 heap_start = (char*) &__bss_start;
-#endif
+#endif /* __linux */
 }
 
 #ifdef __linux__
|
|||||||
|
|
||||||
int my_safe_print_str(const char* val, size_t max_len)
|
int my_safe_print_str(const char* val, size_t max_len)
|
||||||
{
|
{
|
||||||
|
#ifdef __linux__
|
||||||
char *heap_end;
|
char *heap_end;
|
||||||
|
|
||||||
#ifdef __linux__
|
|
||||||
// Try and make use of /proc filesystem to safely print memory contents.
|
// Try and make use of /proc filesystem to safely print memory contents.
|
||||||
if (!safe_print_str(val, max_len))
|
if (!safe_print_str(val, max_len))
|
||||||
return 0;
|
return 0;
|
||||||
#endif
|
|
||||||
|
|
||||||
heap_end= (char*) sbrk(0);
|
heap_end= (char*) sbrk(0);
|
||||||
|
#endif
|
||||||
|
|
||||||
if (!PTR_SANE(val))
|
if (!PTR_SANE(val))
|
||||||
{
|
{
|
||||||
|
@@ -67,6 +67,7 @@ Usage: $0 [OPTIONS]
 --cross-bootstrap For internal use. Used when building the MariaDB system
 tables on a different host than the target.
 --datadir=path The path to the MariaDB data directory.
+--no-defaults Don't read default options from any option file.
 --defaults-extra-file=name
 Read this file after the global files are read.
 --defaults-file=name Only read default options from the given file name.
|
|||||||
--help Display this help and exit.
|
--help Display this help and exit.
|
||||||
--ldata=path The path to the MariaDB data directory. Same as
|
--ldata=path The path to the MariaDB data directory. Same as
|
||||||
--datadir.
|
--datadir.
|
||||||
--no-defaults Don't read default options from any option file.
|
|
||||||
--defaults-file=path Read only this configuration file.
|
|
||||||
--rpm For internal use. This option is used by RPM files
|
--rpm For internal use. This option is used by RPM files
|
||||||
during the MariaDB installation process.
|
during the MariaDB installation process.
|
||||||
--skip-name-resolve Use IP addresses rather than hostnames when creating
|
--skip-name-resolve Use IP addresses rather than hostnames when creating
|
||||||
|
@ -292,7 +292,7 @@ uint my_datetime_binary_length(uint dec)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
On disk we store as unsigned number with DATETIMEF_INT_OFS offset,
|
On disk we store as unsigned number with DATETIMEF_INT_OFS offset,
|
||||||
for HA_KETYPE_BINARY compatibilty purposes.
|
for HA_KETYPE_BINARY compatibility purposes.
|
||||||
*/
|
*/
|
||||||
#define DATETIMEF_INT_OFS 0x8000000000LL
|
#define DATETIMEF_INT_OFS 0x8000000000LL
|
||||||
|
|
||||||
|
@ -33,7 +33,7 @@
|
|||||||
/*
|
/*
|
||||||
Action to perform at a synchronization point.
|
Action to perform at a synchronization point.
|
||||||
NOTE: This structure is moved around in memory by realloc(), qsort(),
|
NOTE: This structure is moved around in memory by realloc(), qsort(),
|
||||||
and memmove(). Do not add objects with non-trivial constuctors
|
and memmove(). Do not add objects with non-trivial constructors
|
||||||
or destructors, which might prevent moving of this structure
|
or destructors, which might prevent moving of this structure
|
||||||
with these functions.
|
with these functions.
|
||||||
*/
|
*/
|
||||||
@ -542,7 +542,7 @@ static void debug_sync_reset(THD *thd)
|
|||||||
@description
|
@description
|
||||||
Removing an action mainly means to decrement the ds_active counter.
|
Removing an action mainly means to decrement the ds_active counter.
|
||||||
But if the action is between other active action in the array, then
|
But if the action is between other active action in the array, then
|
||||||
the array needs to be shrinked. The active actions above the one to
|
the array needs to be shrunk. The active actions above the one to
|
||||||
be removed have to be moved down by one slot.
|
be removed have to be moved down by one slot.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -236,7 +236,7 @@ static File open_error_msg_file(const char *file_name, const char *language,
|
|||||||
MYF(0))) < 0)
|
MYF(0))) < 0)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
Trying pre-5.4 sematics of the --language parameter.
|
Trying pre-5.4 semantics of the --language parameter.
|
||||||
It included the language-specific part, e.g.:
|
It included the language-specific part, e.g.:
|
||||||
--language=/path/to/english/
|
--language=/path/to/english/
|
||||||
*/
|
*/
|
||||||
|
@ -75,8 +75,8 @@ int initialize_encryption_plugin(st_plugin_int *plugin)
|
|||||||
(struct st_mariadb_encryption*) plugin->plugin->info;
|
(struct st_mariadb_encryption*) plugin->plugin->info;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Copmiler on Spark doesn't like the '?' operator here as it
|
Compiler on Spark doesn't like the '?' operator here as it
|
||||||
belives the (uint (*)...) implies the C++ call model.
|
believes the (uint (*)...) implies the C++ call model.
|
||||||
*/
|
*/
|
||||||
if (handle->crypt_ctx_size)
|
if (handle->crypt_ctx_size)
|
||||||
encryption_handler.encryption_ctx_size_func= handle->crypt_ctx_size;
|
encryption_handler.encryption_ctx_size_func= handle->crypt_ctx_size;
|
||||||
|
@ -159,7 +159,7 @@ Event_creation_ctx::load_from_db(THD *thd,
|
|||||||
/*************************************************************************/
|
/*************************************************************************/
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Initiliazes dbname and name of an Event_queue_element_for_exec
|
Initializes dbname and name of an Event_queue_element_for_exec
|
||||||
object
|
object
|
||||||
|
|
||||||
SYNOPSIS
|
SYNOPSIS
|
||||||
|
@ -672,7 +672,7 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data,
|
|||||||
DBUG_PRINT("info", ("name: %.*s", (int) parse_data->name.length,
|
DBUG_PRINT("info", ("name: %.*s", (int) parse_data->name.length,
|
||||||
parse_data->name.str));
|
parse_data->name.str));
|
||||||
|
|
||||||
DBUG_PRINT("info", ("check existance of an event with the same name"));
|
DBUG_PRINT("info", ("check existence of an event with the same name"));
|
||||||
if (!find_named_event(&parse_data->dbname, &parse_data->name, table))
|
if (!find_named_event(&parse_data->dbname, &parse_data->name, table))
|
||||||
{
|
{
|
||||||
if (thd->lex->create_info.or_replace())
|
if (thd->lex->create_info.or_replace())
|
||||||
|
@ -97,7 +97,7 @@ Event_parse_data::init_name(THD *thd, sp_name *spn)
|
|||||||
ENDS or AT is in the past, we are trying to create an event that
|
ENDS or AT is in the past, we are trying to create an event that
|
||||||
will never be executed. If it has ON COMPLETION NOT PRESERVE
|
will never be executed. If it has ON COMPLETION NOT PRESERVE
|
||||||
(default), then it would normally be dropped already, so on CREATE
|
(default), then it would normally be dropped already, so on CREATE
|
||||||
EVENT we give a warning, and do not create anyting. On ALTER EVENT
|
EVENT we give a warning, and do not create anything. On ALTER EVENT
|
||||||
we give a error, and do not change the event.
|
we give a error, and do not change the event.
|
||||||
|
|
||||||
If the event has ON COMPLETION PRESERVE, then we see if the event is
|
If the event has ON COMPLETION PRESERVE, then we see if the event is
|
||||||
@ -356,7 +356,7 @@ wrong_value:
|
|||||||
EVERY 5 MINUTE STARTS "2004-12-12 10:00:00" means that
|
EVERY 5 MINUTE STARTS "2004-12-12 10:00:00" means that
|
||||||
the event will be executed every 5 minutes but this will
|
the event will be executed every 5 minutes but this will
|
||||||
start at the date shown above. Expressions are possible :
|
start at the date shown above. Expressions are possible :
|
||||||
DATE_ADD(NOW(), INTERVAL 1 DAY) -- start tommorow at
|
DATE_ADD(NOW(), INTERVAL 1 DAY) -- start tomorrow at
|
||||||
same time.
|
same time.
|
||||||
|
|
||||||
RETURN VALUE
|
RETURN VALUE
|
||||||
@ -410,7 +410,7 @@ wrong_value:
|
|||||||
EVERY 5 MINUTE ENDS "2004-12-12 10:00:00" means that
|
EVERY 5 MINUTE ENDS "2004-12-12 10:00:00" means that
|
||||||
the event will be executed every 5 minutes but this will
|
the event will be executed every 5 minutes but this will
|
||||||
end at the date shown above. Expressions are possible :
|
end at the date shown above. Expressions are possible :
|
||||||
DATE_ADD(NOW(), INTERVAL 1 DAY) -- end tommorow at
|
DATE_ADD(NOW(), INTERVAL 1 DAY) -- end tomorrow at
|
||||||
same time.
|
same time.
|
||||||
|
|
||||||
RETURN VALUE
|
RETURN VALUE
|
||||||
|
@ -360,7 +360,7 @@ Event_queue::drop_matching_events(THD *thd, const LEX_CSTRING *pattern,
|
|||||||
We don't call mysql_cond_broadcast(&COND_queue_state);
|
We don't call mysql_cond_broadcast(&COND_queue_state);
|
||||||
If we remove the top event:
|
If we remove the top event:
|
||||||
1. The queue is empty. The scheduler will wake up at some time and
|
1. The queue is empty. The scheduler will wake up at some time and
|
||||||
realize that the queue is empty. If create_event() comes inbetween
|
realize that the queue is empty. If create_event() comes in between
|
||||||
it will signal the scheduler
|
it will signal the scheduler
|
||||||
2. The queue is not empty, but the next event after the previous top,
|
2. The queue is not empty, but the next event after the previous top,
|
||||||
won't be executed any time sooner than the element we removed. Hence,
|
won't be executed any time sooner than the element we removed. Hence,
|
||||||
|
@ -129,7 +129,7 @@ bool Events::check_if_system_tables_error()
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
Reconstructs interval expression from interval type and expression
|
Reconstructs interval expression from interval type and expression
|
||||||
value that is in form of a value of the smalles entity:
|
value that is in form of a value of the smallest entity:
|
||||||
For
|
For
|
||||||
YEAR_MONTH - expression is in months
|
YEAR_MONTH - expression is in months
|
||||||
DAY_MINUTE - expression is in minutes
|
DAY_MINUTE - expression is in minutes
|
||||||
|
105
sql/field.cc
105
sql/field.cc
@ -47,7 +47,7 @@
|
|||||||
#define MAX_EXPONENT 1024
|
#define MAX_EXPONENT 1024
|
||||||
|
|
||||||
/*****************************************************************************
|
/*****************************************************************************
|
||||||
Instansiate templates and static variables
|
Instantiate templates and static variables
|
||||||
*****************************************************************************/
|
*****************************************************************************/
|
||||||
|
|
||||||
static const char *zero_timestamp="0000-00-00 00:00:00.000000";
|
static const char *zero_timestamp="0000-00-00 00:00:00.000000";
|
||||||
@ -83,7 +83,7 @@ const char field_separator=',';
|
|||||||
/*
|
/*
|
||||||
Rules for merging different types of fields in UNION
|
Rules for merging different types of fields in UNION
|
||||||
|
|
||||||
NOTE: to avoid 256*256 table, gap in table types numeration is skiped
|
NOTE: to avoid 256*256 table, gap in table types numeration is skipped
|
||||||
following #defines describe that gap and how to canculate number of fields
|
following #defines describe that gap and how to canculate number of fields
|
||||||
and index of field in this array.
|
and index of field in this array.
|
||||||
*/
|
*/
|
||||||
@ -1470,7 +1470,7 @@ Item *Field_num::get_equal_zerofill_const_item(THD *thd, const Context &ctx,
|
|||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
Contruct warning parameters using thd->no_errors
|
Construct warning parameters using thd->no_errors
|
||||||
to determine whether to generate or suppress warnings.
|
to determine whether to generate or suppress warnings.
|
||||||
We can get here in a query like this:
|
We can get here in a query like this:
|
||||||
SELECT COUNT(@@basedir);
|
SELECT COUNT(@@basedir);
|
||||||
@ -1518,7 +1518,7 @@ Value_source::Converter_string_to_number::check_edom_and_truncation(THD *thd,
|
|||||||
if (filter.want_warning_edom())
|
if (filter.want_warning_edom())
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
We can use err.ptr() here as ErrConvString is guranteed to put an
|
We can use err.ptr() here as ErrConvString is guaranteed to put an
|
||||||
end \0 here.
|
end \0 here.
|
||||||
*/
|
*/
|
||||||
THD *wthd= thd ? thd : current_thd;
|
THD *wthd= thd ? thd : current_thd;
|
||||||
@ -1550,7 +1550,7 @@ Value_source::Converter_string_to_number::check_edom_and_truncation(THD *thd,
|
|||||||
- found garbage at the end of the string.
|
- found garbage at the end of the string.
|
||||||
|
|
||||||
@param type Data type name (e.g. "decimal", "integer", "double")
|
@param type Data type name (e.g. "decimal", "integer", "double")
|
||||||
@param edom Indicates that the string-to-number routine retuned
|
@param edom Indicates that the string-to-number routine returned
|
||||||
an error code equivalent to EDOM (value out of domain),
|
an error code equivalent to EDOM (value out of domain),
|
||||||
i.e. the string fully consisted of garbage and the
|
i.e. the string fully consisted of garbage and the
|
||||||
conversion routine could not get any digits from it.
|
conversion routine could not get any digits from it.
|
||||||
@ -1613,7 +1613,7 @@ int Field_num::check_edom_and_truncation(const char *type, bool edom,
|
|||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Conver a string to an integer then check bounds.
|
Convert a string to an integer then check bounds.
|
||||||
|
|
||||||
SYNOPSIS
|
SYNOPSIS
|
||||||
Field_num::get_int
|
Field_num::get_int
|
||||||
@ -2685,7 +2685,7 @@ int Field_decimal::store(const char *from_arg, size_t len, CHARSET_INFO *cs)
|
|||||||
We only have to generate warnings if count_cuted_fields is set.
|
We only have to generate warnings if count_cuted_fields is set.
|
||||||
This is to avoid extra checks of the number when they are not needed.
|
This is to avoid extra checks of the number when they are not needed.
|
||||||
Even if this flag is not set, it's OK to increment warnings, if
|
Even if this flag is not set, it's OK to increment warnings, if
|
||||||
it makes the code easer to read.
|
it makes the code easier to read.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
if (get_thd()->count_cuted_fields > CHECK_FIELD_EXPRESSION)
|
if (get_thd()->count_cuted_fields > CHECK_FIELD_EXPRESSION)
|
||||||
@ -2768,7 +2768,7 @@ int Field_decimal::store(const char *from_arg, size_t len, CHARSET_INFO *cs)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Now write the formated number
|
Now write the formatted number
|
||||||
|
|
||||||
First the digits of the int_% parts.
|
First the digits of the int_% parts.
|
||||||
Do we have enough room to write these digits ?
|
Do we have enough room to write these digits ?
|
||||||
@ -3288,7 +3288,7 @@ int Field_new_decimal::store(const char *from, size_t length,
|
|||||||
If check_decimal() failed because of EDOM-alike error,
|
If check_decimal() failed because of EDOM-alike error,
|
||||||
(e.g. E_DEC_BAD_NUM), we have to initialize decimal_value to zero.
|
(e.g. E_DEC_BAD_NUM), we have to initialize decimal_value to zero.
|
||||||
Note: if check_decimal() failed because of truncation,
|
Note: if check_decimal() failed because of truncation,
|
||||||
decimal_value is alreay properly initialized.
|
decimal_value is already properly initialized.
|
||||||
*/
|
*/
|
||||||
my_decimal_set_zero(&decimal_value);
|
my_decimal_set_zero(&decimal_value);
|
||||||
/*
|
/*
|
||||||
@ -4757,11 +4757,12 @@ int truncate_double(double *nr, uint field_length, uint dec,
|
|||||||
{
|
{
|
||||||
uint order= field_length - dec;
|
uint order= field_length - dec;
|
||||||
uint step= array_elements(log_10) - 1;
|
uint step= array_elements(log_10) - 1;
|
||||||
max_value= 1.0;
|
double max_value_by_dec= 1.0;
|
||||||
for (; order > step; order-= step)
|
for (; order > step; order-= step)
|
||||||
max_value*= log_10[step];
|
max_value_by_dec*= log_10[step];
|
||||||
max_value*= log_10[order];
|
max_value_by_dec*= log_10[order];
|
||||||
max_value-= 1.0 / log_10[dec];
|
max_value_by_dec-= 1.0 / log_10[dec];
|
||||||
|
set_if_smaller(max_value, max_value_by_dec);
|
||||||
|
|
||||||
/* Check for infinity so we don't get NaN in calculations */
|
/* Check for infinity so we don't get NaN in calculations */
|
||||||
if (!std::isinf(res))
|
if (!std::isinf(res))
|
||||||
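
The hunk above rewrites truncate_double() so that the limit implied by the (field_length, dec) pair is computed into its own variable and then combined with the previously computed bound by taking the smaller of the two, rather than overwriting it. A minimal standalone C++ sketch of that clamping idea, for illustration only (clamp_double() and its "no fixed decimals" sentinel are assumptions, not the MariaDB implementation):

#include <algorithm>
#include <cmath>
#include <cstdio>

// Clamp *nr into [-limit, limit], where limit is the tighter of an externally
// supplied maximum and the bound implied by a DOUBLE(M,D)-style declaration.
static bool clamp_double(double *nr, unsigned field_length, unsigned dec,
                         double max_value)
{
  if (dec < 31)                        // sentinel for "no fixed decimals" (assumption)
  {
    // 10^(M-D) - 10^(-D), e.g. DOUBLE(5,2) -> 999.99
    double max_value_by_dec= std::pow(10.0, (double)(field_length - dec)) -
                             1.0 / std::pow(10.0, (double)dec);
    max_value= std::min(max_value, max_value_by_dec);
  }
  if (std::isnan(*nr))  { *nr= 0.0;        return true; }
  if (*nr < -max_value) { *nr= -max_value; return true; }
  if (*nr >  max_value) { *nr=  max_value; return true; }
  return false;                        // value fit without truncation
}

int main()
{
  double v= 12345.678;
  bool truncated= clamp_double(&v, 5, 2, 1e308);   // DOUBLE(5,2)-style bound
  std::printf("%f truncated=%d\n", v, truncated);  // prints 999.990000 truncated=1
  return 0;
}
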
@@ -5056,7 +5057,7 @@ Field_timestamp::Field_timestamp(uchar *ptr_arg, uint32 len_arg,
 {
 /*
 We mark the flag with TIMESTAMP_FLAG to indicate to the client that
-this field will be automaticly updated on insert.
+this field will be automatically updated on insert.
 */
 flags|= TIMESTAMP_FLAG;
 if (unireg_check != TIMESTAMP_DN_FIELD)
@@ -7509,7 +7510,7 @@ Field_string::unpack(uchar *to, const uchar *from, const uchar *from_end,
 with the real type. Since all allowable types have 0xF as most
 significant bits of the metadata word, lengths <256 will not affect
 the real type at all, while all other values will result in a
-non-existant type in the range 17-244.
+non-existent type in the range 17-244.

 @see Field_string::unpack

@@ -7695,8 +7696,7 @@ void Field_varstring::mark_unused_memory_as_defined()
 #endif


-int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
-uint max_len)
+int Field_varstring::cmp(const uchar *a_ptr, const uchar *b_ptr)
 {
 uint a_length, b_length;
 int diff;
@@ -7711,8 +7711,8 @@ int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
 a_length= uint2korr(a_ptr);
 b_length= uint2korr(b_ptr);
 }
-set_if_smaller(a_length, max_len);
-set_if_smaller(b_length, max_len);
+set_if_smaller(a_length, field_length);
+set_if_smaller(b_length, field_length);
 diff= field_charset->coll->strnncollsp(field_charset,
 a_ptr+
 length_bytes,
@@ -7724,6 +7724,43 @@ int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
 }


+static int cmp_str_prefix(const uchar *ua, size_t alen, const uchar *ub,
+size_t blen, size_t prefix, CHARSET_INFO *cs)
+{
+const char *a= (char*)ua, *b= (char*)ub;
+MY_STRCOPY_STATUS status;
+prefix/= cs->mbmaxlen;
+alen= cs->cset->well_formed_char_length(cs, a, a + alen, prefix, &status);
+blen= cs->cset->well_formed_char_length(cs, b, b + blen, prefix, &status);
+return cs->coll->strnncollsp(cs, ua, alen, ub, blen);
+}


+int Field_varstring::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr,
+size_t prefix_len)
+{
+/* avoid expensive well_formed_char_length if possible */
+if (prefix_len == table->field[field_index]->field_length)
+return Field_varstring::cmp(a_ptr, b_ptr);

+size_t a_length, b_length;

+if (length_bytes == 1)
+{
+a_length= *a_ptr;
+b_length= *b_ptr;
+}
+else
+{
+a_length= uint2korr(a_ptr);
+b_length= uint2korr(b_ptr);
+}
+return cmp_str_prefix(a_ptr+length_bytes, a_length, b_ptr+length_bytes,
+b_length, prefix_len, field_charset);
+}


 /**
 @note
 varstring and blob keys are ALWAYS stored with a 2 byte length prefix
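
The added cmp_str_prefix()/Field_varstring::cmp_prefix() pair above compares only as many characters as fit into the key prefix before handing the operands to the normal collation comparison. A small self-contained sketch of the same idea, with plain byte strings standing in for the charset machinery (the helper name and the fixed bytes-per-character simplification are assumptions, not the real CHARSET_INFO API):

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>

// Trim both operands to the prefix budget (expressed in characters), then
// compare; std::string::compare stands in for a collation-aware strnncollsp().
static int cmp_prefix_demo(const std::string &a, const std::string &b,
                           std::size_t prefix_bytes,
                           std::size_t bytes_per_char = 1)
{
  std::size_t prefix_chars= prefix_bytes / bytes_per_char;
  std::string ta= a.substr(0, std::min(a.size(), prefix_chars * bytes_per_char));
  std::string tb= b.substr(0, std::min(b.size(), prefix_chars * bytes_per_char));
  return ta.compare(tb);
}

int main()
{
  // With a 3-byte prefix, "abcdef" and "abcxyz" compare equal -- exactly the
  // behaviour a prefix key needs.
  std::cout << cmp_prefix_demo("abcdef", "abcxyz", 3) << "\n";  // 0
  std::cout << cmp_prefix_demo("abcdef", "abd", 3) << "\n";     // < 0
  return 0;
}
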
@@ -8235,8 +8272,7 @@ longlong Field_varstring_compressed::val_int(void)
 }


-int Field_varstring_compressed::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
-uint max_len)
+int Field_varstring_compressed::cmp(const uchar *a_ptr, const uchar *b_ptr)
 {
 String a, b;
 uint a_length, b_length;
@@ -8255,11 +8291,6 @@ int Field_varstring_compressed::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
 uncompress(&a, &a, a_ptr + length_bytes, a_length);
 uncompress(&b, &b, b_ptr + length_bytes, b_length);

-if (a.length() > max_len)
-a.length(max_len);
-if (b.length() > max_len)
-b.length(max_len);
-
 return sortcmp(&a, &b, field_charset);
 }

@@ -8494,16 +8525,24 @@ int Field_blob::cmp(const uchar *a,uint32 a_length, const uchar *b,
 }


-int Field_blob::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
-uint max_length)
+int Field_blob::cmp(const uchar *a_ptr, const uchar *b_ptr)
 {
 uchar *blob1,*blob2;
 memcpy(&blob1, a_ptr+packlength, sizeof(char*));
 memcpy(&blob2, b_ptr+packlength, sizeof(char*));
-uint a_len= get_length(a_ptr), b_len= get_length(b_ptr);
-set_if_smaller(a_len, max_length);
-set_if_smaller(b_len, max_length);
-return Field_blob::cmp(blob1,a_len,blob2,b_len);
+size_t a_len= get_length(a_ptr), b_len= get_length(b_ptr);
+return cmp(blob1, (uint32)a_len, blob2, (uint32)b_len);
+}
+
+
+int Field_blob::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr,
+size_t prefix_len)
+{
+uchar *blob1,*blob2;
+memcpy(&blob1, a_ptr+packlength, sizeof(char*));
+memcpy(&blob2, b_ptr+packlength, sizeof(char*));
+size_t a_len= get_length(a_ptr), b_len= get_length(b_ptr);
+return cmp_str_prefix(blob1, a_len, blob2, b_len, prefix_len, field_charset);
 }


@@ -9923,7 +9962,7 @@ my_decimal *Field_bit::val_decimal(my_decimal *deciaml_value)
 The a and b pointer must be pointers to the field in a record
 (not the table->record[0] necessarily)
 */
-int Field_bit::cmp_max(const uchar *a, const uchar *b, uint max_len)
+int Field_bit::cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len)
 {
 my_ptrdiff_t a_diff= a - ptr;
 my_ptrdiff_t b_diff= b - ptr;

32  sql/field.h
@@ -277,7 +277,7 @@ protected:
 };


-// String-to-number convertion methods for the old code compatibility
+// String-to-number conversion methods for the old code compatibility
 longlong longlong_from_string_with_check(CHARSET_INFO *cs, const char *cptr,
 const char *end) const
 {
@@ -358,7 +358,7 @@ public:
 /*
 Item context attributes.
 Comparison functions pass their attributes to propagate_equal_fields().
-For exmple, for string comparison, the collation of the comparison
+For example, for string comparison, the collation of the comparison
 operation is important inside propagate_equal_fields().
 */
 class Context
@@ -469,7 +469,7 @@ inline bool is_temporal_type_with_date(enum_field_types type)


 /**
-Convert temporal real types as retuned by field->real_type()
+Convert temporal real types as returned by field->real_type()
 to field type as returned by field->type().

 @param real_type Real type.
@@ -1081,9 +1081,13 @@ public:
 return type();
 }
 inline int cmp(const uchar *str) { return cmp(ptr,str); }
-virtual int cmp_max(const uchar *a, const uchar *b, uint max_len)
-{ return cmp(a, b); }
 virtual int cmp(const uchar *,const uchar *)=0;
+/*
+The following method is used for comparing prefix keys.
+Currently it's only used in partitioning.
+*/
+virtual int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len)
+{ return cmp(a, b); }
 virtual int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U)
 { return memcmp(a,b,pack_length()); }
 virtual int cmp_offset(uint row_offset)
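
The header change above replaces the max_len-based cmp_max() with a virtual cmp_prefix() whose base-class default simply forwards to the full comparison, so only variable-length types need to override it. A standalone sketch of that design pattern, with invented class names (this is not the real Field hierarchy):

#include <cstddef>
#include <cstring>
#include <iostream>

struct FieldDemo
{
  virtual ~FieldDemo() = default;
  virtual int cmp(const char *a, const char *b) const = 0;
  // Prefix-key comparison (e.g. for partitioning); most field types can
  // simply delegate to the ordinary full-value comparison.
  virtual int cmp_prefix(const char *a, const char *b, std::size_t) const
  { return cmp(a, b); }
};

struct IntFieldDemo : FieldDemo          // fixed width: prefix == full value
{
  int cmp(const char *a, const char *b) const override
  { return std::memcmp(a, b, sizeof(int)); }
};

struct VarcharFieldDemo : FieldDemo      // variable length: honour the prefix
{
  int cmp(const char *a, const char *b) const override
  { return std::strcmp(a, b); }
  int cmp_prefix(const char *a, const char *b, std::size_t prefix) const override
  { return std::strncmp(a, b, prefix); }
};

int main()
{
  VarcharFieldDemo v;
  std::cout << v.cmp("abcdef", "abcxyz") << " "
            << v.cmp_prefix("abcdef", "abcxyz", 3) << "\n";  // nonzero, then 0
  return 0;
}
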
@@ -3473,11 +3477,8 @@ public:
 longlong val_int(void);
 String *val_str(String*,String *);
 my_decimal *val_decimal(my_decimal *);
-int cmp_max(const uchar *, const uchar *, uint max_length);
-int cmp(const uchar *a,const uchar *b)
-{
-return cmp_max(a, b, ~0U);
-}
+int cmp(const uchar *a,const uchar *b);
+int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len);
 void sort_string(uchar *buff,uint length);
 uint get_key_image(uchar *buff,uint length, imagetype type);
 void set_key_image(const uchar *buff,uint length);
@@ -3541,7 +3542,7 @@ private:
 {
 return (field_length - 1) / field_charset->mbmaxlen;
 }
-int cmp_max(const uchar *a_ptr, const uchar *b_ptr, uint max_len);
+int cmp(const uchar *a_ptr, const uchar *b_ptr);

 /*
 Compressed fields can't have keys as two rows may have different
@@ -3707,9 +3708,8 @@ public:
 longlong val_int(void);
 String *val_str(String*,String *);
 my_decimal *val_decimal(my_decimal *);
-int cmp_max(const uchar *, const uchar *, uint max_length);
-int cmp(const uchar *a,const uchar *b)
-{ return cmp_max(a, b, ~0U); }
+int cmp(const uchar *a,const uchar *b);
+int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len);
 int cmp(const uchar *a, uint32 a_length, const uchar *b, uint32 b_length);
 int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U);
 int key_cmp(const uchar *,const uchar*);
@@ -4106,7 +4106,7 @@ private:
 This is the reason:
 - Field_bit::cmp_binary() is only implemented in the base class
 (Field::cmp_binary()).
-- Field::cmp_binary() currenly use pack_length() to calculate how
+- Field::cmp_binary() currently uses pack_length() to calculate how
 long the data is.
 - pack_length() includes size of the bits stored in the NULL bytes
 of the record.
@@ -4165,7 +4165,7 @@ public:
 }
 int cmp_binary_offset(uint row_offset)
 { return cmp_offset(row_offset); }
-int cmp_max(const uchar *a, const uchar *b, uint max_length);
+int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len);
 int key_cmp(const uchar *a, const uchar *b)
 { return cmp_binary((uchar *) a, (uchar *) b); }
 int key_cmp(const uchar *str, uint length);
@@ -230,7 +230,7 @@ static void do_skip(Copy_field *copy __attribute__((unused)))

 note: if the record we're copying from is NULL-complemetned (i.e.
 from_field->table->null_row==1), it will also have all NULLable columns to be
-set to NULLs, so we dont need to check table->null_row here.
+set to NULLs, so we don't need to check table->null_row here.
 */

 static void do_copy_null(Copy_field *copy)
@@ -862,12 +862,12 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
 }
 if (!quick_select)
 {
-(void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */
+(void) file->extra(HA_EXTRA_NO_CACHE); /* End caching of records */
 if (!next_pos)
 file->ha_rnd_end();
 }

-/* Signal we should use orignal column read and write maps */
+/* Signal we should use original column read and write maps */
 sort_form->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);

 if (unlikely(thd->is_error()))
@@ -1877,7 +1877,7 @@ int Gcalc_scan_iterator::add_eq_node(Gcalc_heap::Info *node, point *sp)
 if (!en)
 GCALC_DBUG_RETURN(1);

-/* eq_node iserted after teh equal point. */
+/* eq_node inserted after the equal point. */
 en->next= node->get_next();
 node->next= en;

@@ -362,9 +362,9 @@ enum Gcalc_scan_events


 /*
-Gcalc_scan_iterator incapsulates the slisescan algorithm.
-It takes filled Gcalc_heap as an datasource. Then can be
-iterated trought the vertexes and intersection points with
+Gcalc_scan_iterator incapsulates the slicescan algorithm.
+It takes filled Gcalc_heap as a datasource. Then can be
+iterated through the vertexes and intersection points with
 the step() method. After the 'step()' one usually observes
 the current 'slice' to do the necessary calculations, like
 looking for intersections, calculating the area, whatever.
@@ -1184,14 +1184,14 @@ int Gcalc_operation_reducer::connect_threads(
 {
 rp0->outer_poly= prev_range->thread_start;
 tb->thread_start= prev_range->thread_start;
-/* Chack if needed */
+/* Check if needed */
 ta->thread_start= prev_range->thread_start;
 }
 else
 {
 rp0->outer_poly= 0;
 ta->thread_start= rp0;
-/* Chack if needed */
+/* Check if needed */
 tb->thread_start= rp0;
 }
 GCALC_DBUG_RETURN(0);
@@ -1480,7 +1480,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,


 /**
-@brief Check and repair the table if neccesary
+@brief Check and repair the table if necessary

 @param thd Thread object

@@ -2967,7 +2967,7 @@ error:
 /**
 Read the .par file to get the partitions engines and names

-@param name Name of table file (without extention)
+@param name Name of table file (without extension)

 @return Operation status
 @retval true Failure
@@ -3197,7 +3197,7 @@ static uchar *get_part_name(PART_NAME_DEF *part, size_t *length,

 @return Operation status
 @retval true Failure
-@retval false Sucess
+@retval false Success
 */

 bool ha_partition::insert_partition_name_in_hash(const char *name, uint part_id,
@@ -3323,7 +3323,7 @@ err:

 @return Operation status
 @retval true Failure
-@retval false Sucess
+@retval false Success
 */

 bool ha_partition::set_ha_share_ref(Handler_share **ha_share_arg)
@@ -4294,7 +4294,7 @@ int ha_partition::write_row(uchar * buf)
 /*
 If we have failed to set the auto-increment value for this row,
 it is highly likely that we will not be able to insert it into
-the correct partition. We must check and fail if neccessary.
+the correct partition. We must check and fail if necessary.
 */
 if (unlikely(error))
 goto exit;
@@ -4365,7 +4365,7 @@ exit:
 have the previous row record in it, while new_data will have the newest
 data in it.
 Keep in mind that the server can do updates based on ordering if an
-ORDER BY clause was used. Consecutive ordering is not guarenteed.
+ORDER BY clause was used. Consecutive ordering is not guaranteed.

 Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
 new_data is always record[0]
@@ -4498,7 +4498,7 @@ exit:
 (from either a previous rnd_xxx() or index_xxx() call).
 If you keep a pointer to the last row or can access a primary key it will
 make doing the deletion quite a bit easier.
-Keep in mind that the server does no guarentee consecutive deletions.
+Keep in mind that the server does no guarantee consecutive deletions.
 ORDER BY clauses can be used.

 Called in sql_acl.cc and sql_udf.cc to manage internal table information.
@@ -4880,7 +4880,7 @@ int ha_partition::end_bulk_insert()

 When scan is used we will scan one handler partition at a time.
 When preparing for rnd_pos we will init all handler partitions.
-No extra cache handling is needed when scannning is not performed.
+No extra cache handling is needed when scanning is not performed.

 Before initialising we will call rnd_end to ensure that we clean up from
 any previous incarnation of a table scan.
@@ -8590,7 +8590,7 @@ static int end_keyread_cb(handler* h, void *unused)
 function after completing a query.
 3) It is called when deleting the QUICK_RANGE_SELECT object if the
 QUICK_RANGE_SELECT object had its own handler object. It is called
-immediatley before close of this local handler object.
+immediately before close of this local handler object.
 HA_EXTRA_KEYREAD:
 HA_EXTRA_NO_KEYREAD:
 These parameters are used to provide an optimisation hint to the handler.
@@ -8627,7 +8627,7 @@ static int end_keyread_cb(handler* h, void *unused)
 HA_EXTRA_IGNORE_DUP_KEY:
 HA_EXTRA_NO_IGNORE_DUP_KEY:
 Informs the handler to we will not stop the transaction if we get an
-duplicate key errors during insert/upate.
+duplicate key errors during insert/update.
 Always called in pair, triggered by INSERT IGNORE and other similar
 SQL constructs.
 Not used by MyISAM.
@@ -10081,7 +10081,7 @@ bool ha_partition::prepare_inplace_alter_table(TABLE *altered_table,

 /*
 Changing to similar partitioning, only update metadata.
-Non allowed changes would be catched in prep_alter_part_table().
+Non allowed changes would be caught in prep_alter_part_table().
 */
 if (ha_alter_info->alter_info->partition_flags == ALTER_PARTITION_INFO)
 {
@@ -10117,7 +10117,7 @@ bool ha_partition::inplace_alter_table(TABLE *altered_table,

 /*
 Changing to similar partitioning, only update metadata.
-Non allowed changes would be catched in prep_alter_part_table().
+Non allowed changes would be caught in prep_alter_part_table().
 */
 if (ha_alter_info->alter_info->partition_flags == ALTER_PARTITION_INFO)
 {
@@ -10165,7 +10165,7 @@ bool ha_partition::commit_inplace_alter_table(TABLE *altered_table,

 /*
 Changing to similar partitioning, only update metadata.
-Non allowed changes would be catched in prep_alter_part_table().
+Non allowed changes would be caught in prep_alter_part_table().
 */
 if (ha_alter_info->alter_info->partition_flags == ALTER_PARTITION_INFO)
 {
@@ -509,7 +509,7 @@ public:
 -------------------------------------------------------------------------
 MODULE create/delete handler object
 -------------------------------------------------------------------------
-Object create/delete methode. The normal called when a table object
+Object create/delete method. Normally called when a table object
 exists. There is also a method to create the handler object with only
 partition information. This is used from mysql_create_table when the
 table is to be created and the engine type is deduced to be the
@@ -824,7 +824,7 @@ public:

 /**
 @breif
-Positions an index cursor to the index specified in the hanlde. Fetches the
+Positions an index cursor to the index specified in the handle. Fetches the
 row if available. If the key value is null, begin at first key of the
 index.
 */
@@ -1119,7 +1119,7 @@ public:

 HA_REC_NOT_IN_SEQ:
 This flag is set for handlers that cannot guarantee that the rows are
-returned accroding to incremental positions (0, 1, 2, 3...).
+returned according to incremental positions (0, 1, 2, 3...).
 This also means that rnd_next() should return HA_ERR_RECORD_DELETED
 if it finds a deleted row.
 (MyISAM (not fixed length row), HEAP, InnoDB)
@@ -721,7 +721,7 @@ int ha_end()


 /*
-This should be eventualy based on the graceful shutdown flag.
+This should be eventually based on the graceful shutdown flag.
 So if flag is equal to HA_PANIC_CLOSE, the deallocate
 the errors.
 */
@@ -1330,8 +1330,8 @@ int ha_commit_trans(THD *thd, bool all)
 THD_TRANS *trans= all ? &thd->transaction.all : &thd->transaction.stmt;
 /*
 "real" is a nick name for a transaction for which a commit will
-make persistent changes. E.g. a 'stmt' transaction inside a 'all'
-transation is not 'real': even though it's possible to commit it,
+make persistent changes. E.g. a 'stmt' transaction inside an 'all'
+transaction is not 'real': even though it's possible to commit it,
 the changes are not durable as they might be rolled back if the
 enclosing 'all' transaction is rolled back.
 */
@@ -2662,7 +2662,7 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root)

 /*
 TODO: Implement a more efficient way to have more than one index open for
-the same table instance. The ha_open call is not cachable for clone.
+the same table instance. The ha_open call is not cacheable for clone.

 This is not critical as the engines already have the table open
 and should be able to use the original instance of the table.
@@ -3514,7 +3514,7 @@ int handler::update_auto_increment()
 index_init() or rnd_init() and in any column_bitmaps_signal() call after
 this.

-The handler is allowd to do changes to the bitmap after a index_init or
+The handler is allowed to do changes to the bitmap after a index_init or
 rnd_init() call is made as after this, MySQL will not use the bitmap
 for any program logic checking.
 */
@@ -3577,7 +3577,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
 { // Autoincrement at key-start
 error= ha_index_last(table->record[1]);
 /*
-MySQL implicitely assumes such method does locking (as MySQL decides to
+MySQL implicitly assumes such method does locking (as MySQL decides to
 use nr+increment without checking again with the handler, in
 handler::update_auto_increment()), so reserves to infinite.
 */
@@ -218,7 +218,7 @@ enum enum_alter_inplace_result {
 this flag must implement start_read_removal() and end_read_removal().
 The handler may return "fake" rows constructed from the key of the row
 asked for. This is used to optimize UPDATE and DELETE by reducing the
-numer of roundtrips between handler and storage engine.
+number of roundtrips between handler and storage engine.

 Example:
 UPDATE a=1 WHERE pk IN (<keys>)
@@ -532,7 +532,7 @@ enum enum_binlog_command {

 /* Bits in used_fields */
 #define HA_CREATE_USED_AUTO (1UL << 0)
-#define HA_CREATE_USED_RAID (1UL << 1) //RAID is no longer availble
+#define HA_CREATE_USED_RAID (1UL << 1) //RAID is no longer available
 #define HA_CREATE_USED_UNION (1UL << 2)
 #define HA_CREATE_USED_INSERT_METHOD (1UL << 3)
 #define HA_CREATE_USED_MIN_ROWS (1UL << 4)
@@ -1190,7 +1190,7 @@ struct handler_iterator {
 /*
 Pointer to buffer for the iterator to use.
 Should be allocated by function which created the iterator and
-destroied by freed by above "destroy" call
+destroyed by freed by above "destroy" call
 */
 void *buffer;
 };
@@ -1406,7 +1406,7 @@ struct handlerton
 "cookie".

 The flush and call of commit_checkpoint_notify_ha() need not happen
-immediately - it can be scheduled and performed asynchroneously (ie. as
+immediately - it can be scheduled and performed asynchronously (ie. as
 part of next prepare(), or sync every second, or whatever), but should
 not be postponed indefinitely. It is however also permissible to do it
 immediately, before returning from commit_checkpoint_request().
@@ -1497,7 +1497,7 @@ struct handlerton
 file extention. This is implied by the open_table_error()
 and the default discovery implementation.

-Second element - data file extention. This is implied
+Second element - data file extension. This is implied
 assumed by REPAIR TABLE ... USE_FRM implementation.
 */
 const char **tablefile_extensions; // by default - empty list
@@ -2145,7 +2145,7 @@ struct HA_CREATE_INFO: public Table_scope_and_contents_source_st,
 CONVERT TO CHARACTER SET DEFAULT
 to
 CONVERT TO CHARACTER SET <character-set-of-the-current-database>
-TODO: Should't we postpone resolution of DEFAULT until the
+TODO: Shouldn't we postpone resolution of DEFAULT until the
 character set of the table owner database is loaded from its db.opt?
 */
 DBUG_ASSERT(cs);
@@ -2890,7 +2890,7 @@ public:
 ha_statistics stats;

 /** MultiRangeRead-related members: */
-range_seq_t mrr_iter; /* Interator to traverse the range sequence */
+range_seq_t mrr_iter; /* Iterator to traverse the range sequence */
 RANGE_SEQ_IF mrr_funcs; /* Range sequence traversal functions */
 HANDLER_BUFFER *multi_range_buffer; /* MRR buffer info */
 uint ranges_in_seq; /* Total number of ranges in the traversed sequence */
@@ -3868,7 +3868,7 @@ public:
 This method offers the storage engine, the possibility to store a reference
 to a table name which is going to be used with query cache.
 The method is called each time a statement is written to the cache and can
-be used to verify if a specific statement is cachable. It also offers
+be used to verify if a specific statement is cacheable. It also offers
 the possibility to register a generic (but static) call back function which
 is called each time a statement is matched against the query cache.


14  sql/item.cc
@@ -1543,7 +1543,7 @@ bool Item::get_date_from_string(MYSQL_TIME *ltime, ulonglong fuzzydate)
 bool Item::make_zero_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
 {
 /*
-if the item was not null and convertion failed, we return a zero date
+if the item was not null and conversion failed, we return a zero date
 if allowed, otherwise - null.
 */
 bzero((char*) ltime,sizeof(*ltime));
@@ -5332,7 +5332,7 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,

 @note
 We have to mark all items between current_sel (including) and
-last_select (excluding) as dependend (select before last_select should
+last_select (excluding) as dependent (select before last_select should
 be marked with actual table mask used by resolved item, all other with
 OUTER_REF_TABLE_BIT) and also write dependence information to Item of
 resolved identifier.
@@ -5708,7 +5708,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
 bool upward_lookup= FALSE;
 TABLE_LIST *table_list;

-/* Calulate the TABLE_LIST for the table */
+/* Calculate the TABLE_LIST for the table */
 table_list= (cached_table ? cached_table :
 field_found && (*from_field) != view_ref_found ?
 (*from_field)->table->pos_in_table_list : 0);
@@ -6444,7 +6444,7 @@ Item *Item_field::propagate_equal_fields(THD *thd,
 but failed to create a valid DATE literal from the given string literal.

 Do not do constant propagation in such cases and unlink
-"this" from the found Item_equal (as this equality not usefull).
+"this" from the found Item_equal (as this equality not useful).
 */
 item_equal= NULL;
 return this;
@@ -8019,7 +8019,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
 /*
 Due to cache, find_field_in_tables() can return field which
 doesn't belong to provided outer_context. In this case we have
-to find proper field context in order to fix field correcly.
+to find proper field context in order to fix field correctly.
 */
 do
 {
@@ -8204,9 +8204,9 @@ Item* Item_ref::transform(THD *thd, Item_transformer transformer, uchar *arg)
 callback functions.

 First the function applies the analyzer to the Item_ref object. Then
-if the analizer succeeeds we first applies the compile method to the
+if the analyzer succeeds we first apply the compile method to the
 object the Item_ref object is referencing. If this returns a new
 item the old item is substituted for a new one. After this the
 transformer is applied to the Item_ref object itself.
 The compile function is not called if the analyzer returns NULL
 in the parameter arg_p.

19  sql/item.h

@@ -157,7 +157,7 @@ void dummy_error_processor(THD *thd, void *data);
 void view_error_processor(THD *thd, void *data);

 /*
-Instances of Name_resolution_context store the information necesary for
+Instances of Name_resolution_context store the information necessary for
 name resolution of Items and other context analysis of a query made in
 fix_fields().

@@ -337,7 +337,7 @@ public:
 Monotonicity is defined only for Item* trees that represent table
 partitioning expressions (i.e. have no subselects/user vars/PS parameters
 etc etc). An Item* tree is assumed to have the same monotonicity properties
-as its correspoinding function F:
+as its corresponding function F:

 [signed] longlong F(field1, field2, ...) {
 put values of field_i into table record buffer;
@@ -754,7 +754,7 @@ protected:
 return rc;
 }
 /*
-This method is used if the item was not null but convertion to
+This method is used if the item was not null but conversion to
 TIME/DATE/DATETIME failed. We return a zero date if allowed,
 otherwise - null.
 */
@@ -958,7 +958,7 @@ public:
 /*
 real_type() is the type of base item. This is same as type() for
 most items, except Item_ref() and Item_cache_wrapper() where it
-shows the type for the underlaying item.
+shows the type for the underlying item.
 */
 virtual enum Type real_type() const { return type(); }

@@ -1083,7 +1083,7 @@ public:
 The caller can modify the returned String, if it's not marked
 "const" (with the String::mark_as_const() method). That means that
 if the item returns its own internal buffer (e.g. tmp_value), it
-*must* be marked "const" [1]. So normally it's preferrable to
+*must* be marked "const" [1]. So normally it's preferable to
 return the result value in the String, that was passed as an
 argument. But, for example, SUBSTR() returns a String that simply
 points into the buffer of SUBSTR()'s args[0]->val_str(). Such a
@@ -1512,7 +1512,7 @@ public:
 @param cond_ptr[OUT] Store a replacement item here if the condition
 can be simplified, e.g.:
 WHERE part1 OR part2 OR part3
-with one of the partN evalutating to SEL_TREE::ALWAYS.
+with one of the partN evaluating to SEL_TREE::ALWAYS.
 */
 virtual SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr);
 /*
@@ -2084,8 +2084,9 @@ public:
 virtual bool is_outer_field() const { DBUG_ASSERT(fixed); return FALSE; }

 /**
-Checks if this item or any of its decendents contains a subquery. This is a
-replacement of the former Item::has_subquery() and Item::with_subselect.
+Checks if this item or any of its descendents contains a subquery.
+This is a replacement of the former Item::has_subquery() and
+Item::with_subselect.
 */
 virtual bool with_subquery() const { DBUG_ASSERT(fixed); return false; }

@@ -5642,7 +5643,7 @@ public:

 This is the method that updates the cached value.
 It must be explicitly called by the user of this class to store the value
-of the orginal item in the cache.
+of the original item in the cache.
 */
 virtual void copy() = 0;

@@ -192,7 +192,7 @@ bool Cached_item_field::cmp(void)

 /*
 If value is not null and value changed (from null to not null or
-becasue of value change), then copy the new value to buffer.
+because of value change), then copy the new value to buffer.
 */
 if (! null_value && (tmp || (tmp= (field->cmp(buff) != 0))))
 field->get_image(buff,length,field->charset());
@@ -1372,7 +1372,7 @@ bool Item_in_optimizer::fix_fields(THD *thd, Item **ref)
 @note
 Item_in_optimizer should work as pass-through for
 - subqueries that were processed by ALL/ANY->MIN/MAX rewrite
-- subqueries taht were originally EXISTS subqueries (and were coverted by
+- subqueries that were originally EXISTS subqueries (and were converted by
 the EXISTS->IN rewrite)

 When Item_in_optimizer is not not working as a pass-through, it
@@ -1962,8 +1962,8 @@ longlong Item_func_interval::val_int()
 interval_range *range= intervals + mid;
 my_bool cmp_result;
 /*
-The values in the range intervall may have different types,
-Only do a decimal comparision of the first argument is a decimal
+The values in the range interval may have different types,
+Only do a decimal comparison if the first argument is a decimal
 and we are comparing against a decimal
 */
 if (dec && range->type == DECIMAL_RESULT)
@@ -2544,7 +2544,7 @@ Item_func_nullif::fix_length_and_dec()
 Some examples of what NULLIF can end up with after argument
 substitution (we don't mention args[1] in some cases for simplicity):

-1. l_expr is not an aggragate function:
+1. l_expr is not an aggregate function:

 a. No conversion happened.
 args[0] and args[2] were not replaced to something else
@@ -2668,7 +2668,7 @@ Item_func_nullif::fix_length_and_dec()
 In this case we remember and reuse m_arg0 during EXECUTE time as args[2].

 QQ: How to make sure that m_args0 does not point
-to something temporary which will be destoyed between PREPARE and EXECUTE.
+to something temporary which will be destroyed between PREPARE and EXECUTE.
 The condition below should probably be more strict and somehow check that:
 - change_item_tree() was called for the new args[0]
 - m_args0 is referenced from inside args[0], e.g. as a function argument,
@@ -6952,7 +6952,7 @@ Item* Item_equal::get_first(JOIN_TAB *context, Item *field_item)
 and not ot2.col.

 eliminate_item_equal() also has code that deals with equality substitution
-in presense of SJM nests.
+in presence of SJM nests.
 */

 TABLE_LIST *emb_nest;
@@ -172,7 +172,7 @@ protected:
 /*
 Return the full select tree for "field_item" and "value":
 - a single SEL_TREE if the field is not in a multiple equality, or
-- a conjuction of all SEL_TREEs for all fields from
+- a conjunction of all SEL_TREEs for all fields from
 the same multiple equality with "field_item".
 */
 SEL_TREE *get_full_func_mm_tree(RANGE_OPT_PARAM *param,
@@ -506,7 +506,7 @@ Item *Item_func::transform(THD *thd, Item_transformer transformer, uchar *argume
 callback functions.

 First the function applies the analyzer to the root node of
-the Item_func object. Then if the analizer succeeeds (returns TRUE)
+the Item_func object. Then if the analyzer succeeds (returns TRUE)
 the function recursively applies the compile method to each argument
 of the Item_func node.
 If the call of the method for an argument item returns a new item
@@ -1627,6 +1627,8 @@ my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value)
 null_value= 1;
 return 0;
 }
+my_decimal_round(E_DEC_FATAL_ERROR, decimal_value,
+decimals, FALSE, decimal_value);
 return decimal_value;
 }

|
|||||||
DBUG_ENTER("Item_func_div::fix_length_and_dec");
|
DBUG_ENTER("Item_func_div::fix_length_and_dec");
|
||||||
DBUG_PRINT("info", ("name %s", func_name()));
|
DBUG_PRINT("info", ("name %s", func_name()));
|
||||||
prec_increment= current_thd->variables.div_precincrement;
|
prec_increment= current_thd->variables.div_precincrement;
|
||||||
maybe_null= 1; // devision by zero
|
maybe_null= 1; // division by zero
|
||||||
|
|
||||||
const Type_aggregator *aggregator= &type_handler_data->m_type_aggregator_for_div;
|
const Type_aggregator *aggregator= &type_handler_data->m_type_aggregator_for_div;
|
||||||
DBUG_EXECUTE_IF("num_op", aggregator= &type_handler_data->m_type_aggregator_non_commutative_test;);
|
DBUG_EXECUTE_IF("num_op", aggregator= &type_handler_data->m_type_aggregator_non_commutative_test;);
|
||||||
@ -4665,7 +4667,7 @@ bool Item_func_set_user_var::register_field_in_bitmap(void *arg)
|
|||||||
@param type type of new value
|
@param type type of new value
|
||||||
@param cs charset info for new value
|
@param cs charset info for new value
|
||||||
@param dv derivation for new value
|
@param dv derivation for new value
|
||||||
@param unsigned_arg indiates if a value of type INT_RESULT is unsigned
|
@param unsigned_arg indicates if a value of type INT_RESULT is unsigned
|
||||||
|
|
||||||
@note Sets error and fatal error if allocation fails.
|
@note Sets error and fatal error if allocation fails.
|
||||||
|
|
||||||
@ -6573,7 +6575,7 @@ Item_func_sp::fix_fields(THD *thd, Item **ref)
|
|||||||
/*
|
/*
|
||||||
Here we check privileges of the stored routine only during view
|
Here we check privileges of the stored routine only during view
|
||||||
creation, in order to validate the view. A runtime check is
|
creation, in order to validate the view. A runtime check is
|
||||||
perfomed in Item_func_sp::execute(), and this method is not
|
performed in Item_func_sp::execute(), and this method is not
|
||||||
called during context analysis. Notice, that during view
|
called during context analysis. Notice, that during view
|
||||||
creation we do not infer into stored routine bodies and do not
|
creation we do not infer into stored routine bodies and do not
|
||||||
check privileges of its statements, which would probably be a
|
check privileges of its statements, which would probably be a
|
||||||
|
@ -210,7 +210,7 @@ String *Item_func_inet_str_base::val_str_ascii(String *buffer)
|
|||||||
|
|
||||||
@return Completion status.
|
@return Completion status.
|
||||||
@retval false Given string does not represent an IPv4-address.
|
@retval false Given string does not represent an IPv4-address.
|
||||||
@retval true The string has been converted sucessfully.
|
@retval true The string has been converted successfully.
|
||||||
|
|
||||||
@note The problem with inet_pton() is that it treats leading zeros in
|
@note The problem with inet_pton() is that it treats leading zeros in
|
||||||
IPv4-part differently on different platforms.
|
IPv4-part differently on different platforms.
|
||||||
@ -335,7 +335,7 @@ static bool str_to_ipv4(const char *str, size_t str_length, in_addr *ipv4_addres
|
|||||||
|
|
||||||
@return Completion status.
|
@return Completion status.
|
||||||
@retval false Given string does not represent an IPv6-address.
|
@retval false Given string does not represent an IPv6-address.
|
||||||
@retval true The string has been converted sucessfully.
|
@retval true The string has been converted successfully.
|
||||||
|
|
||||||
@note The problem with inet_pton() is that it treats leading zeros in
|
@note The problem with inet_pton() is that it treats leading zeros in
|
||||||
IPv4-part differently on different platforms.
|
IPv4-part differently on different platforms.
|
||||||
@ -681,7 +681,7 @@ static void ipv6_to_str(const in6_addr *ipv6, char *str)
|
|||||||
|
|
||||||
@return Completion status.
|
@return Completion status.
|
||||||
@retval false Given string does not represent an IP-address.
|
@retval false Given string does not represent an IP-address.
|
||||||
@retval true The string has been converted sucessfully.
|
@retval true The string has been converted successfully.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
bool Item_func_inet6_aton::calc_value(const String *arg, String *buffer)
|
bool Item_func_inet6_aton::calc_value(const String *arg, String *buffer)
|
||||||
@ -721,7 +721,7 @@ bool Item_func_inet6_aton::calc_value(const String *arg, String *buffer)
|
|||||||
|
|
||||||
@return Completion status.
|
@return Completion status.
|
||||||
@retval false The argument does not correspond to IP-address.
|
@retval false The argument does not correspond to IP-address.
|
||||||
@retval true The string has been converted sucessfully.
|
@retval true The string has been converted successfully.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
bool Item_func_inet6_ntoa::calc_value(const String *arg, String *buffer)
|
bool Item_func_inet6_ntoa::calc_value(const String *arg, String *buffer)
|
||||||
|
@ -990,7 +990,7 @@ String *Item_func_concat_ws::val_str(String *str)
|
|||||||
goto null; // Must be a blob
|
goto null; // Must be a blob
|
||||||
}
|
}
|
||||||
else if (res2 == &tmp_value)
|
else if (res2 == &tmp_value)
|
||||||
{ // This can happend only 1 time
|
{ // This can happen only 1 time
|
||||||
if (tmp_value.replace(0,0,*sep_str) || tmp_value.replace(0,0,*res))
|
if (tmp_value.replace(0,0,*sep_str) || tmp_value.replace(0,0,*res))
|
||||||
goto null;
|
goto null;
|
||||||
res= &tmp_value;
|
res= &tmp_value;
|
||||||
@ -1141,7 +1141,7 @@ bool Item_func_reverse::fix_length_and_dec()
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
Replace all occurences of string2 in string1 with string3.
|
Replace all occurrences of string2 in string1 with string3.
|
||||||
|
|
||||||
Don't reallocate val_str() if not needed.
|
Don't reallocate val_str() if not needed.
|
||||||
|
|
||||||
@ -3997,7 +3997,7 @@ bool Item_func_export_set::fix_length_and_dec()
|
|||||||
using in a SQL statement.
|
using in a SQL statement.
|
||||||
|
|
||||||
Adds a \\ before all characters that needs to be escaped in a SQL string.
|
Adds a \\ before all characters that needs to be escaped in a SQL string.
|
||||||
We also escape '^Z' (END-OF-FILE in windows) to avoid probelms when
|
We also escape '^Z' (END-OF-FILE in windows) to avoid problems when
|
||||||
running commands from a file in windows.
|
running commands from a file in windows.
|
||||||
|
|
||||||
This function is very useful when you want to generate SQL statements.
|
This function is very useful when you want to generate SQL statements.
|
||||||
|
@@ -1125,12 +1125,12 @@ void Item_singlerow_subselect::reset()
-- We cant change name of Item_field or Item_ref, because it will
+- We can't change name of Item_field or Item_ref, because it will
-prevent it's correct resolving, but we should save name of
+prevent its correct resolving, but we should save name of
-because we do not rollback this changes.
+because we do not rollback these changes.
@@ -1153,8 +1153,8 @@ Item_singlerow_subselect::select_transformer(JOIN *join)
-We cant change name of Item_field or Item_ref, because it will
+We can't change name of Item_field or Item_ref, because it will
-prevent it's correct resolving, but we should save name of
+prevent its correct resolving, but we should save name of
@@ -1631,7 +1631,7 @@ longlong Item_exists_subselect::val_int()
-Note that currently this cannot be NULL, so if the query exection fails
+Note that currently this cannot be NULL, so if the query execution fails
@@ -1654,7 +1654,7 @@ String *Item_exists_subselect::val_str(String *str)
-Note that currently this cannot be NULL, so if the query exection fails
+Note that currently this cannot be NULL, so if the query execution fails
@@ -2352,7 +2352,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
-in case when we do not need correct NULL, we have simplier construction:
+in case when we do not need correct NULL, we have simpler construction:
@@ -2755,6 +2755,8 @@ bool Item_exists_subselect::select_prepare_to_be_in()
+@param allow_subselect If true, the outer_expr part can have a subquery
+                       If false, it cannot.
@@ -2762,6 +2764,7 @@ bool Item_exists_subselect::select_prepare_to_be_in()
+bool allow_subselect,
@@ -2772,7 +2775,8 @@ static bool check_equality_for_exist2in(Item_func *func,
-args[1]->all_used_tables() == OUTER_REF_TABLE_BIT)
+args[1]->all_used_tables() == OUTER_REF_TABLE_BIT &&
+(allow_subselect || !args[1]->with_subquery()))
@@ -2783,7 +2787,8 @@ static bool check_equality_for_exist2in(Item_func *func,
-args[0]->all_used_tables() == OUTER_REF_TABLE_BIT)
+args[0]->all_used_tables() == OUTER_REF_TABLE_BIT &&
+(allow_subselect || !args[0]->with_subquery()))
@@ -2812,6 +2817,13 @@ typedef struct st_eq_field_outer
+if there is just one outer_expr=inner_expr pair, then outer_expr can have a
+subselect in it. If there are many such pairs, then none of outer_expr can
+have a subselect in it. If we allow this, the query will fail with an error:
+
+This version of MariaDB doesn't yet support 'SUBQUERY in ROW in left
+expression of IN/ALL/ANY'
+
@@ -2829,14 +2841,17 @@ static bool find_inner_outer_equalities(Item **conds,
+bool allow_subselect= true;
+allow_subselect,
+allow_subselect= false;
@@ -2845,6 +2860,7 @@ static bool find_inner_outer_equalities(Item **conds,
+true,
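Taken together, these hunks let the EXISTS-to-IN rewrite accept a subquery on the outer side of a correlated equality only while a single inner=outer pair has been collected; once a second pair is found, the rewritten left-hand side would become a row value, which this server version rejects for subqueries. A hedged SQL sketch of the two cases (table and column names are hypothetical):

    -- one pair: the outer expression may itself be a subquery
    SELECT * FROM t1 WHERE EXISTS
      (SELECT 1 FROM t2
        WHERE t2.a = (SELECT MAX(c) FROM t3 WHERE t3.id = t1.id));

    -- two pairs: the subquery-bearing equality is not folded into the IN left-hand row
    SELECT * FROM t1 WHERE EXISTS
      (SELECT 1 FROM t2
        WHERE t2.a = t1.a
          AND t2.b = (SELECT MAX(c) FROM t3 WHERE t3.id = t1.id));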
@@ -3207,7 +3223,7 @@ Item_in_subselect::select_in_like_transformer(JOIN *join)
-reference on left expresion.
+reference on left expression.
@@ -3239,7 +3255,7 @@ Item_in_subselect::select_in_like_transformer(JOIN *join)
-avoid memory leack)
+avoid memory leak)
@@ -3401,7 +3417,7 @@ bool Item_in_subselect::setup_mat_engine()
-pre-created at parse time, and is stored in statment memory (preserved
+pre-created at parse time, and is stored in statement memory (preserved
@@ -3869,7 +3885,7 @@ int subselect_single_select_engine::exec()
-acceses. Change them to be full table scans.
+accesses. Change them to be full table scans.
@@ -6098,7 +6114,7 @@ int subselect_partial_match_engine::exec()
-If there is a NULL-only row that coveres all columns the result of IN
+If there is a NULL-only row that covers all columns the result of IN
@@ -6293,7 +6309,7 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts,
-Check if the first and only indexed column contains NULL in the curent
+Check if the first and only indexed column contains NULL in the current
@@ -6505,7 +6521,7 @@ bool subselect_rowid_merge_engine::partial_match()
-If all nullable columns contain only NULLs, then there is a guranteed
+If all nullable columns contain only NULLs, then there is a guaranteed
@@ -561,7 +561,7 @@ public:
-TRUE<=>this is a flattenable semi-join, false overwise.
+TRUE<=>this is a flattenable semi-join, false otherwise.
@@ -983,7 +983,7 @@ class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine
-The "having" clause. This clause (further reffered to as "artificial
+The "having" clause. This clause (further referred to as "artificial
@@ -1004,7 +1004,7 @@ class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine
-[eq_]ref access (only for artifical having + ref_or_null or no having).
+[eq_]ref access (only for artificial having + ref_or_null or no having).
@@ -1075,7 +1075,7 @@ public:
-A conjunction of all the equality condtions between all pairs of expressions
+A conjunction of all the equality conditions between all pairs of expressions
@@ -705,7 +705,7 @@ int Aggregator_distinct::composite_key_cmp(void* arg, uchar* key1, uchar* key2)
-/* Declarations for auxilary C-callbacks */
+/* Declarations for auxiliary C-callbacks */
@@ -737,7 +737,7 @@ C_MODE_END
-@retval TRUE faliure
+@retval TRUE failure
@@ -1944,7 +1944,7 @@ void Item_sum_count::cleanup()
-Avgerage
+Average
@@ -2214,7 +2214,7 @@ bool Item_sum_variance::fix_length_and_dec()
-type of the result is an implementation-defined aproximate numeric
+type of the result is an implementation-defined approximate numeric
@@ -2288,7 +2288,7 @@ double Item_sum_variance::val_real()
-Another way to read it is that 'sample' is the numerical threshhold, at and
+Another way to read it is that 'sample' is the numerical threshold, at and
@@ -4171,7 +4171,7 @@ bool Item_func_group_concat::setup(THD *thd)
-as we cannot compare two table records containg BIT fields
+as we cannot compare two table records containing BIT fields
@@ -251,7 +251,7 @@ class Window_spec;
-The field 'max_arg_level' is for the maximun of the nest levels of the
+The field 'max_arg_level' is for the maximum of the nest levels of the
@@ -453,7 +453,7 @@ err:
-Create a formated date/time value in a string.
+Create a formatted date/time value in a string.
@@ -1042,7 +1042,7 @@ uint week_mode(uint mode)
-releveant if WEEK_JANUARY is not set.
+relevant if WEEK_JANUARY is not set.
@@ -1346,7 +1346,7 @@ bool get_interval_value(Item *args,interval_type int_type, INTERVAL *interval)
-/* record negative intervalls in interval->neg */
+/* record negative intervals in interval->neg */
@@ -1568,7 +1568,7 @@ bool Item_func_from_days::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
-Converts current time in my_time_t to MYSQL_TIME represenatation for local
+Converts current time in my_time_t to MYSQL_TIME representation for local
@@ -1579,7 +1579,7 @@ void Item_func_curdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
-Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
+Converts current time in my_time_t to MYSQL_TIME representation for UTC
@@ -1659,7 +1659,7 @@ static void set_sec_part(ulong sec_part, MYSQL_TIME *ltime, Item *item)
-Converts current time in my_time_t to MYSQL_TIME represenatation for local
+Converts current time in my_time_t to MYSQL_TIME representation for local
@@ -1673,7 +1673,7 @@ void Item_func_curtime_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
-Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
+Converts current time in my_time_t to MYSQL_TIME representation for UTC
@@ -1728,7 +1728,7 @@ int Item_func_now_local::save_in_field(Field *field, bool no_conversions)
-Converts current time in my_time_t to MYSQL_TIME represenatation for local
+Converts current time in my_time_t to MYSQL_TIME representation for local
@@ -1740,7 +1740,7 @@ void Item_func_now_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
-Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
+Converts current time in my_time_t to MYSQL_TIME representation for UTC
@@ -1771,7 +1771,7 @@ bool Item_func_now::get_date(MYSQL_TIME *res,
-Converts current time in my_time_t to MYSQL_TIME represenatation for local
+Converts current time in my_time_t to MYSQL_TIME representation for local
@@ -2091,7 +2091,7 @@ bool Item_func_convert_tz::get_date(MYSQL_TIME *ltime,
-/* my_time_tmp is guranteed to be in the allowed range */
+/* my_time_tmp is guaranteed to be in the allowed range */
@@ -2569,7 +2569,7 @@ void Item_char_typecast::fix_length_and_dec_internal(CHARSET_INFO *from_cs)
-is a multi-byte character set. It garantees that the
+is a multi-byte character set. It guarantees that the
@@ -64,7 +64,7 @@ typedef struct my_xml_node_st
-/* Lexical analizer token */
+/* Lexical analyzer token */
@@ -1101,7 +1101,7 @@ static Item* nametestfunc(MY_XPATH *xpath,
-Tokens consisting of one character, for faster lexical analizer.
+Tokens consisting of one character, for faster lexical analyzer.
@@ -1421,7 +1421,7 @@ my_xpath_function(const char *beg, const char *end)
-/* Initialize a lex analizer token */
+/* Initialize a lex analyzer token */
@@ -1452,7 +1452,7 @@ my_xdigit(int c)
-lex->beg and lex->end are set to the beginnig
+lex->beg and lex->end are set to the beginning
@@ -1478,7 +1478,7 @@ my_xpath_lex_scan(MY_XPATH *xpath,
-// scan untill the end of the idenfitier
+// scan until the end of the identifier
@@ -1607,7 +1607,7 @@ static int my_xpath_parse_AxisName(MY_XPATH *xpath)
-** a signle "xpath" argument and return 1 on success and 0 on error.
+** a single "xpath" argument and return 1 on success and 0 on error.
@@ -2502,7 +2502,7 @@ public:
-Perhaps it is better to move this code into lex analizer.
+Perhaps it is better to move this code into lex analyzer.
@@ -2857,7 +2857,7 @@ append_node(String *str, MY_XML_NODE *node)
-is entering a tag or an attribue.
+is entering a tag or an attribute.
@@ -2893,7 +2893,7 @@ int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len)
-is entering into a tag or an attribue textual value.
+is entering into a tag or an attribute textual value.
@@ -2921,7 +2921,7 @@ int xml_value(MY_XML_PARSER *st,const char *attr, size_t len)
-is leaving a tag or an attribue.
+is leaving a tag or an attribute.
@@ -228,7 +228,7 @@ void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info,
-keys yet anyway, so it's difficult to find any sence to
+keys yet anyway, so it's difficult to find any sense to
@@ -612,8 +612,8 @@ int key_rec_cmp(void *key_p, uchar *first_rec, uchar *second_rec)
-if ((result= field->cmp_max(field->ptr+first_diff, field->ptr+sec_diff,
+if ((result= field->cmp_prefix(field->ptr+first_diff, field->ptr+sec_diff,
@@ -1109,7 +1109,7 @@ bool Global_read_lock::make_global_read_lock_block_commit(THD *thd)
-If we didn't succeed lock_global_read_lock(), or if we already suceeded
+If we didn't succeed lock_global_read_lock(), or if we already succeeded
sql/log.cc | 18
@@ -158,7 +158,7 @@ void setup_log_handling()
-convertor.
+converter.
@@ -377,7 +377,7 @@ public:
-We cannot set informaton in the constructor binlog_cache_data
+We cannot set information in the constructor binlog_cache_data
@@ -2978,7 +2978,7 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
-{ // Safety agains reopen
+{ // Safety against reopen
@@ -3276,7 +3276,7 @@ void MYSQL_BIN_LOG::cleanup()
-We can't do that automaticly as we need to do this before
+We can't do that automatically as we need to do this before
@@ -4051,7 +4051,7 @@ err:
-Delete all logs refered to in the index file.
+Delete all logs referred to in the index file.
@@ -5622,7 +5622,7 @@ binlog_cache_mngr *THD::binlog_setup_trx_data()
-- Save the currrent binlog position so that we can roll back the
+- Save the current binlog position so that we can roll back the
@@ -6811,7 +6811,7 @@ static const char* get_first_binlog(char* buf_arg)
-errmsg= "cound not normalize the first file name in the binlog index";
+errmsg= "could not normalize the first file name in the binlog index";
@@ -9796,7 +9796,7 @@ TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint)
-Note also that we need to first release LOCK_xid_list, then aquire
+Note also that we need to first release LOCK_xid_list, then acquire
@@ -9881,7 +9881,7 @@ TC_LOG_BINLOG::commit_checkpoint_notify(void *cookie)
-background, which could elimiate all stalls around binlog rotations.
+background, which could eliminate all stalls around binlog rotations.
@@ -4522,7 +4522,7 @@ get_str_len_and_pointer(const Log_event::Byte **src,
-return -1; // Will be UINT_MAX in two-complement arithmetics
+return -1; // Will be UINT_MAX in two-complement arithmetic
@@ -4886,7 +4886,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
-start[data_len]= '\0'; // End query with \0 (For safetly)
+start[data_len]= '\0'; // End query with \0 (For safety)
@@ -6581,7 +6581,7 @@ int Format_description_log_event::do_update_pos(rpl_group_info *rgi)
-at a Rows_log_event or a Query_log_event preceeded by a
+at a Rows_log_event or a Query_log_event preceded by a
@@ -6693,7 +6693,7 @@ Format_description_log_event::is_version_before_checksum(const master_version_sp
-checksum-unaware (effectively no checksum) and the actuall
+checksum-unaware (effectively no checksum) and the actual
@@ -7420,7 +7420,7 @@ int Load_log_event::do_apply_event(NET* net, rpl_group_info *rgi,
-suceeded on master, and data is identical on the master and slave,
+succeeded on master, and data is identical on the master and slave,
@@ -7966,7 +7966,7 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg,
-fully contruct every Gtid_log_event() needlessly.
+fully construct every Gtid_log_event() needlessly.
@@ -8535,7 +8535,7 @@ err:
-fully contruct the event object.
+fully construct the event object.
@@ -8615,7 +8615,7 @@ Intvar_log_event::Intvar_log_event(const char* buf,
-/* The Post-Header is empty. The Varible Data part begins immediately. */
+/* The Post-Header is empty. The Variable Data part begins immediately. */
@@ -9910,7 +9910,7 @@ void Create_file_log_event::pack_info(Protocol *protocol)
-Constructor for Create_file_log_event to intantiate an event
+Constructor for Create_file_log_event to instantiate an event
@@ -10984,7 +10984,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
-/* if my_bitmap_init fails, catched in is_valid() */
+/* if my_bitmap_init fails, caught in is_valid() */
@@ -11401,7 +11401,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
-DBUG_PRINT("debug", ("Checking compability of tables to lock - tables_to_lock: %p",
+DBUG_PRINT("debug", ("Checking compatibility of tables to lock - tables_to_lock: %p",
@@ -11456,7 +11456,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
-having severe errors which should not be skiped.
+having severe errors which should not be skipped.
@@ -11838,7 +11838,7 @@ static int rows_event_stmt_cleanup(rpl_group_info *rgi, THD * thd)
-possition if the event is STMT_END_F flagged and
+position if the event is STMT_END_F flagged and
@@ -12050,7 +12050,7 @@ err:
-Each fragement is optionally per @c do_wrap to produce an SQL statement.
+Each fragment is optionally per @c do_wrap to produce an SQL statement.
@@ -13863,7 +13863,7 @@ record_compare_exit:
-preferred. Else we pick the index with the smalles rec_per_key value.
+preferred. Else we pick the index with the smallest rec_per_key value.
@@ -13997,7 +13997,7 @@ static int row_not_found_error(rpl_group_info *rgi)
-how many columns are there in the row (this can be differnet from
+how many columns are there in the row (this can be different from
@@ -14038,7 +14038,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
-Todo: fix wl3228 hld that requires defauls for all types of events
+Todo: fix wl3228 hld that requires defaults for all types of events
@@ -14291,7 +14291,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
-Note: above record_compare will take into accout all record fields
+Note: above record_compare will take into account all record fields
@ -455,7 +455,7 @@ class String;
|
|||||||
/**
|
/**
|
||||||
@def LOG_EVENT_ARTIFICIAL_F
|
@def LOG_EVENT_ARTIFICIAL_F
|
||||||
|
|
||||||
Artificial events are created arbitarily and not written to binary
|
Artificial events are created arbitrarily and not written to binary
|
||||||
log
|
log
|
||||||
|
|
||||||
These events should not update the master log position when slave
|
These events should not update the master log position when slave
|
||||||
@ -962,13 +962,13 @@ private:
|
|||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
the struct aggregates two paramenters that identify an event
|
the struct aggregates two parameters that identify an event
|
||||||
uniquely in scope of communication of a particular master and slave couple.
|
uniquely in scope of communication of a particular master and slave couple.
|
||||||
I.e there can not be 2 events from the same staying connected master which
|
I.e there can not be 2 events from the same staying connected master which
|
||||||
have the same coordinates.
|
have the same coordinates.
|
||||||
@note
|
@note
|
||||||
Such identifier is not yet unique generally as the event originating master
|
Such identifier is not yet unique generally as the event originating master
|
||||||
is resetable. Also the crashed master can be replaced with some other.
|
is resettable. Also the crashed master can be replaced with some other.
|
||||||
*/
|
*/
|
||||||
typedef struct event_coordinates
|
typedef struct event_coordinates
|
||||||
{
|
{
|
||||||
@ -2760,7 +2760,7 @@ public:
|
|||||||
uint8 number_of_event_types;
|
uint8 number_of_event_types;
|
||||||
/*
|
/*
|
||||||
The list of post-headers' lengths followed
|
The list of post-headers' lengths followed
|
||||||
by the checksum alg decription byte
|
by the checksum alg description byte
|
||||||
*/
|
*/
|
||||||
uint8 *post_header_len;
|
uint8 *post_header_len;
|
||||||
struct master_version_split {
|
struct master_version_split {
|
||||||
@ -3100,7 +3100,7 @@ public:
|
|||||||
*/
|
*/
|
||||||
bool is_deferred() { return deferred; }
|
bool is_deferred() { return deferred; }
|
||||||
/*
|
/*
|
||||||
In case of the deffered applying the variable instance is flagged
|
In case of the deferred applying the variable instance is flagged
|
||||||
and the parsing time query id is stored to be used at applying time.
|
and the parsing time query id is stored to be used at applying time.
|
||||||
*/
|
*/
|
||||||
void set_deferred(query_id_t qid) { deferred= true; query_id= qid; }
|
void set_deferred(query_id_t qid) { deferred= true; query_id= qid; }
|
||||||
@ -4988,7 +4988,7 @@ private:
|
|||||||
/**
|
/**
|
||||||
@class Incident_log_event
|
@class Incident_log_event
|
||||||
|
|
||||||
Class representing an incident, an occurance out of the ordinary,
|
Class representing an incident, an occurrence out of the ordinary,
|
||||||
that happened on the master.
|
that happened on the master.
|
||||||
|
|
||||||
The event is used to inform the slave that something out of the
|
The event is used to inform the slave that something out of the
|
||||||
@ -5032,7 +5032,7 @@ public:
|
|||||||
m_message.str= NULL; /* Just as a precaution */
|
m_message.str= NULL; /* Just as a precaution */
|
||||||
m_message.length= 0;
|
m_message.length= 0;
|
||||||
set_direct_logging();
|
set_direct_logging();
|
||||||
/* Replicate the incident irregardless of @@skip_replication. */
|
/* Replicate the incident regardless of @@skip_replication. */
|
||||||
flags&= ~LOG_EVENT_SKIP_REPLICATION_F;
|
flags&= ~LOG_EVENT_SKIP_REPLICATION_F;
|
||||||
DBUG_VOID_RETURN;
|
DBUG_VOID_RETURN;
|
||||||
}
|
}
|
||||||
@ -5053,7 +5053,7 @@ public:
|
|||||||
strmake(m_message.str, msg->str, msg->length);
|
strmake(m_message.str, msg->str, msg->length);
|
||||||
m_message.length= msg->length;
|
m_message.length= msg->length;
|
||||||
set_direct_logging();
|
set_direct_logging();
|
||||||
/* Replicate the incident irregardless of @@skip_replication. */
|
/* Replicate the incident regardless of @@skip_replication. */
|
||||||
flags&= ~LOG_EVENT_SKIP_REPLICATION_F;
|
flags&= ~LOG_EVENT_SKIP_REPLICATION_F;
|
||||||
DBUG_VOID_RETURN;
|
DBUG_VOID_RETURN;
|
||||||
}
|
}
|
||||||
|
@ -1227,7 +1227,7 @@ Old_rows_log_event::Old_rows_log_event(const char *buf, uint event_len,
|
|||||||
DBUG_VOID_RETURN;
|
DBUG_VOID_RETURN;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* if my_bitmap_init fails, catched in is_valid() */
|
/* if my_bitmap_init fails, caught in is_valid() */
|
||||||
if (likely(!my_bitmap_init(&m_cols,
|
if (likely(!my_bitmap_init(&m_cols,
|
||||||
m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
|
m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
|
||||||
m_width,
|
m_width,
|
||||||
|
@ -4005,13 +4005,15 @@ static int replace_user_table(THD *thd, const User_table &user_table,
|
|||||||
table->key_info->key_length);
|
table->key_info->key_length);
|
||||||
|
|
||||||
if (table->file->ha_index_read_idx_map(table->record[0], 0, user_key,
|
if (table->file->ha_index_read_idx_map(table->record[0], 0, user_key,
|
||||||
HA_WHOLE_KEY,
|
HA_WHOLE_KEY, HA_READ_KEY_EXACT))
|
||||||
HA_READ_KEY_EXACT))
|
|
||||||
{
|
{
|
||||||
/* what == 'N' means revoke */
|
/* what == 'N' means revoke */
|
||||||
if (what == 'N')
|
if (what == 'N')
|
||||||
{
|
{
|
||||||
my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str);
|
if (combo.host.length)
|
||||||
|
my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str);
|
||||||
|
else
|
||||||
|
my_error(ER_INVALID_ROLE, MYF(0), combo.user.str);
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
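
The change above picks the error by looking at the host part of the grantee: an empty host means the name was a role, so ER_INVALID_ROLE is reported instead of ER_NONEXISTING_GRANT. A small stand-alone sketch of that decision, with the server's error machinery reduced to plain strings:

// Sketch of the error selection above; error reporting is simplified to strings.
#include <cstdio>

// Returns the error name that would be raised for a missing grantee row.
static const char *missing_grantee_error(const char *user, const char *host)
{
  (void) user;                     // only the host part decides which error applies
  // A role is stored with an empty host, a user always carries one.
  return (host && *host) ? "ER_NONEXISTING_GRANT" : "ER_INVALID_ROLE";
}

int main()
{
  std::printf("%s\n", missing_grantee_error("joe", "localhost"));  // ER_NONEXISTING_GRANT
  std::printf("%s\n", missing_grantee_error("app_role", ""));      // ER_INVALID_ROLE
  return 0;
}
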
@ -5555,6 +5557,8 @@ static void propagate_role_grants(ACL_ROLE *role,
|
|||||||
enum PRIVS_TO_MERGE::what what,
|
enum PRIVS_TO_MERGE::what what,
|
||||||
const char *db= 0, const char *name= 0)
|
const char *db= 0, const char *name= 0)
|
||||||
{
|
{
|
||||||
|
if (!role)
|
||||||
|
return;
|
||||||
|
|
||||||
mysql_mutex_assert_owner(&acl_cache->lock);
|
mysql_mutex_assert_owner(&acl_cache->lock);
|
||||||
PRIVS_TO_MERGE data= { what, db, name };
|
PRIVS_TO_MERGE data= { what, db, name };
|
||||||
@ -7760,6 +7764,21 @@ err:
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void check_grant_column_int(GRANT_TABLE *grant_table, const char *name,
|
||||||
|
uint length, ulong *want_access)
|
||||||
|
{
|
||||||
|
if (grant_table)
|
||||||
|
{
|
||||||
|
*want_access&= ~grant_table->privs;
|
||||||
|
if (*want_access & grant_table->cols)
|
||||||
|
{
|
||||||
|
GRANT_COLUMN *grant_column= column_hash_search(grant_table, name, length);
|
||||||
|
if (grant_column)
|
||||||
|
*want_access&= ~grant_column->rights;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Check column rights in given security context
|
Check column rights in given security context
|
||||||
|
|
||||||
@ -7782,9 +7801,6 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant,
|
|||||||
const char *db_name, const char *table_name,
|
const char *db_name, const char *table_name,
|
||||||
const char *name, size_t length, Security_context *sctx)
|
const char *name, size_t length, Security_context *sctx)
|
||||||
{
|
{
|
||||||
GRANT_TABLE *grant_table;
|
|
||||||
GRANT_TABLE *grant_table_role;
|
|
||||||
GRANT_COLUMN *grant_column;
|
|
||||||
ulong want_access= grant->want_privilege & ~grant->privilege;
|
ulong want_access= grant->want_privilege & ~grant->privilege;
|
||||||
DBUG_ENTER("check_grant_column");
|
DBUG_ENTER("check_grant_column");
|
||||||
DBUG_PRINT("enter", ("table: %s want_access: %lu", table_name, want_access));
|
DBUG_PRINT("enter", ("table: %s want_access: %lu", table_name, want_access));
|
||||||
@ -7809,45 +7825,20 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant,
|
|||||||
grant->version= grant_version; /* purecov: inspected */
|
grant->version= grant_version; /* purecov: inspected */
|
||||||
}
|
}
|
||||||
|
|
||||||
grant_table= grant->grant_table_user;
|
check_grant_column_int(grant->grant_table_user, name, (uint)length,
|
||||||
grant_table_role= grant->grant_table_role;
|
&want_access);
|
||||||
|
check_grant_column_int(grant->grant_table_role, name, (uint)length,
|
||||||
|
&want_access);
|
||||||
|
|
||||||
if (!grant_table && !grant_table_role)
|
|
||||||
goto err;
|
|
||||||
|
|
||||||
if (grant_table)
|
|
||||||
{
|
|
||||||
grant_column= column_hash_search(grant_table, name, length);
|
|
||||||
if (grant_column)
|
|
||||||
{
|
|
||||||
want_access&= ~grant_column->rights;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (grant_table_role)
|
|
||||||
{
|
|
||||||
grant_column= column_hash_search(grant_table_role, name, length);
|
|
||||||
if (grant_column)
|
|
||||||
{
|
|
||||||
want_access&= ~grant_column->rights;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (!want_access)
|
|
||||||
{
|
|
||||||
mysql_rwlock_unlock(&LOCK_grant);
|
|
||||||
DBUG_RETURN(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
err:
|
|
||||||
mysql_rwlock_unlock(&LOCK_grant);
|
mysql_rwlock_unlock(&LOCK_grant);
|
||||||
|
if (!want_access)
|
||||||
|
DBUG_RETURN(0);
|
||||||
|
|
||||||
char command[128];
|
char command[128];
|
||||||
get_privilege_desc(command, sizeof(command), want_access);
|
get_privilege_desc(command, sizeof(command), want_access);
|
||||||
/* TODO perhaps error should print current rolename aswell */
|
/* TODO perhaps error should print current rolename aswell */
|
||||||
my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0),
|
my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), command, sctx->priv_user,
|
||||||
command,
|
sctx->host_or_ip, name, table_name);
|
||||||
sctx->priv_user,
|
|
||||||
sctx->host_or_ip,
|
|
||||||
name,
|
|
||||||
table_name);
|
|
||||||
DBUG_RETURN(1);
|
DBUG_RETURN(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
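
The new check_grant_column_int() helper above folds the duplicated "clear table-level privileges, then clear matching column privileges" steps into one routine that is called once for the user grant table and once for the role grant table. Below is a self-contained sketch of that masking pattern; the bit values and the column lookup are invented for the example, not the server's GRANT_TABLE internals.

// Sketch of the privilege-masking helper pattern above; bit values are arbitrary here.
#include <cstdio>
#include <map>
#include <string>

typedef unsigned long privs_t;
const privs_t SELECT_PRIV = 1, UPDATE_PRIV = 2;

struct GrantTable {                       // simplified stand-in for a grant table
  privs_t privs;                          // privileges granted on the whole table
  privs_t cols;                           // union of per-column privileges
  std::map<std::string, privs_t> columns; // per-column rights
};

// Mirror of the helper: drop whatever this grant table (if any) already covers.
static void clear_granted(const GrantTable *gt, const std::string &col, privs_t *want)
{
  if (!gt)
    return;
  *want &= ~gt->privs;                    // table-wide privileges
  if (*want & gt->cols)                   // only look up the column when it can help
  {
    std::map<std::string, privs_t>::const_iterator it = gt->columns.find(col);
    if (it != gt->columns.end())
      *want &= ~it->second;
  }
}

int main()
{
  GrantTable user_gt  = {0, SELECT_PRIV | UPDATE_PRIV, {{"c1", SELECT_PRIV}}};
  GrantTable *role_gt = NULL;             // no role grants in this example

  privs_t want = SELECT_PRIV | UPDATE_PRIV;
  clear_granted(&user_gt, "c1", &want);   // same helper for the user table...
  clear_granted(role_gt,  "c1", &want);   // ...and for the role table
  std::printf("still missing: %lu\n", want);  // UPDATE_PRIV (2) remains denied
  return 0;
}
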
@ -768,8 +768,18 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
|
|||||||
{
|
{
|
||||||
compl_result_code= result_code= HA_ADMIN_INVALID;
|
compl_result_code= result_code= HA_ADMIN_INVALID;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
The check for ALTER_PARTITION_ADMIN implements this logic:
|
||||||
|
do not collect EITS STATS for this syntax:
|
||||||
|
ALTER TABLE ... ANALYZE PARTITION p
|
||||||
|
EITS statistics is global (not per-partition). Collecting global stats
|
||||||
|
is much more expensive than processing just one partition, so the most
|
||||||
|
appropriate action is to just not collect EITS stats for this command.
|
||||||
|
*/
|
||||||
collect_eis=
|
collect_eis=
|
||||||
(table->table->s->table_category == TABLE_CATEGORY_USER &&
|
(table->table->s->table_category == TABLE_CATEGORY_USER &&
|
||||||
|
!(lex->alter_info.flags & ALTER_PARTITION_ADMIN) &&
|
||||||
(get_use_stat_tables_mode(thd) > NEVER ||
|
(get_use_stat_tables_mode(thd) > NEVER ||
|
||||||
lex->with_persistent_for_clause));
|
lex->with_persistent_for_clause));
|
||||||
}
|
}
|
||||||
|
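
The added comment explains why collect_eis is now also gated on ALTER_PARTITION_ADMIN: engine-independent statistics are table-wide, so an ANALYZE of a single partition should not trigger a full-table collection. A minimal sketch of that predicate, with the flag names and mode values reduced to local stand-ins:

// Sketch of the collect_eis decision above; flags and modes are local stand-ins.
#include <cstdio>

const unsigned ALTER_PARTITION_ADMIN = 1u << 0;   // e.g. ALTER TABLE ... ANALYZE PARTITION p
enum UseStatTables { NEVER = 0, COMPLEMENTARY = 1, PREFERABLY = 2 };

static bool should_collect_eits(bool user_table, unsigned alter_flags,
                                UseStatTables mode, bool persistent_for_clause)
{
  return user_table &&
         !(alter_flags & ALTER_PARTITION_ADMIN) &&   // skip per-partition ANALYZE
         (mode > NEVER || persistent_for_clause);
}

int main()
{
  // Plain ANALYZE TABLE collects EITS, ANALYZE ... PARTITION p does not.
  std::printf("%d\n", should_collect_eits(true, 0, PREFERABLY, false));                      // 1
  std::printf("%d\n", should_collect_eits(true, ALTER_PARTITION_ADMIN, PREFERABLY, false));  // 0
  return 0;
}
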
@ -1261,7 +1261,7 @@ bool wait_while_table_is_used(THD *thd, TABLE *table,
|
|||||||
FALSE);
|
FALSE);
|
||||||
/* extra() call must come only after all instances above are closed */
|
/* extra() call must come only after all instances above are closed */
|
||||||
if (function != HA_EXTRA_NOT_USED)
|
if (function != HA_EXTRA_NOT_USED)
|
||||||
(void) table->file->extra(function);
|
DBUG_RETURN(table->file->extra(function));
|
||||||
DBUG_RETURN(FALSE);
|
DBUG_RETURN(FALSE);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -7776,15 +7776,11 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
|
|||||||
FALSE ok; In this case *map will include the chosen index
|
FALSE ok; In this case *map will include the chosen index
|
||||||
TRUE error
|
TRUE error
|
||||||
*/
|
*/
|
||||||
bool setup_tables_and_check_access(THD *thd,
|
bool setup_tables_and_check_access(THD *thd, Name_resolution_context *context,
|
||||||
Name_resolution_context *context,
|
|
||||||
List<TABLE_LIST> *from_clause,
|
List<TABLE_LIST> *from_clause,
|
||||||
TABLE_LIST *tables,
|
TABLE_LIST *tables, List<TABLE_LIST> &leaves,
|
||||||
List<TABLE_LIST> &leaves,
|
bool select_insert, ulong want_access_first,
|
||||||
bool select_insert,
|
ulong want_access, bool full_table_list)
|
||||||
ulong want_access_first,
|
|
||||||
ulong want_access,
|
|
||||||
bool full_table_list)
|
|
||||||
{
|
{
|
||||||
DBUG_ENTER("setup_tables_and_check_access");
|
DBUG_ENTER("setup_tables_and_check_access");
|
||||||
|
|
||||||
|
@ -4088,7 +4088,8 @@ bool st_select_lex::optimize_unflattened_subqueries(bool const_only)
|
|||||||
sl->options|= SELECT_DESCRIBE;
|
sl->options|= SELECT_DESCRIBE;
|
||||||
inner_join->select_options|= SELECT_DESCRIBE;
|
inner_join->select_options|= SELECT_DESCRIBE;
|
||||||
}
|
}
|
||||||
res= inner_join->optimize();
|
if ((res= inner_join->optimize()))
|
||||||
|
return TRUE;
|
||||||
if (!inner_join->cleaned)
|
if (!inner_join->cleaned)
|
||||||
sl->update_used_tables();
|
sl->update_used_tables();
|
||||||
sl->update_correlated_cache();
|
sl->update_correlated_cache();
|
||||||
|
@ -1442,7 +1442,6 @@ err:
|
|||||||
|
|
||||||
bool JOIN::build_explain()
|
bool JOIN::build_explain()
|
||||||
{
|
{
|
||||||
create_explain_query_if_not_exists(thd->lex, thd->mem_root);
|
|
||||||
have_query_plan= QEP_AVAILABLE;
|
have_query_plan= QEP_AVAILABLE;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -1485,6 +1484,7 @@ bool JOIN::build_explain()
|
|||||||
int JOIN::optimize()
|
int JOIN::optimize()
|
||||||
{
|
{
|
||||||
int res= 0;
|
int res= 0;
|
||||||
|
create_explain_query_if_not_exists(thd->lex, thd->mem_root);
|
||||||
join_optimization_state init_state= optimization_state;
|
join_optimization_state init_state= optimization_state;
|
||||||
if (optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
|
if (optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
|
||||||
res= optimize_stage2();
|
res= optimize_stage2();
|
||||||
|
@ -1281,8 +1281,7 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
|
|||||||
DBUG_RETURN(TRUE);
|
DBUG_RETURN(TRUE);
|
||||||
|
|
||||||
if (setup_tables_and_check_access(thd, &select_lex->context,
|
if (setup_tables_and_check_access(thd, &select_lex->context,
|
||||||
&select_lex->top_join_list,
|
&select_lex->top_join_list, table_list,
|
||||||
table_list,
|
|
||||||
select_lex->leaf_tables,
|
select_lex->leaf_tables,
|
||||||
FALSE, UPDATE_ACL, SELECT_ACL, TRUE) ||
|
FALSE, UPDATE_ACL, SELECT_ACL, TRUE) ||
|
||||||
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
|
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
|
||||||
|
@ -449,9 +449,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
|
|||||||
if (thd->open_temporary_tables(lex->query_tables) ||
|
if (thd->open_temporary_tables(lex->query_tables) ||
|
||||||
open_and_lock_tables(thd, lex->query_tables, TRUE, 0))
|
open_and_lock_tables(thd, lex->query_tables, TRUE, 0))
|
||||||
{
|
{
|
||||||
view= lex->unlink_first_table(&link_to_local);
|
|
||||||
res= TRUE;
|
res= TRUE;
|
||||||
goto err;
|
goto err_no_relink;
|
||||||
}
|
}
|
||||||
|
|
||||||
view= lex->unlink_first_table(&link_to_local);
|
view= lex->unlink_first_table(&link_to_local);
|
||||||
@ -714,9 +713,11 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
|
|||||||
|
|
||||||
WSREP_ERROR_LABEL:
|
WSREP_ERROR_LABEL:
|
||||||
res= TRUE;
|
res= TRUE;
|
||||||
|
goto err_no_relink;
|
||||||
|
|
||||||
err:
|
err:
|
||||||
lex->link_first_table_back(view, link_to_local);
|
lex->link_first_table_back(view, link_to_local);
|
||||||
|
err_no_relink:
|
||||||
unit->cleanup();
|
unit->cleanup();
|
||||||
DBUG_RETURN(res || thd->is_error());
|
DBUG_RETURN(res || thd->is_error());
|
||||||
}
|
}
|
||||||
|
@ -355,7 +355,6 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
|
|||||||
} // endif mode
|
} // endif mode
|
||||||
|
|
||||||
rcop = false;
|
rcop = false;
|
||||||
|
|
||||||
} catch (int n) {
|
} catch (int n) {
|
||||||
if (trace(1))
|
if (trace(1))
|
||||||
htrc("Exception %d: %s\n", n, g->Message);
|
htrc("Exception %d: %s\n", n, g->Message);
|
||||||
|
@ -49,6 +49,7 @@
|
|||||||
#include "global.h"
|
#include "global.h"
|
||||||
#include "plgdbsem.h"
|
#include "plgdbsem.h"
|
||||||
#include "filamdbf.h"
|
#include "filamdbf.h"
|
||||||
|
#include "filamzip.h"
|
||||||
#include "tabdos.h"
|
#include "tabdos.h"
|
||||||
#include "valblk.h"
|
#include "valblk.h"
|
||||||
#define NO_FUNC
|
#define NO_FUNC
|
||||||
@ -139,7 +140,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf)
|
|||||||
if (fread(buf, HEADLEN, 1, file) != 1) {
|
if (fread(buf, HEADLEN, 1, file) != 1) {
|
||||||
strcpy(g->Message, MSG(NO_READ_32));
|
strcpy(g->Message, MSG(NO_READ_32));
|
||||||
return RC_NF;
|
return RC_NF;
|
||||||
} // endif fread
|
} // endif fread
|
||||||
|
|
||||||
// Check first byte to be sure of .dbf type
|
// Check first byte to be sure of .dbf type
|
||||||
if ((buf->Version & 0x03) != DBFTYPE) {
|
if ((buf->Version & 0x03) != DBFTYPE) {
|
||||||
@ -149,7 +150,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf)
|
|||||||
if ((buf->Version & 0x30) == 0x30) {
|
if ((buf->Version & 0x30) == 0x30) {
|
||||||
strcpy(g->Message, MSG(FOXPRO_FILE));
|
strcpy(g->Message, MSG(FOXPRO_FILE));
|
||||||
dbc = 264; // FoxPro database container
|
dbc = 264; // FoxPro database container
|
||||||
} // endif Version
|
} // endif Version
|
||||||
|
|
||||||
} else
|
} else
|
||||||
strcpy(g->Message, MSG(DBASE_FILE));
|
strcpy(g->Message, MSG(DBASE_FILE));
|
||||||
@ -158,12 +159,12 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf)
|
|||||||
if (fseek(file, buf->Headlen() - dbc, SEEK_SET) != 0) {
|
if (fseek(file, buf->Headlen() - dbc, SEEK_SET) != 0) {
|
||||||
sprintf(g->Message, MSG(BAD_HEADER), fn);
|
sprintf(g->Message, MSG(BAD_HEADER), fn);
|
||||||
return RC_FX;
|
return RC_FX;
|
||||||
} // endif fseek
|
} // endif fseek
|
||||||
|
|
||||||
if (fread(&endmark, 2, 1, file) != 1) {
|
if (fread(&endmark, 2, 1, file) != 1) {
|
||||||
strcpy(g->Message, MSG(BAD_HEAD_END));
|
strcpy(g->Message, MSG(BAD_HEAD_END));
|
||||||
return RC_FX;
|
return RC_FX;
|
||||||
} // endif fread
|
} // endif fread
|
||||||
|
|
||||||
// Some files have just 1D others have 1D00 following fields
|
// Some files have just 1D others have 1D00 following fields
|
||||||
if (endmark[0] != EOH && endmark[1] != EOH) {
|
if (endmark[0] != EOH && endmark[1] != EOH) {
|
||||||
@ -172,7 +173,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf)
|
|||||||
if (rc == RC_OK)
|
if (rc == RC_OK)
|
||||||
return RC_FX;
|
return RC_FX;
|
||||||
|
|
||||||
} // endif endmark
|
} // endif endmark
|
||||||
|
|
||||||
// Calculate here the number of fields while we have the dbc info
|
// Calculate here the number of fields while we have the dbc info
|
||||||
buf->SetFields((buf->Headlen() - dbc - 1) / 32);
|
buf->SetFields((buf->Headlen() - dbc - 1) / 32);
|
||||||
@ -180,13 +181,58 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf)
|
|||||||
return rc;
|
return rc;
|
||||||
} // end of dbfhead
|
} // end of dbfhead
|
||||||
|
|
||||||
|
/****************************************************************************/
|
||||||
|
/* dbfields: Analyze a DBF header and set the table fields number. */
|
||||||
|
/* Parameters: */
|
||||||
|
/* PGLOBAL g -- pointer to the CONNECT Global structure */
|
||||||
|
/* DBFHEADER *hdrp -- pointer to _dbfheader structure */
|
||||||
|
/* Returns: */
|
||||||
|
/* RC_OK, RC_INFO, or RC_FX if error. */
|
||||||
|
/****************************************************************************/
|
||||||
|
static int dbfields(PGLOBAL g, DBFHEADER* hdrp)
|
||||||
|
{
|
||||||
|
char* endmark;
|
||||||
|
int dbc = 2, rc = RC_OK;
|
||||||
|
|
||||||
|
*g->Message = '\0';
|
||||||
|
|
||||||
|
// Check first byte to be sure of .dbf type
|
||||||
|
if ((hdrp->Version & 0x03) != DBFTYPE) {
|
||||||
|
strcpy(g->Message, MSG(NOT_A_DBF_FILE));
|
||||||
|
rc = RC_INFO;
|
||||||
|
|
||||||
|
if ((hdrp->Version & 0x30) == 0x30) {
|
||||||
|
strcpy(g->Message, MSG(FOXPRO_FILE));
|
||||||
|
dbc = 264; // FoxPro database container
|
||||||
|
} // endif Version
|
||||||
|
|
||||||
|
} else
|
||||||
|
strcpy(g->Message, MSG(DBASE_FILE));
|
||||||
|
|
||||||
|
// Check last byte(s) of header
|
||||||
|
endmark = (char*)hdrp + hdrp->Headlen() - dbc;
|
||||||
|
|
||||||
|
// Some headers just have 1D others have 1D00 following fields
|
||||||
|
if (endmark[0] != EOH && endmark[1] != EOH) {
|
||||||
|
sprintf(g->Message, MSG(NO_0DH_HEAD), dbc);
|
||||||
|
|
||||||
|
if (rc == RC_OK)
|
||||||
|
return RC_FX;
|
||||||
|
|
||||||
|
} // endif endmark
|
||||||
|
|
||||||
|
// Calculate here the number of fields while we have the dbc info
|
||||||
|
hdrp->SetFields((hdrp->Headlen() - dbc - 1) / 32);
|
||||||
|
return rc;
|
||||||
|
} // end of dbfields
|
||||||
|
|
||||||
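
dbfields() above repeats the checks dbfhead() does on a FILE*, but against a header that is already in memory (the zipped case): validate the version bits, detect a FoxPro container, verify the end-of-header byte, then derive the field count from the header length. The sketch below reproduces that arithmetic on a fabricated 32-byte header; the constants mirror the ones in the diff, while the buffer contents are made up for the example.

// Sketch of the in-memory DBF header checks above; the header bytes are fabricated.
#include <cstdio>
#include <vector>

const int HEADLEN = 32;          // size of the fixed header and of each field descriptor
const int DBFTYPE = 3;           // expected value of version bits 0 and 1
const unsigned char EOH = 0x0D;  // end-of-header marker

// Returns the number of field descriptors, or -1 when the header looks wrong.
static int dbf_field_count(const unsigned char *hdr, size_t size)
{
  if (size < (size_t)HEADLEN || (hdr[0] & 0x03) != DBFTYPE)
    return -1;                                     // not a plain .dbf header
  int dbc = ((hdr[0] & 0x30) == 0x30) ? 264 : 2;   // FoxPro database container
  unsigned headlen = hdr[8] | (hdr[9] << 8);       // stored little-endian
  if (headlen < (unsigned)dbc + 1 || headlen > size)
    return -1;
  if (hdr[headlen - dbc] != EOH && hdr[headlen - dbc + 1] != EOH)
    return -1;                                     // missing 0x0D terminator
  return (int)((headlen - dbc - 1) / 32);          // same formula as SetFields()
}

int main()
{
  // Fake header: version 3, headlen = 32 + 2*32 + 1 = 97 -> two fields.
  std::vector<unsigned char> hdr(97, 0);
  hdr[0] = 0x03;
  hdr[8] = 97; hdr[9] = 0;
  hdr[96] = 0x0D;                                  // end-of-header terminator
  std::printf("fields: %d\n", dbf_field_count(hdr.data(), hdr.size()));  // prints 2
  return 0;
}
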
/* -------------------------- Function DBFColumns ------------------------- */
|
/* -------------------------- Function DBFColumns ------------------------- */
|
||||||
|
|
||||||
/****************************************************************************/
|
/****************************************************************************/
|
||||||
/* DBFColumns: constructs the result blocks containing the description */
|
/* DBFColumns: constructs the result blocks containing the description */
|
||||||
/* of all the columns of a DBF file that will be retrieved by #GetData. */
|
/* of all the columns of a DBF file that will be retrieved by #GetData. */
|
||||||
/****************************************************************************/
|
/****************************************************************************/
|
||||||
PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
|
PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, PTOS topt, bool info)
|
||||||
{
|
{
|
||||||
int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING,
|
int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING,
|
||||||
TYPE_INT, TYPE_INT, TYPE_SHORT};
|
TYPE_INT, TYPE_INT, TYPE_SHORT};
|
||||||
@ -196,10 +242,12 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
|
|||||||
char buf[2], filename[_MAX_PATH];
|
char buf[2], filename[_MAX_PATH];
|
||||||
int ncol = sizeof(buftyp) / sizeof(int);
|
int ncol = sizeof(buftyp) / sizeof(int);
|
||||||
int rc, type, len, field, fields;
|
int rc, type, len, field, fields;
|
||||||
bool bad;
|
bool bad, mul;
|
||||||
DBFHEADER mainhead;
|
PCSZ target, pwd;
|
||||||
DESCRIPTOR thisfield;
|
DBFHEADER mainhead, *hp;
|
||||||
FILE *infile = NULL;
|
DESCRIPTOR thisfield, *tfp;
|
||||||
|
FILE *infile = NULL;
|
||||||
|
UNZIPUTL *zutp = NULL;
|
||||||
PQRYRES qrp;
|
PQRYRES qrp;
|
||||||
PCOLRES crp;
|
PCOLRES crp;
|
||||||
|
|
||||||
@ -217,21 +265,55 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
|
|||||||
/************************************************************************/
|
/************************************************************************/
|
||||||
PlugSetPath(filename, fn, dp);
|
PlugSetPath(filename, fn, dp);
|
||||||
|
|
||||||
if (!(infile= global_fopen(g, MSGID_CANNOT_OPEN, filename, "rb")))
|
if (topt->zipped) {
|
||||||
return NULL;
|
target = GetStringTableOption(g, topt, "Entry", NULL);
|
||||||
|
mul = (target && *target) ? strchr(target, '*') || strchr(target, '?')
|
||||||
|
: false;
|
||||||
|
mul = GetBooleanTableOption(g, topt, "Mulentries", mul);
|
||||||
|
|
||||||
/************************************************************************/
|
if (mul) {
|
||||||
/* Get the first 32 bytes of the header. */
|
strcpy(g->Message, "Cannot find column definition for multiple entries");
|
||||||
/************************************************************************/
|
return NULL;
|
||||||
if ((rc = dbfhead(g, infile, filename, &mainhead)) == RC_FX) {
|
} // endif Multiple
|
||||||
fclose(infile);
|
|
||||||
return NULL;
|
|
||||||
} // endif dbfhead
|
|
||||||
|
|
||||||
/************************************************************************/
|
pwd = GetStringTableOption(g, topt, "Password", NULL);
|
||||||
/* Allocate the structures used to refer to the result set. */
|
zutp = new(g) UNZIPUTL(target, pwd, mul);
|
||||||
/************************************************************************/
|
|
||||||
fields = mainhead.Fields();
|
if (!zutp->OpenTable(g, MODE_READ, filename))
|
||||||
|
hp = (DBFHEADER*)zutp->memory;
|
||||||
|
else
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
/**********************************************************************/
|
||||||
|
/* Set the table fields number. */
|
||||||
|
/**********************************************************************/
|
||||||
|
if ((rc = dbfields(g, hp)) == RC_FX) {
|
||||||
|
zutp->close();
|
||||||
|
return NULL;
|
||||||
|
} // endif dbfields
|
||||||
|
|
||||||
|
tfp = (DESCRIPTOR*)hp;
|
||||||
|
} else {
|
||||||
|
if (!(infile = global_fopen(g, MSGID_CANNOT_OPEN, filename, "rb")))
|
||||||
|
return NULL;
|
||||||
|
else
|
||||||
|
hp = &mainhead;
|
||||||
|
|
||||||
|
/**********************************************************************/
|
||||||
|
/* Get the first 32 bytes of the header. */
|
||||||
|
/**********************************************************************/
|
||||||
|
if ((rc = dbfhead(g, infile, filename, hp)) == RC_FX) {
|
||||||
|
fclose(infile);
|
||||||
|
return NULL;
|
||||||
|
} // endif dbfhead
|
||||||
|
|
||||||
|
tfp = &thisfield;
|
||||||
|
} // endif zipped
|
||||||
|
|
||||||
|
/************************************************************************/
|
||||||
|
/* Get the number of the table fields. */
|
||||||
|
/************************************************************************/
|
||||||
|
fields = hp->Fields();
|
||||||
} else
|
} else
|
||||||
fields = 0;
|
fields = 0;
|
||||||
|
|
||||||
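
In the zipped branch above, whether the table spans multiple zip entries is inferred from wildcard characters in the Entry option and can still be forced with Mulentries. A tiny sketch of that inference, with the option lookup reduced to plain arguments:

// Sketch of the multiple-entry detection above; option lookup is reduced to arguments.
#include <cstdio>
#include <cstring>

// entry:      value of the Entry option, may be NULL or empty
// mulentries: explicit Mulentries option, -1 when not given, else 0/1
static bool is_multiple(const char *entry, int mulentries)
{
  bool mul = (entry && *entry) ? (std::strchr(entry, '*') || std::strchr(entry, '?'))
                               : false;            // wildcard means several entries
  if (mulentries >= 0)
    mul = (mulentries != 0);                       // explicit option wins
  return mul;
}

int main()
{
  std::printf("%d\n", is_multiple("data_*.dbf", -1));  // 1, wildcard detected
  std::printf("%d\n", is_multiple("one.dbf", -1));     // 0, single entry
  std::printf("%d\n", is_multiple("one.dbf", 1));      // 1, forced by Mulentries
  return 0;
}
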
@ -241,19 +323,21 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
|
|||||||
if (info || !qrp) {
|
if (info || !qrp) {
|
||||||
if (infile)
|
if (infile)
|
||||||
fclose(infile);
|
fclose(infile);
|
||||||
|
else if (zutp)
|
||||||
|
zutp->close();
|
||||||
|
|
||||||
return qrp;
|
return qrp;
|
||||||
} // endif info
|
} // endif info
|
||||||
|
|
||||||
if (trace(1)) {
|
if (trace(1)) {
|
||||||
htrc("Structure of %s\n", filename);
|
htrc("Structure of %s\n", filename);
|
||||||
htrc("headlen=%hd reclen=%hd degree=%d\n",
|
htrc("headlen=%hd reclen=%hd degree=%d\n",
|
||||||
mainhead.Headlen(), mainhead.Reclen(), fields);
|
hp->Headlen(), hp->Reclen(), fields);
|
||||||
htrc("flags(iem)=%d,%d,%d cp=%d\n", mainhead.Incompleteflag,
|
htrc("flags(iem)=%d,%d,%d cp=%d\n", hp->Incompleteflag,
|
||||||
mainhead.Encryptflag, mainhead.Mdxflag, mainhead.Language);
|
hp->Encryptflag, hp->Mdxflag, hp->Language);
|
||||||
htrc("%hd records, last changed %02d/%02d/%d\n",
|
htrc("%hd records, last changed %02d/%02d/%d\n",
|
||||||
mainhead.Records(), mainhead.Filedate[1], mainhead.Filedate[2],
|
hp->Records(), hp->Filedate[1], hp->Filedate[2],
|
||||||
mainhead.Filedate[0] + (mainhead.Filedate[0] <= 30) ? 2000 : 1900);
|
hp->Filedate[0] + (hp->Filedate[0] <= 30) ? 2000 : 1900);
|
||||||
htrc("Field Type Offset Len Dec Set Mdx\n");
|
htrc("Field Type Offset Len Dec Set Mdx\n");
|
||||||
} // endif trace
|
} // endif trace
|
||||||
|
|
||||||
@ -265,21 +349,24 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
|
|||||||
for (field = 0; field < fields; field++) {
|
for (field = 0; field < fields; field++) {
|
||||||
bad = FALSE;
|
bad = FALSE;
|
||||||
|
|
||||||
if (fread(&thisfield, HEADLEN, 1, infile) != 1) {
|
if (topt->zipped) {
|
||||||
|
tfp = (DESCRIPTOR*)((char*)tfp + HEADLEN);
|
||||||
|
} else if (fread(tfp, HEADLEN, 1, infile) != 1) {
|
||||||
sprintf(g->Message, MSG(ERR_READING_REC), field+1, fn);
|
sprintf(g->Message, MSG(ERR_READING_REC), field+1, fn);
|
||||||
goto err;
|
goto err;
|
||||||
} else
|
} // endif fread
|
||||||
len = thisfield.Length;
|
|
||||||
|
len = tfp->Length;
|
||||||
|
|
||||||
if (trace(1))
|
if (trace(1))
|
||||||
htrc("%-11s %c %6ld %3d %2d %3d %3d\n",
|
htrc("%-11s %c %6ld %3d %2d %3d %3d\n",
|
||||||
thisfield.Name, thisfield.Type, thisfield.Offset, len,
|
tfp->Name, tfp->Type, tfp->Offset, len,
|
||||||
thisfield.Decimals, thisfield.Setfield, thisfield.Mdxfield);
|
tfp->Decimals, tfp->Setfield, tfp->Mdxfield);
|
||||||
|
|
||||||
/************************************************************************/
|
/************************************************************************/
|
||||||
/* Now get the results into blocks. */
|
/* Now get the results into blocks. */
|
||||||
/************************************************************************/
|
/************************************************************************/
|
||||||
switch (thisfield.Type) {
|
switch (tfp->Type) {
|
||||||
case 'C': // Characters
|
case 'C': // Characters
|
||||||
case 'L': // Logical 'T' or 'F' or space
|
case 'L': // Logical 'T' or 'F' or space
|
||||||
type = TYPE_STRING;
|
type = TYPE_STRING;
|
||||||
@ -294,7 +381,7 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
|
|||||||
// type = TYPE_INT;
|
// type = TYPE_INT;
|
||||||
// break;
|
// break;
|
||||||
case 'N':
|
case 'N':
|
||||||
type = (thisfield.Decimals) ? TYPE_DOUBLE
|
type = (tfp->Decimals) ? TYPE_DOUBLE
|
||||||
: (len > 10) ? TYPE_BIGINT : TYPE_INT;
|
: (len > 10) ? TYPE_BIGINT : TYPE_INT;
|
||||||
break;
|
break;
|
||||||
case 'F': // Float
|
case 'F': // Float
|
||||||
@ -306,8 +393,8 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
|
|||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
if (!info) {
|
if (!info) {
|
||||||
sprintf(g->Message, MSG(BAD_DBF_TYPE), thisfield.Type
|
sprintf(g->Message, MSG(BAD_DBF_TYPE), tfp->Type
|
||||||
, thisfield.Name);
|
, tfp->Name);
|
||||||
goto err;
|
goto err;
|
||||||
} // endif info
|
} // endif info
|
||||||
|
|
||||||
@ -316,27 +403,31 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
|
|||||||
} // endswitch Type
|
} // endswitch Type
|
||||||
|
|
||||||
crp = qrp->Colresp; // Column Name
|
crp = qrp->Colresp; // Column Name
|
||||||
crp->Kdata->SetValue(thisfield.Name, field);
|
crp->Kdata->SetValue(tfp->Name, field);
|
||||||
crp = crp->Next; // Data Type
|
crp = crp->Next; // Data Type
|
||||||
crp->Kdata->SetValue((int)type, field);
|
crp->Kdata->SetValue((int)type, field);
|
||||||
crp = crp->Next; // Type Name
|
crp = crp->Next; // Type Name
|
||||||
|
|
||||||
if (bad) {
|
if (bad) {
|
||||||
buf[0] = thisfield.Type;
|
buf[0] = tfp->Type;
|
||||||
crp->Kdata->SetValue(buf, field);
|
crp->Kdata->SetValue(buf, field);
|
||||||
} else
|
} else
|
||||||
crp->Kdata->SetValue(GetTypeName(type), field);
|
crp->Kdata->SetValue(GetTypeName(type), field);
|
||||||
|
|
||||||
crp = crp->Next; // Precision
|
crp = crp->Next; // Precision
|
||||||
crp->Kdata->SetValue((int)thisfield.Length, field);
|
crp->Kdata->SetValue((int)tfp->Length, field);
|
||||||
crp = crp->Next; // Length
|
crp = crp->Next; // Length
|
||||||
crp->Kdata->SetValue((int)thisfield.Length, field);
|
crp->Kdata->SetValue((int)tfp->Length, field);
|
||||||
crp = crp->Next; // Scale (precision)
|
crp = crp->Next; // Scale (precision)
|
||||||
crp->Kdata->SetValue((int)thisfield.Decimals, field);
|
crp->Kdata->SetValue((int)tfp->Decimals, field);
|
||||||
} // endfor field
|
} // endfor field
|
||||||
|
|
||||||
qrp->Nblin = field;
|
qrp->Nblin = field;
|
||||||
fclose(infile);
|
|
||||||
|
if (infile)
|
||||||
|
fclose(infile);
|
||||||
|
else if (zutp)
|
||||||
|
zutp->close();
|
||||||
|
|
||||||
#if 0
|
#if 0
|
||||||
if (info) {
|
if (info) {
|
||||||
@ -347,9 +438,9 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
|
|||||||
|
|
||||||
sprintf(buf,
|
sprintf(buf,
|
||||||
"Ver=%02x ncol=%hu nlin=%u lrecl=%hu headlen=%hu date=%02d/%02d/%02d",
|
"Ver=%02x ncol=%hu nlin=%u lrecl=%hu headlen=%hu date=%02d/%02d/%02d",
|
||||||
mainhead.Version, fields, mainhead.Records, mainhead.Reclen,
|
hp->Version, fields, hp->Records, hp->Reclen,
|
||||||
mainhead.Headlen, mainhead.Filedate[0], mainhead.Filedate[1],
|
hp->Headlen, hp->Filedate[0], hp->Filedate[1],
|
||||||
mainhead.Filedate[2]);
|
hp->Filedate[2]);
|
||||||
|
|
||||||
strcat(g->Message, buf);
|
strcat(g->Message, buf);
|
||||||
} // endif info
|
} // endif info
|
||||||
@ -360,9 +451,13 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
|
|||||||
/**************************************************************************/
|
/**************************************************************************/
|
||||||
return qrp;
|
return qrp;
|
||||||
|
|
||||||
err:
|
err:
|
||||||
fclose(infile);
|
if (infile)
|
||||||
return NULL;
|
fclose(infile);
|
||||||
|
else if (zutp)
|
||||||
|
zutp->close();
|
||||||
|
|
||||||
|
return NULL;
|
||||||
} // end of DBFColumns
|
} // end of DBFColumns
|
||||||
|
|
||||||
/* ---------------------------- Class DBFBASE ----------------------------- */
|
/* ---------------------------- Class DBFBASE ----------------------------- */
|
||||||
|
@ -19,7 +19,7 @@ typedef class DBMFAM *PDBMFAM;
|
|||||||
/****************************************************************************/
|
/****************************************************************************/
|
||||||
/* Functions used externally. */
|
/* Functions used externally. */
|
||||||
/****************************************************************************/
|
/****************************************************************************/
|
||||||
PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info);
|
PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, PTOS tiop, bool info);
|
||||||
|
|
||||||
/****************************************************************************/
|
/****************************************************************************/
|
||||||
/* This is the base class for dBASE file access methods. */
|
/* This is the base class for dBASE file access methods. */
|
||||||
|
@ -1,11 +1,11 @@
|
|||||||
/*********** File AM Zip C++ Program Source Code File (.CPP) ***********/
|
/*********** File AM Zip C++ Program Source Code File (.CPP) ***********/
|
||||||
/* PROGRAM NAME: FILAMZIP */
|
/* PROGRAM NAME: FILAMZIP */
|
||||||
/* ------------- */
|
/* ------------- */
|
||||||
/* Version 1.3 */
|
/* Version 1.4 */
|
||||||
/* */
|
/* */
|
||||||
/* COPYRIGHT: */
|
/* COPYRIGHT: */
|
||||||
/* ---------- */
|
/* ---------- */
|
||||||
/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
|
/* (C) Copyright to the author Olivier BERTRAND 2016-2020 */
|
||||||
/* */
|
/* */
|
||||||
/* WHAT THIS PROGRAM DOES: */
|
/* WHAT THIS PROGRAM DOES: */
|
||||||
/* ----------------------- */
|
/* ----------------------- */
|
||||||
@ -45,6 +45,62 @@
|
|||||||
|
|
||||||
#define WRITEBUFFERSIZE (16384)
|
#define WRITEBUFFERSIZE (16384)
|
||||||
|
|
||||||
|
/****************************************************************************/
|
||||||
|
/* Definitions used for DBF tables. */
|
||||||
|
/****************************************************************************/
|
||||||
|
#define HEADLEN 32 /* sizeof ( mainhead or thisfield ) */
|
||||||
|
//efine MEMOLEN 10 /* length of memo field in .dbf */
|
||||||
|
#define DBFTYPE 3 /* value of bits 0 and 1 if .dbf */
|
||||||
|
#define EOH 0x0D /* end-of-header marker in .dbf file */
|
||||||
|
|
||||||
|
/****************************************************************************/
|
||||||
|
/* First 32 bytes of a DBF table. */
|
||||||
|
/* Note: some reserved fields are used here to store info (Fields) */
|
||||||
|
/****************************************************************************/
|
||||||
|
typedef struct _dbfheader {
|
||||||
|
uchar Version; /* Version information flags */
|
||||||
|
char Filedate[3]; /* date, YYMMDD, binary. YY=year-1900 */
|
||||||
|
private:
|
||||||
|
/* The following four members are stored in little-endian format on disk */
|
||||||
|
char m_RecordsBuf[4]; /* records in the file */
|
||||||
|
char m_HeadlenBuf[2]; /* bytes in the header */
|
||||||
|
char m_ReclenBuf[2]; /* bytes in a record */
|
||||||
|
char m_FieldsBuf[2]; /* Reserved but used to store fields */
|
||||||
|
public:
|
||||||
|
char Incompleteflag; /* 01 if incomplete, else 00 */
|
||||||
|
char Encryptflag; /* 01 if encrypted, else 00 */
|
||||||
|
char Reserved2[12]; /* for LAN use */
|
||||||
|
char Mdxflag; /* 01 if production .mdx, else 00 */
|
||||||
|
char Language; /* Codepage */
|
||||||
|
char Reserved3[2];
|
||||||
|
|
||||||
|
uint Records(void) const { return uint4korr(m_RecordsBuf); }
|
||||||
|
ushort Headlen(void) const { return uint2korr(m_HeadlenBuf); }
|
||||||
|
ushort Reclen(void) const { return uint2korr(m_ReclenBuf); }
|
||||||
|
ushort Fields(void) const { return uint2korr(m_FieldsBuf); }
|
||||||
|
|
||||||
|
void SetHeadlen(ushort num) { int2store(m_HeadlenBuf, num); }
|
||||||
|
void SetReclen(ushort num) { int2store(m_ReclenBuf, num); }
|
||||||
|
void SetFields(ushort num) { int2store(m_FieldsBuf, num); }
|
||||||
|
} DBFHEADER;
|
||||||
|
|
||||||
|
/****************************************************************************/
|
||||||
|
/* Column field descriptor of a .dbf file. */
|
||||||
|
/****************************************************************************/
|
||||||
|
typedef struct _descriptor {
|
||||||
|
char Name[11]; /* field name, in capitals, null filled*/
|
||||||
|
char Type; /* field type, C, D, F, L, M or N */
|
||||||
|
uint Offset; /* used in memvars, not in files. */
|
||||||
|
uchar Length; /* field length */
|
||||||
|
uchar Decimals; /* number of decimal places */
|
||||||
|
short Reserved4;
|
||||||
|
char Workarea; /* ??? */
|
||||||
|
char Reserved5[2];
|
||||||
|
char Setfield; /* ??? */
|
||||||
|
char Reserved6[7];
|
||||||
|
char Mdxfield; /* 01 if tag field in production .mdx */
|
||||||
|
} DESCRIPTOR;
|
||||||
|
|
||||||
bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul);
|
bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul);
|
||||||
|
|
||||||
/***********************************************************************/
|
/***********************************************************************/
|
||||||
@ -214,10 +270,21 @@ bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul
|
|||||||
|
|
||||||
buf = (char*)PlugSubAlloc(g, NULL, WRITEBUFFERSIZE);
|
buf = (char*)PlugSubAlloc(g, NULL, WRITEBUFFERSIZE);
|
||||||
|
|
||||||
if (mul)
|
if (!mul) {
|
||||||
err = ZipFiles(g, zutp, fn, buf);
|
PCSZ entp;
|
||||||
else
|
|
||||||
err = ZipFile(g, zutp, fn, entry, buf);
|
if (!entry) { // entry defaults to the file name
|
||||||
|
char* p = strrchr((char*)fn, '/');
|
||||||
|
#if defined(__WIN__)
|
||||||
|
if (!p) p = strrchr((char*)fn, '\\');
|
||||||
|
#endif // __WIN__
|
||||||
|
entp = (p) ? p + 1 : entry;
|
||||||
|
} else
|
||||||
|
entp = entry;
|
||||||
|
|
||||||
|
err = ZipFile(g, zutp, fn, entp, buf);
|
||||||
|
} else
|
||||||
|
err = ZipFiles(g, zutp, fn, buf);
|
||||||
|
|
||||||
zutp->close();
|
zutp->close();
|
||||||
return err;
|
return err;
|
||||||
@ -232,6 +299,7 @@ ZIPUTIL::ZIPUTIL(PCSZ tgt)
|
|||||||
{
|
{
|
||||||
zipfile = NULL;
|
zipfile = NULL;
|
||||||
target = tgt;
|
target = tgt;
|
||||||
|
pwd = NULL;
|
||||||
fp = NULL;
|
fp = NULL;
|
||||||
entryopen = false;
|
entryopen = false;
|
||||||
} // end of ZIPUTIL standard constructor
|
} // end of ZIPUTIL standard constructor
|
||||||
@ -241,6 +309,7 @@ ZIPUTIL::ZIPUTIL(ZIPUTIL *zutp)
|
|||||||
{
|
{
|
||||||
zipfile = zutp->zipfile;
|
zipfile = zutp->zipfile;
|
||||||
target = zutp->target;
|
target = zutp->target;
|
||||||
|
pwd = zutp->pwd;
|
||||||
fp = zutp->fp;
|
fp = zutp->fp;
|
||||||
entryopen = zutp->entryopen;
|
entryopen = zutp->entryopen;
|
||||||
} // end of UNZIPUTL copy constructor
|
} // end of UNZIPUTL copy constructor
|
||||||
@ -385,11 +454,11 @@ void ZIPUTIL::closeEntry()
|
|||||||
/***********************************************************************/
|
/***********************************************************************/
|
||||||
/* Constructors. */
|
/* Constructors. */
|
||||||
/***********************************************************************/
|
/***********************************************************************/
|
||||||
UNZIPUTL::UNZIPUTL(PCSZ tgt, bool mul)
|
UNZIPUTL::UNZIPUTL(PCSZ tgt, PCSZ pw, bool mul)
|
||||||
{
|
{
|
||||||
zipfile = NULL;
|
zipfile = NULL;
|
||||||
target = tgt;
|
target = tgt;
|
||||||
pwd = NULL;
|
pwd = pw;
|
||||||
fp = NULL;
|
fp = NULL;
|
||||||
memory = NULL;
|
memory = NULL;
|
||||||
size = 0;
|
size = 0;
|
||||||
@ -959,7 +1028,7 @@ int UZXFAM::Cardinality(PGLOBAL g)
|
|||||||
} // end of Cardinality
|
} // end of Cardinality
|
||||||
|
|
||||||
/***********************************************************************/
|
/***********************************************************************/
|
||||||
/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
|
/* OpenTableFile: Open a FIX/UNIX table file from a ZIP file. */
|
||||||
/***********************************************************************/
|
/***********************************************************************/
|
||||||
bool UZXFAM::OpenTableFile(PGLOBAL g)
|
bool UZXFAM::OpenTableFile(PGLOBAL g)
|
||||||
{
|
{
|
||||||
@ -1015,6 +1084,197 @@ int UZXFAM::GetNext(PGLOBAL g)
|
|||||||
return RC_OK;
|
return RC_OK;
|
||||||
} // end of GetNext
|
} // end of GetNext
|
||||||
|
|
||||||
|
/* -------------------------- class UZDFAM --------------------------- */
|
||||||
|
|
||||||
|
/***********************************************************************/
|
||||||
|
/* Constructors. */
|
||||||
|
/***********************************************************************/
|
||||||
|
UZDFAM::UZDFAM(PDOSDEF tdp) : DBMFAM(tdp)
|
||||||
|
{
|
||||||
|
zutp = NULL;
|
||||||
|
tdfp = tdp;
|
||||||
|
//target = tdp->GetEntry();
|
||||||
|
//mul = tdp->GetMul();
|
||||||
|
//Lrecl = tdp->GetLrecl();
|
||||||
|
} // end of UZDFAM standard constructor
|
||||||
|
|
||||||
|
UZDFAM::UZDFAM(PUZDFAM txfp) : DBMFAM(txfp)
|
||||||
|
{
|
||||||
|
zutp = txfp->zutp;
|
||||||
|
tdfp = txfp->tdfp;
|
||||||
|
//target = txfp->target;
|
||||||
|
//mul = txfp->mul;
|
||||||
|
//Lrecl = txfp->Lrecl;
|
||||||
|
} // end of UZXFAM copy constructor
|
||||||
|
|
||||||
|
#if 0
|
||||||
|
/****************************************************************************/
|
||||||
|
/* dbfhead: Routine to analyze a DBF header. */
|
||||||
|
/* Parameters: */
|
||||||
|
/* PGLOBAL g -- pointer to the CONNECT Global structure */
|
||||||
|
/* DBFHEADER *hdrp -- pointer to _dbfheader structure */
|
||||||
|
/* Returns: */
|
||||||
|
/* RC_OK, RC_NF, RC_INFO, or RC_FX if error. */
|
||||||
|
/* Side effects: */
|
||||||
|
/* Set the fields number in the header. */
|
||||||
|
/****************************************************************************/
|
||||||
|
int UZDFAM::dbfhead(PGLOBAL g, void* buf)
|
||||||
|
{
|
||||||
|
char *endmark;
|
||||||
|
int dbc = 2, rc = RC_OK;
|
||||||
|
DBFHEADER* hdrp = (DBFHEADER*)buf;
|
||||||
|
|
||||||
|
*g->Message = '\0';
|
||||||
|
|
||||||
|
// Check first byte to be sure of .dbf type
|
||||||
|
if ((hdrp->Version & 0x03) != DBFTYPE) {
|
||||||
|
strcpy(g->Message, MSG(NOT_A_DBF_FILE));
|
||||||
|
rc = RC_INFO;
|
||||||
|
|
||||||
|
if ((hdrp->Version & 0x30) == 0x30) {
|
||||||
|
strcpy(g->Message, MSG(FOXPRO_FILE));
|
||||||
|
dbc = 264; // FoxPro database container
|
||||||
|
} // endif Version
|
||||||
|
|
||||||
|
} else
|
||||||
|
strcpy(g->Message, MSG(DBASE_FILE));
|
||||||
|
|
||||||
|
// Check last byte(s) of header
|
||||||
|
endmark = (char*)hdrp + hdrp->Headlen() - dbc;
|
||||||
|
|
||||||
|
// Some headers just have 1D others have 1D00 following fields
|
||||||
|
if (endmark[0] != EOH && endmark[1] != EOH) {
|
||||||
|
sprintf(g->Message, MSG(NO_0DH_HEAD), dbc);
|
||||||
|
|
||||||
|
if (rc == RC_OK)
|
||||||
|
return RC_FX;
|
||||||
|
|
||||||
|
} // endif endmark
|
||||||
|
|
||||||
|
// Calculate here the number of fields while we have the dbc info
|
||||||
|
hdrp->SetFields((hdrp->Headlen() - dbc - 1) / 32);
|
||||||
|
return rc;
|
||||||
|
} // end of dbfhead
|
||||||
|
|
||||||
|
/****************************************************************************/
|
||||||
|
/* ScanHeader: scan the DBF file header for number of records, record size,*/
|
||||||
|
/* and header length. Set Records, check that Reclen is equal to lrecl and */
|
||||||
|
/* return the header length or 0 in case of error. */
|
||||||
|
/****************************************************************************/
|
||||||
|
int UZDFAM::ScanHeader(PGLOBAL g, int* rln)
|
||||||
|
{
|
||||||
|
int rc;
|
||||||
|
DBFHEADER header;
|
||||||
|
|
||||||
|
/************************************************************************/
|
||||||
|
/* Get the first 32 bytes of the header. */
|
||||||
|
/************************************************************************/
|
||||||
|
rc = dbfhead(g, &header);
|
||||||
|
|
||||||
|
if (rc == RC_FX)
|
||||||
|
return -1;
|
||||||
|
|
||||||
|
*rln = (int)header.Reclen();
|
||||||
|
Records = (int)header.Records();
|
||||||
|
return (int)header.Headlen();
|
||||||
|
} // end of ScanHeader
|
||||||
|
#endif // 0
|
||||||
|
|
||||||
|
/***********************************************************************/
|
||||||
|
/* ZIP GetFileLength: returns file size in number of bytes. */
|
||||||
|
/***********************************************************************/
|
||||||
|
int UZDFAM::GetFileLength(PGLOBAL g)
|
||||||
|
{
|
||||||
|
int len;
|
||||||
|
|
||||||
|
if (!zutp && OpenTableFile(g))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
if (zutp->entryopen)
|
||||||
|
len = zutp->size;
|
||||||
|
else
|
||||||
|
len = 0;
|
||||||
|
|
||||||
|
return len;
|
||||||
|
} // end of GetFileLength
|
||||||
|
|
||||||
|
/***********************************************************************/
|
||||||
|
/* ZIP Cardinality: return the number of rows if possible. */
|
||||||
|
/***********************************************************************/
|
||||||
|
int UZDFAM::Cardinality(PGLOBAL g)
|
||||||
|
{
|
||||||
|
if (!g)
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
int card = -1;
|
||||||
|
int len = GetFileLength(g);
|
||||||
|
|
||||||
|
card = Records;
|
||||||
|
|
||||||
|
// Set number of blocks for later use
|
||||||
|
Block = (card > 0) ? (card + Nrec - 1) / Nrec : 0;
|
||||||
|
return card;
|
||||||
|
} // end of Cardinality
|
||||||
|
|
||||||
|
/***********************************************************************/
|
||||||
|
/* OpenTableFile: Open a DBF table file from a ZIP file. */
|
||||||
|
/***********************************************************************/
|
||||||
|
bool UZDFAM::OpenTableFile(PGLOBAL g)
|
||||||
|
{
|
||||||
|
// May have been already opened in GetFileLength
|
||||||
|
if (!zutp || !zutp->zipfile) {
|
||||||
|
char filename[_MAX_PATH];
|
||||||
|
MODE mode = Tdbp->GetMode();
|
||||||
|
|
||||||
|
/*********************************************************************/
|
||||||
|
/* Allocate the ZIP utility class. */
|
||||||
|
/*********************************************************************/
|
||||||
|
if (!zutp)
|
||||||
|
zutp = new(g)UNZIPUTL(tdfp);
|
||||||
|
|
||||||
|
// We used the file name relative to recorded datapath
|
||||||
|
PlugSetPath(filename, To_File, Tdbp->GetPath());
|
||||||
|
|
||||||
|
if (!zutp->OpenTable(g, mode, filename)) {
|
||||||
|
// The pseudo "buffer" is here the entire real buffer
|
||||||
|
Memory = zutp->memory;
|
||||||
|
Top = Memory + zutp->size;
|
||||||
|
To_Fb = zutp->fp; // Useful when closing
|
||||||
|
return AllocateBuffer(g);
|
||||||
|
} else
|
||||||
|
return true;
|
||||||
|
|
||||||
|
} else
|
||||||
|
Reset();
|
||||||
|
|
||||||
|
return false;
|
||||||
|
} // end of OpenTableFile
|
||||||
|
|
||||||
|
/***********************************************************************/
|
||||||
|
/* GetNext: go to next entry. */
|
||||||
|
/***********************************************************************/
|
||||||
|
int UZDFAM::GetNext(PGLOBAL g)
|
||||||
|
{
|
||||||
|
int rc = zutp->nextEntry(g);
|
||||||
|
|
||||||
|
if (rc != RC_OK)
|
||||||
|
return rc;
|
||||||
|
|
||||||
|
int len = zutp->size;
|
||||||
|
|
||||||
|
#if 0
|
||||||
|
if (len % Lrecl) {
|
||||||
|
sprintf(g->Message, MSG(NOT_FIXED_LEN), zutp->fn, len, Lrecl);
|
||||||
|
return RC_FX;
|
||||||
|
} // endif size
|
||||||
|
#endif // 0
|
||||||
|
|
||||||
|
Memory = zutp->memory;
|
||||||
|
Top = Memory + len;
|
||||||
|
Rewind();
|
||||||
|
return RC_OK;
|
||||||
|
} // end of GetNext
|
||||||
|
|
||||||
/* -------------------------- class ZIPFAM --------------------------- */
|
/* -------------------------- class ZIPFAM --------------------------- */
|
||||||
|
|
||||||
/***********************************************************************/
|
/***********************************************************************/
|
||||||
@ -1045,7 +1305,7 @@ bool ZIPFAM::OpenTableFile(PGLOBAL g)
|
|||||||
strcpy(g->Message, "No insert into existing zip file");
|
strcpy(g->Message, "No insert into existing zip file");
|
||||||
return true;
|
return true;
|
||||||
} else if (append && len > 0) {
|
} else if (append && len > 0) {
|
||||||
UNZIPUTL *zutp = new(g) UNZIPUTL(target, false);
|
UNZIPUTL *zutp = new(g) UNZIPUTL(target, NULL, false);
|
||||||
|
|
||||||
if (!zutp->IsInsertOk(g, filename)) {
|
if (!zutp->IsInsertOk(g, filename)) {
|
||||||
strcpy(g->Message, "No insert into existing entry");
|
strcpy(g->Message, "No insert into existing entry");
|
||||||
@ -1129,7 +1389,7 @@ bool ZPXFAM::OpenTableFile(PGLOBAL g)
|
|||||||
strcpy(g->Message, "No insert into existing zip file");
|
strcpy(g->Message, "No insert into existing zip file");
|
||||||
return true;
|
return true;
|
||||||
} else if (append && len > 0) {
|
} else if (append && len > 0) {
|
||||||
UNZIPUTL *zutp = new(g) UNZIPUTL(target, false);
|
UNZIPUTL *zutp = new(g) UNZIPUTL(target, NULL, false);
|
||||||
|
|
||||||
if (!zutp->IsInsertOk(g, filename)) {
|
if (!zutp->IsInsertOk(g, filename)) {
|
||||||
strcpy(g->Message, "No insert into existing entry");
|
strcpy(g->Message, "No insert into existing entry");
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
/************** filamzip H Declares Source Code File (.H) **************/
|
/************** filamzip H Declares Source Code File (.H) **************/
|
||||||
/* Name: filamzip.h Version 1.2 */
|
/* Name: filamzip.h Version 1.3 */
|
||||||
/* */
|
/* */
|
||||||
/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
|
/* (C) Copyright to the author Olivier BERTRAND 2016-2020 */
|
||||||
/* */
|
/* */
|
||||||
/* This file contains the ZIP file access method classes declares. */
|
/* This file contains the ZIP file access method classes declares. */
|
||||||
/***********************************************************************/
|
/***********************************************************************/
|
||||||
@ -11,6 +11,7 @@
|
|||||||
#include "block.h"
|
#include "block.h"
|
||||||
#include "filamap.h"
|
#include "filamap.h"
|
||||||
#include "filamfix.h"
|
#include "filamfix.h"
|
||||||
|
#include "filamdbf.h"
|
||||||
#include "zip.h"
|
#include "zip.h"
|
||||||
#include "unzip.h"
|
#include "unzip.h"
|
||||||
|
|
||||||
@ -18,6 +19,7 @@
|
|||||||
|
|
||||||
typedef class UNZFAM *PUNZFAM;
|
typedef class UNZFAM *PUNZFAM;
|
||||||
typedef class UZXFAM *PUZXFAM;
|
typedef class UZXFAM *PUZXFAM;
|
||||||
|
typedef class UZDFAM* PUZDFAM;
|
||||||
typedef class ZIPFAM *PZIPFAM;
|
typedef class ZIPFAM *PZIPFAM;
|
||||||
typedef class ZPXFAM *PZPXFAM;
|
typedef class ZPXFAM *PZPXFAM;
|
||||||
|
|
||||||
@ -53,7 +55,7 @@ class DllExport ZIPUTIL : public BLOCK {
|
|||||||
class DllExport UNZIPUTL : public BLOCK {
|
class DllExport UNZIPUTL : public BLOCK {
|
||||||
public:
|
public:
|
||||||
// Constructor
|
// Constructor
|
||||||
UNZIPUTL(PCSZ tgt, bool mul);
|
UNZIPUTL(PCSZ tgt, PCSZ pw, bool mul);
|
||||||
UNZIPUTL(PDOSDEF tdp);
|
UNZIPUTL(PDOSDEF tdp);
|
||||||
|
|
||||||
// Implementation
|
// Implementation
|
||||||
@ -143,6 +145,36 @@ class DllExport UZXFAM : public MPXFAM {
|
|||||||
PDOSDEF tdfp;
|
PDOSDEF tdfp;
|
||||||
}; // end of UZXFAM
|
}; // end of UZXFAM
|
||||||
|
|
||||||
|
/***********************************************************************/
|
||||||
|
/* This is the fixed unzip file access method. */
|
||||||
|
/***********************************************************************/
|
||||||
|
class DllExport UZDFAM : public DBMFAM {
|
||||||
|
//friend class UNZFAM;
|
||||||
|
public:
|
||||||
|
// Constructors
|
||||||
|
UZDFAM(PDOSDEF tdp);
|
||||||
|
UZDFAM(PUZDFAM txfp);
|
||||||
|
|
||||||
|
// Implementation
|
||||||
|
virtual AMT GetAmType(void) { return TYPE_AM_ZIP; }
|
||||||
|
virtual PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UZDFAM(this); }
|
||||||
|
|
||||||
|
// Methods
|
||||||
|
virtual int GetFileLength(PGLOBAL g);
|
||||||
|
virtual int Cardinality(PGLOBAL g);
|
||||||
|
virtual bool OpenTableFile(PGLOBAL g);
|
||||||
|
virtual int GetNext(PGLOBAL g);
|
||||||
|
//virtual int ReadBuffer(PGLOBAL g);
|
||||||
|
|
||||||
|
protected:
|
||||||
|
int dbfhead(PGLOBAL g, void* buf);
|
||||||
|
int ScanHeader(PGLOBAL g, int* rln);
|
||||||
|
|
||||||
|
// Members
|
||||||
|
UNZIPUTL* zutp;
|
||||||
|
PDOSDEF tdfp;
|
||||||
|
}; // end of UZDFAM
|
||||||
|
|
||||||
/***********************************************************************/
|
/***********************************************************************/
|
||||||
/* This is the zip file access method. */
|
/* This is the zip file access method. */
|
||||||
/***********************************************************************/
|
/***********************************************************************/
|
||||||
|
@ -5882,7 +5882,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
|
|||||||
|
|
||||||
} else switch (ttp) {
|
} else switch (ttp) {
|
||||||
case TAB_DBF:
|
case TAB_DBF:
|
||||||
qrp= DBFColumns(g, dpath, fn, fnc == FNC_COL);
|
qrp= DBFColumns(g, dpath, fn, topt, fnc == FNC_COL);
|
||||||
break;
|
break;
|
||||||
#if defined(ODBC_SUPPORT)
|
#if defined(ODBC_SUPPORT)
|
||||||
case TAB_ODBC:
|
case TAB_ODBC:
|
||||||
@ -6733,11 +6733,6 @@ int ha_connect::create(const char *name, TABLE *table_arg,
|
|||||||
PCSZ m= GetListOption(g, "Mulentries", options->oplist, "NO");
|
PCSZ m= GetListOption(g, "Mulentries", options->oplist, "NO");
|
||||||
bool mul= *m == '1' || *m == 'Y' || *m == 'y' || !stricmp(m, "ON");
|
bool mul= *m == '1' || *m == 'Y' || *m == 'y' || !stricmp(m, "ON");
|
||||||
|
|
||||||
if (!entry && !mul) {
|
|
||||||
my_message(ER_UNKNOWN_ERROR, "Missing entry name", MYF(0));
|
|
||||||
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
|
|
||||||
} // endif entry
|
|
||||||
|
|
||||||
strcat(strcat(strcpy(dbpath, "./"), table->s->db.str), "/");
|
strcat(strcat(strcpy(dbpath, "./"), table->s->db.str), "/");
|
||||||
PlugSetPath(zbuf, options->filename, dbpath);
|
PlugSetPath(zbuf, options->filename, dbpath);
|
||||||
PlugSetPath(buf, fn, dbpath);
|
PlugSetPath(buf, fn, dbpath);
|
||||||
|
@ -380,7 +380,6 @@ MGODEF::MGODEF(void)
|
|||||||
Uri = NULL;
|
Uri = NULL;
|
||||||
Colist = NULL;
|
Colist = NULL;
|
||||||
Filter = NULL;
|
Filter = NULL;
|
||||||
Level = 0;
|
|
||||||
Base = 0;
|
Base = 0;
|
||||||
Version = 0;
|
Version = 0;
|
||||||
Pipe = false;
|
Pipe = false;
|
||||||
|
@ -82,7 +82,6 @@ protected:
|
|||||||
PSZ Wrapname; /* Java wrapper name */
|
PSZ Wrapname; /* Java wrapper name */
|
||||||
PCSZ Colist; /* Options list */
|
PCSZ Colist; /* Options list */
|
||||||
PCSZ Filter; /* Filtering query */
|
PCSZ Filter; /* Filtering query */
|
||||||
int Level; /* Used for catalog table */
|
|
||||||
int Base; /* The array index base */
|
int Base; /* The array index base */
|
||||||
int Version; /* The Java driver version */
|
int Version; /* The Java driver version */
|
||||||
bool Pipe; /* True is Colist is a pipeline */
|
bool Pipe; /* True is Colist is a pipeline */
|
||||||
|
@ -49,7 +49,7 @@ bool XMLDOCUMENT::InitZip(PGLOBAL g, PCSZ entry)
|
|||||||
{
|
{
|
||||||
#if defined(ZIP_SUPPORT)
|
#if defined(ZIP_SUPPORT)
|
||||||
bool mul = (entry) ? strchr(entry, '*') || strchr(entry, '?') : false;
|
bool mul = (entry) ? strchr(entry, '*') || strchr(entry, '?') : false;
|
||||||
zip = new(g) UNZIPUTL(entry, mul);
|
zip = new(g) UNZIPUTL(entry, NULL, mul);
|
||||||
return zip == NULL;
|
return zip == NULL;
|
||||||
#else // !ZIP_SUPPORT
|
#else // !ZIP_SUPPORT
|
||||||
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
|
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
|
||||||
@@ -26,6 +26,8 @@
 #include "tabmul.h"
 #include "filter.h"

+PQRYRES MGOColumns(PGLOBAL g, PCSZ db, PCSZ uri, PTOS topt, bool info);
+
 /* -------------------------- Class CMGDISC -------------------------- */

 /***********************************************************************/
@@ -1,11 +1,11 @@
 /************* TabDos C++ Program Source Code File (.CPP) **************/
 /* PROGRAM NAME: TABDOS */
 /* ------------- */
-/* Version 4.9.4 */
+/* Version 4.9.5 */
 /* */
 /* COPYRIGHT: */
 /* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2019 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2020 */
 /* */
 /* WHAT THIS PROGRAM DOES: */
 /* ----------------------- */
@ -359,7 +359,26 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
|
|||||||
/* Allocate table and file processing class of the proper type. */
|
/* Allocate table and file processing class of the proper type. */
|
||||||
/* Column blocks will be allocated only when needed. */
|
/* Column blocks will be allocated only when needed. */
|
||||||
/*********************************************************************/
|
/*********************************************************************/
|
||||||
if (Zipped) {
|
if (Recfm == RECFM_DBF) {
|
||||||
|
if (Catfunc == FNC_NO) {
|
||||||
|
if (Zipped) {
|
||||||
|
if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) {
|
||||||
|
txfp = new(g) UZDFAM(this);
|
||||||
|
} else {
|
||||||
|
strcpy(g->Message, "Zipped DBF tables are read only");
|
||||||
|
return NULL;
|
||||||
|
} // endif's mode
|
||||||
|
|
||||||
|
} else if (map)
|
||||||
|
txfp = new(g) DBMFAM(this);
|
||||||
|
else
|
||||||
|
txfp = new(g) DBFFAM(this);
|
||||||
|
|
||||||
|
tdbp = new(g) TDBFIX(this, txfp);
|
||||||
|
} else
|
||||||
|
tdbp = new(g) TDBDCL(this); // Catfunc should be 'C'
|
||||||
|
|
||||||
|
} else if (Zipped) {
|
||||||
#if defined(ZIP_SUPPORT)
|
#if defined(ZIP_SUPPORT)
|
||||||
if (Recfm == RECFM_VAR) {
|
if (Recfm == RECFM_VAR) {
|
||||||
if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) {
|
if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) {
|
||||||
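
The rewritten branch above keys the dispatch on the record format first (Recfm == RECFM_DBF) and only then on Zipped, and it rejects any write mode for zipped DBF tables. A compact sketch of that mode gate follows; the enum and class names are hypothetical stand-ins for the CONNECT types (UZDFAM, DBMFAM, DBFFAM), not the real classes.

#include <memory>
#include <stdexcept>

enum class Mode { Read, Any, Alter, Insert, Update, Delete };

struct FileAccess { virtual ~FileAccess() = default; };
struct ZippedDbfAccess : FileAccess {};   // stands in for UZDFAM
struct MappedDbfAccess : FileAccess {};   // stands in for DBMFAM
struct PlainDbfAccess  : FileAccess {};   // stands in for DBFFAM

// Pick the file-access object for a DBF table; zipped archives are read only.
std::unique_ptr<FileAccess> MakeDbfAccess(bool zipped, bool mapped, Mode mode)
{
  if (zipped) {
    if (mode == Mode::Read || mode == Mode::Any || mode == Mode::Alter)
      return std::make_unique<ZippedDbfAccess>();
    throw std::runtime_error("Zipped DBF tables are read only");
  }
  return mapped ? std::unique_ptr<FileAccess>(std::make_unique<MappedDbfAccess>())
                : std::unique_ptr<FileAccess>(std::make_unique<PlainDbfAccess>());
}

int main() {
  auto fam = MakeDbfAccess(true, false, Mode::Read);  // accepted
  try { MakeDbfAccess(true, false, Mode::Insert); }   // rejected
  catch (const std::exception&) { /* "Zipped DBF tables are read only" */ }
  return fam ? 0 : 1;
}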
@ -389,17 +408,6 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
|
|||||||
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
|
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
|
||||||
return NULL;
|
return NULL;
|
||||||
#endif // !ZIP_SUPPORT
|
#endif // !ZIP_SUPPORT
|
||||||
} else if (Recfm == RECFM_DBF) {
|
|
||||||
if (Catfunc == FNC_NO) {
|
|
||||||
if (map)
|
|
||||||
txfp = new(g) DBMFAM(this);
|
|
||||||
else
|
|
||||||
txfp = new(g) DBFFAM(this);
|
|
||||||
|
|
||||||
tdbp = new(g) TDBFIX(this, txfp);
|
|
||||||
} else // Catfunc should be 'C'
|
|
||||||
tdbp = new(g) TDBDCL(this);
|
|
||||||
|
|
||||||
} else if (Recfm != RECFM_VAR && Compressed < 2) {
|
} else if (Recfm != RECFM_VAR && Compressed < 2) {
|
||||||
if (Huge)
|
if (Huge)
|
||||||
txfp = new(g) BGXFAM(this);
|
txfp = new(g) BGXFAM(this);
|
||||||
@@ -30,6 +30,7 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */
   friend class DBFBASE;
   friend class UNZIPUTL;
   friend class JSONCOL;
+  friend class TDBDCL;
  public:
   // Constructor
   DOSDEF(void);
@@ -98,18 +98,20 @@ class DllExport BINCOL : public DOSCOL {
 /* This is the class declaration for the DBF columns catalog table. */
 /***********************************************************************/
 class TDBDCL : public TDBCAT {
  public:
   // Constructor
-  TDBDCL(PDOSDEF tdp) : TDBCAT(tdp) {Fn = tdp->GetFn();}
+  TDBDCL(PDOSDEF tdp) : TDBCAT(tdp)
+    {Fn = tdp->GetFn(); Topt = tdp->GetTopt();}

  protected:
   // Specific routines
   virtual PQRYRES GetResult(PGLOBAL g)
-    {return DBFColumns(g, ((PTABDEF)To_Def)->GetPath(), Fn, false);}
+    {return DBFColumns(g, ((PTABDEF)To_Def)->GetPath(), Fn, Topt, false);}

   // Members
   PCSZ Fn; // The DBF file (path) name
+  PTOS Topt;
 }; // end of class TDBOCL


 #endif // __TABFIX__
@@ -739,6 +739,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
 /***********************************************************************/
 TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp)
 {
+  G = NULL;
   Top = NULL;
   Row = NULL;
   Val = NULL;
@@ -104,7 +104,6 @@ public:
   PCSZ Xcol;    /* Name of expandable column */
   int Limit;    /* Limit of multiple values */
   int Pretty;   /* Depends on file structure */
-  int Level;    /* Used for catalog table */
   int Base;     /* The array index base */
   bool Strict;  /* Strict syntax checking */
   char Sep;     /* The Jpath separator */
@@ -23,6 +23,7 @@
 #include "filamzip.h"
 #include "resource.h" // for IDS_COLUMNS
 #include "tabdos.h"
+#include "tabmul.h"
 #include "tabzip.h"

 /* -------------------------- Class ZIPDEF --------------------------- */
@@ -41,7 +42,14 @@ bool ZIPDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
 /***********************************************************************/
 PTDB ZIPDEF::GetTable(PGLOBAL g, MODE m)
 {
-  return new(g) TDBZIP(this);
+  PTDB tdbp = NULL;
+
+  tdbp = new(g) TDBZIP(this);
+
+  if (Multiple)
+    tdbp = new(g) TDBMUL(tdbp);
+
+  return tdbp;
 } // end of GetTable

 /* ------------------------------------------------------------------- */
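
ZIPDEF::GetTable() above no longer returns the bare TDBZIP: when the Multiple option is set, the zip table is first wrapped in the multiple-file wrapper. A minimal sketch of that wrapping (decorator-style) step is shown below; ZipTable and MultipleTable are simplified stand-ins for TDBZIP and TDBMUL, and the three-file loop is invented for illustration.

#include <cstdio>
#include <memory>

struct Table { virtual ~Table() = default; virtual void Read() = 0; };

// Stand-in for TDBZIP: reads one archive.
struct ZipTable : Table {
  void Read() override { std::puts("reading one zip archive"); }
};

// Stand-in for TDBMUL: drives the wrapped table over several matching files.
struct MultipleTable : Table {
  explicit MultipleTable(std::unique_ptr<Table> inner) : inner_(std::move(inner)) {}
  void Read() override {
    for (int file = 0; file < 3; ++file)   // pretend three files match the pattern
      inner_->Read();
  }
  std::unique_ptr<Table> inner_;
};

std::unique_ptr<Table> GetTable(bool multiple) {
  std::unique_ptr<Table> tdbp = std::make_unique<ZipTable>();
  if (multiple)
    tdbp = std::make_unique<MultipleTable>(std::move(tdbp));  // decorate, don't replace
  return tdbp;
}

int main() { GetTable(true)->Read(); }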
@@ -108,7 +116,7 @@ int TDBZIP::Cardinality(PGLOBAL g)

     Cardinal = (err == UNZ_OK) ? (int)ginfo.number_entry : 0;
   } else
-    Cardinal = 0;
+    Cardinal = 10; // Dummy for multiple tables

   } // endif Cardinal

@@ -187,6 +195,7 @@ int TDBZIP::DeleteDB(PGLOBAL g, int irc)
 void TDBZIP::CloseDB(PGLOBAL g)
 {
   close();
+  nexterr = UNZ_OK; // For multiple tables
   Use = USE_READY;  // Just to be clean
 } // end of CloseDB

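
Resetting nexterr in CloseDB() above matters once the same TDBZIP object can be driven over several archives by the multiple-file wrapper: per-archive error state must not leak into the next open. A generic, hypothetical sketch of that reset-on-close discipline follows; ArchiveReader is not a CONNECT class, and the status values are invented.

#include <cstdio>
#include <initializer_list>

class ArchiveReader {
 public:
  void Open(const char*) {}              // a real reader would open the archive here
  // Returns 1 for "one more entry", then -1 for end-of-archive, mimicking a
  // "go to next file" status such as unzip's UNZ_END_OF_LIST_OF_FILE.
  int  ReadNext() { return next_status_ = (next_status_ == 0 ? 1 : -1); }
  void Close()    { next_status_ = 0; }  // reset per-archive state, like nexterr = UNZ_OK
 private:
  int next_status_ = 0;                  // stands in for the "next entry" status kept across reads
};

int main() {
  ArchiveReader r;
  for (const char* f : {"a.zip", "b.zip"}) {   // a wrapper drives several archives in turn
    r.Open(f);
    while (r.ReadNext() > 0)
      std::printf("entry read from %s\n", f);
    r.Close();   // without this reset, the stale end-of-archive status would end b.zip early
  }
  return 0;
}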
@@ -48,6 +48,8 @@ public:

   // Implementation
   virtual AMT GetAmType(void) {return TYPE_AM_ZIP;}
+  virtual PCSZ GetFile(PGLOBAL) {return zfn;}
+  virtual void SetFile(PGLOBAL, PCSZ fn) {zfn = fn;}

   // Methods
   virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
@@ -861,7 +861,7 @@ buf_LRU_check_size_of_non_data_objects(
 	    + UT_LIST_GET_LEN(buf_pool->LRU))
 	    < buf_pool->curr_size / 3) {

-		if (!buf_lru_switched_on_innodb_mon) {
+		if (!buf_lru_switched_on_innodb_mon && srv_monitor_event) {

 			/* Over 67 % of the buffer pool is occupied by lock
 			heaps or the adaptive hash index. This may be a memory
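
The InnoDB hunk above adds a null check on srv_monitor_event before the monitor is switched on, presumably so the event is only signalled when it has actually been created in this process. A generic sketch of that guard pattern follows; the names here (monitor_event, MaybeEnableMonitor) are illustrative and not the InnoDB API.

#include <atomic>
#include <cstdio>

// Illustrative stand-in: a process-wide event that may or may not have been created.
struct Event { void Set() { std::puts("monitor event signalled"); } };

std::atomic<bool> monitor_switched_on{false};
Event* monitor_event = nullptr;   // stays null when the monitor machinery was never started

void MaybeEnableMonitor() {
  // Guard on both the flag and the event pointer, mirroring the added "&& srv_monitor_event".
  if (!monitor_switched_on.load() && monitor_event != nullptr) {
    monitor_switched_on.store(true);
    monitor_event->Set();
  }
}

int main() {
  MaybeEnableMonitor();   // safe no-op while the event does not exist
  Event ev;
  monitor_event = &ev;
  MaybeEnableMonitor();   // now flips the flag and signals the event once
  return monitor_switched_on.load() ? 0 : 1;
}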