Merge branch '10.1' into 10.2
commit ef7cb0a0b5
@ -31,7 +31,6 @@ SET(HAVE_BACKTRACE_SYMBOLS CACHE INTERNAL "")
SET(HAVE_BACKTRACE_SYMBOLS_FD CACHE INTERNAL "")
SET(HAVE_BFILL CACHE INTERNAL "")
SET(HAVE_BSD_SIGNALS CACHE INTERNAL "")
SET(HAVE_BSS_START CACHE INTERNAL "")
SET(HAVE_CLOCK_GETTIME CACHE INTERNAL "")
SET(HAVE_COMPRESS CACHE INTERNAL "")
SET(HAVE_CRYPT CACHE INTERNAL "")
@ -250,7 +250,6 @@
/* Symbols we may use */
#cmakedefine HAVE_SYS_ERRLIST 1
/* used by stacktrace functions */
#cmakedefine HAVE_BSS_START 1
#cmakedefine HAVE_BACKTRACE 1
#cmakedefine HAVE_BACKTRACE_SYMBOLS 1
#cmakedefine HAVE_BACKTRACE_SYMBOLS_FD 1
@ -839,14 +839,6 @@ CHECK_CXX_SOURCE_COMPILES("
HAVE_ABI_CXA_DEMANGLE)
ENDIF()

CHECK_C_SOURCE_COMPILES("
int main(int argc, char **argv)
{
extern char *__bss_start;
return __bss_start ? 1 : 0;
}"
HAVE_BSS_START)

CHECK_C_SOURCE_COMPILES("
int main()
{
@ -1,6 +1,6 @@
'\" t
.\"
.TH "\FBMYSQL_UPGRADE\FR" "1" "3 April 2017" "MariaDB 10\&.2" "MariaDB Database System"
.TH "\FBMYSQL_UPGRADE\FR" "1" "20 July 2020" "MariaDB 10\&.2" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@ -165,6 +165,8 @@ in the data directory\&. This is used to quickly check whether all tables have b
\fB\-\-force\fR
option\&.
.PP
For this reason, \fBmysql_upgrade\fR needs to be run as a user with write access to the data directory\&.
.PP
If you install MariaDB from RPM packages on Linux, you must install the server and client RPMs\&.
\fBmysql_upgrade\fR
is included in the server RPM but requires the client RPM because the latter includes
@ -848,8 +848,7 @@ sub run_test_server ($$$) {
My::CoreDump->show($core_file, $exe_mysqld, $opt_parallel);

# Limit number of core files saved
if ($opt_max_save_core > 0 &&
$num_saved_cores >= $opt_max_save_core)
if ($num_saved_cores >= $opt_max_save_core)
{
mtr_report(" - deleting it, already saved",
"$opt_max_save_core");
@ -865,8 +864,7 @@ sub run_test_server ($$$) {
},
$worker_savedir);

if ($opt_max_save_datadir > 0 &&
$num_saved_datadir >= $opt_max_save_datadir)
if ($num_saved_datadir >= $opt_max_save_datadir)
{
mtr_report(" - skipping '$worker_savedir/'");
rmtree($worker_savedir);
@ -875,9 +873,9 @@ sub run_test_server ($$$) {
{
mtr_report(" - saving '$worker_savedir/' to '$savedir/'");
rename($worker_savedir, $savedir);
$num_saved_datadir++;
}
resfile_print_test();
$num_saved_datadir++;
$num_failed_test++ unless ($result->{retries} ||
$result->{exp_fail});
@ -1433,6 +1431,17 @@ sub command_line_setup {
report_option('verbose', $opt_verbose);
}

# Negative values aren't meaningful on integer options
foreach(grep(/=i$/, keys %options))
{
if (defined ${$options{$_}} &&
do { no warnings "numeric"; int ${$options{$_}} < 0})
{
my $v= (split /=/)[0];
die("$v doesn't accept a negative value:");
}
}

# Find the absolute path to the test directory
$glob_mysql_test_dir= cwd();
if ($glob_mysql_test_dir =~ / /)
@ -6540,12 +6549,12 @@ Options for debugging the product
test(s)
max-save-core Limit the number of core files saved (to avoid filling
up disks for heavily crashing server). Defaults to
$opt_max_save_core, set to 0 for no limit. Set
it's default with MTR_MAX_SAVE_CORE
$opt_max_save_core. Set its default with
MTR_MAX_SAVE_CORE
max-save-datadir Limit the number of datadir saved (to avoid filling
up disks for heavily crashing server). Defaults to
$opt_max_save_datadir, set to 0 for no limit. Set
it's default with MTR_MAX_SAVE_DATADIR
$opt_max_save_datadir. Set its default with
MTR_MAX_SAVE_DATADIR
max-test-fail Limit the number of test failures before aborting
the current test run. Defaults to
$opt_max_test_fail, set to 0 for no limit. Set
@ -1186,13 +1186,13 @@ i count(*) std(e1/e2)
3 4 0.00000000
select std(s1/s2) from bug22555;
std(s1/s2)
0.21325764
0.21328517
select std(o1/o2) from bug22555;
std(o1/o2)
0.2132576358664934
select std(e1/e2) from bug22555;
std(e1/e2)
0.21325764
0.21328517
set @saved_div_precision_increment=@@div_precision_increment;
set div_precision_increment=19;
select i, count(*), std(s1/s2) from bug22555 group by i order by i;
@ -16,6 +16,7 @@ GRANT USAGE ON *.* TO 'foo'
show grants for foo@'%';
ERROR 42000: Access denied for user 'test'@'%' to database 'mysql'
connection default;
disconnect conn_1;
drop user test, foo;
drop role foo;
CREATE TABLE t1 (a INT);
@ -25,3 +26,16 @@ ERROR HY000: Table 'user' was not locked with LOCK TABLES
REVOKE PROCESS ON *.* FROM u;
ERROR HY000: Table 'user' was not locked with LOCK TABLES
DROP TABLE t1;
create database mysqltest1;
use mysqltest1;
create table t1(id int);
insert t1 values(2);
create user u1@localhost;
grant select on mysqltest1.t1 to u1@localhost;
grant update on mysqltest1.* to u1@localhost;
connect u1, localhost, u1;
update mysqltest1.t1 set id=1 where id=2;
connection default;
disconnect u1;
drop user u1@localhost;
drop database mysqltest1;
@ -619,7 +619,7 @@ select 4 - 3 * 2, (4 - 3) * 2, 4 - (3 * 2);
Testing that / is left associative
select 15 / 5 / 3, (15 / 5) / 3, 15 / (5 / 3);
15 / 5 / 3 (15 / 5) / 3 15 / (5 / 3)
1.00000000 1.00000000 9.0000
1.00000000 1.00000000 8.9998
Testing that / has precedence over |
select 105 / 5 | 2, (105 / 5) | 2, 105 / (5 | 2);
105 / 5 | 2 (105 / 5) | 2 105 / (5 | 2)
@ -2756,5 +2756,45 @@ SELECT 1 FROM t1 WHERE a XOR 'a';
1
DROP TABLE t1;
#
# Bug #25207522: INCORRECT ORDER-BY BEHAVIOR ON A PARTITIONED TABLE
# WITH A COMPOSITE PREFIX INDEX
#
create table t1(id int unsigned not null,
data varchar(2) default null,
key data_idx (data(1),id)
) default charset=utf8
partition by range (id) (
partition p10 values less than (10),
partition p20 values less than (20)
);
insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ;
select id from t1 where data = 'ab' order by id;
id
4
5
6
14
15
16
drop table t1;
create table t1(id int unsigned not null,
data text default null,
key data_idx (data(1),id)
) default charset=utf8
partition by range (id) (
partition p10 values less than (10),
partition p20 values less than (20)
);
insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ;
select id from t1 where data = 'ab' order by id;
id
4
5
6
14
15
16
drop table t1;
#
# End of 10.1 tests
#
@ -9,5 +9,37 @@ ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SET use_stat_tables = DEFAULT;
DROP TABLE t1;
#
# MDEV-21472: ALTER TABLE ... ANALYZE PARTITION ... with EITS reads and locks all rows
#
CREATE TABLE t1 (
id int(11) auto_increment primary key,
c1 int(11) DEFAULT NULL
) PARTITION BY RANGE (id) (
PARTITION p0 VALUES LESS THAN (4),
PARTITION p1 VALUES LESS THAN MAXVALUE
);
insert into t1(c1) values (1),(1),(1),(1), (1),(1),(1),(1);
insert into t1(c1) select c1 from t1;
insert into t1(c1) select c1 from t1;
select count(*) from t1;
count(*)
32
select count(*) from t1 where id <4;
count(*)
3
flush status;
set session use_stat_tables='preferably';
# Must NOT show "Engine-independent statistics collected":
alter table t1 analyze partition p0;
Table Op Msg_type Msg_text
test.t1 analyze status OK
# Should not have Handler_read_rnd_next=34
show session status like 'Handler_read_rnd%';
Variable_name Value
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
drop table t1;
SET use_stat_tables = DEFAULT;
@ -16,7 +16,6 @@ SET use_stat_tables = PREFERABLY;
CREATE TABLE t1 ( a INT ) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
ALTER TABLE t1 ANALYZE PARTITION p1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@ -2585,6 +2585,30 @@ e 2
o 6
DROP TABLE t1, t2;
#
# MDEV-19232: Floating point precision / value comparison problem
#
CREATE TABLE t1 (region varchar(60), area decimal(10,0), population decimal(11,0));
INSERT INTO t1 VALUES ('Central America and the Caribbean',91,11797);
INSERT INTO t1 VALUES ('Central America and the Caribbean',442,66422);
SET @save_optimizer_switch=@@optimizer_switch;
SET optimizer_switch='subquery_cache=on';
SELECT
population, area, population/area,
cast(population/area as DECIMAL(20,9)) FROM t1 LIMIT 1;
population area population/area cast(population/area as DECIMAL(20,9))
11797 91 129.6374 129.637400000
SELECT * FROM t1 A
WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region);
region area population
Central America and the Caribbean 442 66422
SET optimizer_switch='subquery_cache=off';
SELECT * FROM t1 A
WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region);
region area population
Central America and the Caribbean 442 66422
SET @@optimizer_switch= @save_optimizer_switch;
DROP TABLE t1;
#
# MDEV-22852: SIGSEGV in sortlength (optimized builds)
#
SET @save_optimizer_switch=@@optimizer_switch;
@ -972,4 +972,131 @@ id
DROP PROCEDURE p1;
DROP TABLE t1;
# End of 10.0 tests
#
# MDEV-23221: A subquery causes crash
#
create table t1 (
location_code varchar(10),
country_id varchar(10)
);
insert into t1 values ('HKG', 'HK');
insert into t1 values ('NYC', 'US');
insert into t1 values ('LAX', 'US');
create table t2 (
container_id varchar(10),
cntr_activity_type varchar(10),
cntr_dest varchar(10)
);
insert into t2 values ('AAAA1111', 'VSL', 'NYC');
insert into t2 values ('AAAA1111', 'CUV', 'NYC');
insert into t2 values ('BBBB2222', 'VSL', 'LAX');
insert into t2 values ('BBBB2222', 'XYZ', 'LAX');
# Must not crash or return an error:
select
(select country_id from t1 where location_code = cl1.cntr_dest) as dest_cntry,
(select
max(container_id)
from t2 as cl2
where
cl2.container_id = cl1.container_id and
cl2.cntr_activity_type = 'CUV' and
exists (select location_code
from t1
where
location_code = cl2.cntr_dest and
country_id = dest_cntry)
) as CUV
from
t2 cl1;
dest_cntry CUV
US AAAA1111
US AAAA1111
US NULL
US NULL
prepare s from "select
(select country_id from t1 where location_code = cl1.cntr_dest) as dest_cntry,
(select
max(container_id)
from t2 as cl2
where
cl2.container_id = cl1.container_id and
cl2.cntr_activity_type = 'CUV' and
exists (select location_code
from t1
where
location_code = cl2.cntr_dest and
country_id = dest_cntry)
) as CUV
from
t2 cl1";
execute s;
dest_cntry CUV
US AAAA1111
US AAAA1111
US NULL
US NULL
execute s;
dest_cntry CUV
US AAAA1111
US AAAA1111
US NULL
US NULL
drop table t1,t2;
#
# MDEV-20557: SQL query with duplicate table aliases consistently crashes server
# (Just a testcase)
#
create table t1 (id int, id2 int);
create table t2 (id int, id2 int, a int);
create table t3 (id int);
create table t4 (id int);
select (select 1 from t1 where (exists
(select 1 from t2
where t2.a = (select t4.id from t4 where t4.id = t3.id) and t2.id2 = t1.id2))) dt
from t3;
ERROR 42000: This version of MariaDB doesn't yet support 'SUBQUERY in ROW in left expression of IN/ALL/ANY'
drop table t1,t2,t3,t4;
#
# MDEV-21649: Crash when using nested EXISTS
# (Just a testcase)
#
CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id));
CREATE TABLE t2 (id INT NOT NULL AUTO_INCREMENT, ip_id INT, PRIMARY KEY(id));
CREATE TABLE t3 (id INT NOT NULL AUTO_INCREMENT, storage_method_id INT, storage_target_id INT, PRIMARY KEY(id));
SELECT
W0.`id`
FROM
`t1` W0
WHERE (
EXISTS(
SELECT
V0.`id`
FROM
`t2` V0
WHERE (
EXISTS(
SELECT
U0.`id`
FROM
`t2` U0
INNER JOIN `t3` U4 ON (U0.`id` = U4.`storage_target_id`)
WHERE (
U0.`ip_id` = V0.`ip_id`
AND U4.`storage_method_id` = (
SELECT
U5.`storage_method_id`
FROM
`t3` U5
WHERE
U5.`storage_target_id` = V0.`id`
LIMIT
1
)
)
)
)
)
);
id
drop table t1,t2,t3;
set optimizer_switch=default;
@ -676,6 +676,27 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2010e0
DROP TABLE t1;
#
# MDEV-23282 FLOAT(53,0) badly handles out-of-range values
#
CREATE OR REPLACE TABLE t1 (c1 FLOAT NOT NULL, c2 FLOAT NOT NULL);
INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40);
Warnings:
Warning 1264 Out of range value for column 'c1' at row 1
Warning 1264 Out of range value for column 'c2' at row 1
SELECT c1, c2 FROM t1;
c1 c2
3.40282e38 -3.40282e38
DROP TABLE t1;
CREATE OR REPLACE TABLE t1 (c1 FLOAT(53,0) NOT NULL, c2 FLOAT(53,0) NOT NULL);
INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40);
Warnings:
Warning 1264 Out of range value for column 'c1' at row 1
Warning 1264 Out of range value for column 'c2' at row 1
SELECT c1, c2 FROM t1;
c1 c2
340282346638528860000000000000000000000 -340282346638528860000000000000000000000
DROP TABLE t1;
#
# End of 10.1 tests
#
#
@ -1532,11 +1532,8 @@ select (1.20396873 * 0.89550000 * 0.68000000 * 1.08721696 * 0.99500000 *
1.01500000 * 1.01500000 * 0.99500000)
0.81298807395367312459230693948000000000
create table t1 as select 5.05 / 0.014;
Warnings:
Note 1265 Data truncated for column '5.05 / 0.014' at row 1
show warnings;
Level Code Message
Note 1265 Data truncated for column '5.05 / 0.014' at row 1
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@ -1651,8 +1648,6 @@ my_col
0.12345678912345678912345678912345678912
DROP TABLE t1;
CREATE TABLE t1 SELECT 1 / .123456789123456789123456789123456789123456789123456789123456789123456789123456789 AS my_col;
Warnings:
Note 1265 Data truncated for column 'my_col' at row 1
DESCRIBE t1;
Field Type Null Key Default Extra
my_col decimal(65,4) YES NULL
@ -91,8 +91,6 @@ DROP INDEX test ON t1;
insert into t1 values (10, 1,1,1,1,1,1,1,1,1,1,1,1,1,NULL,0,0,0,1,1,1,1,'one','one');
insert into t1 values (NULL,2,2,2,2,2,2,2,2,2,2,2,2,2,NULL,NULL,NULL,NULL,NULL,NULL,2,2,'two','two,one');
insert ignore into t1 values (0,1/3,3,3,3,3,3,3,3,3,3,3,3,3,NULL,'19970303','10:10:10','19970303101010','','','','3',3,3);
Warnings:
Warning 1265 Data truncated for column 'string' at row 1
insert ignore into t1 values (0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,NULL,19970807,080706,19970403090807,-1,-1,-1,'-1',-1,-1);
Warnings:
Warning 1264 Out of range value for column 'utiny' at row 1
@ -130,7 +128,7 @@ select auto,string,tiny,short,medium,long_int,longlong,real_float,real_double,ut
auto string tiny short medium long_int longlong real_float real_double utiny ushort umedium ulong ulonglong mod(floor(time_stamp/1000000),1000000)-mod(curdate(),1000000) date_field time_field date_time blob_col tinyblob_col mediumblob_col longblob_col
10 1 1 1 1 1 1 1.0 1.0000 1 00001 1 1 1 0 0000-00-00 00:00:00 0000-00-00 00:00:00 1 1 1 1
11 2 2 2 2 2 2 2.0 2.0000 2 00002 2 2 2 0 NULL NULL NULL NULL NULL 2 2
12 0.33333333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3
12 0.3333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3
13 -1 -1 -1 -1 -1 -1 -1.0 -1.0000 0 00000 0 0 0 0 1997-08-07 08:07:06 1997-04-03 09:08:07 -1 -1 -1 -1
14 -429496729 -128 -32768 -8388608 -2147483648 -4294967295 -4294967296.0 -4294967295.0000 0 00000 0 0 0 0 0000-00-00 00:00:00 0000-00-00 00:00:00 -4294967295 -4294967295 -4294967295 -4294967295
15 4294967295 127 32767 8388607 2147483647 4294967295 4294967296.0 4294967295.0000 255 65535 16777215 4294967295 4294967295 0 0000-00-00 00:00:00 0000-00-00 00:00:00 4294967295 4294967295 4294967295 4294967295
@ -182,7 +180,7 @@ Warning 1265 Data truncated for column 'new_field' at row 7
select * from t2;
auto string mediumblob_col new_field
1 2 2 ne
2 0.33333333 ne
2 0.3333 ne
3 -1 -1 ne
4 -429496729 -4294967295 ne
5 4294967295 4294967295 ne
@ -1896,9 +1896,13 @@ Warnings:
Warning 1264 Out of range value for column 'c1' at row 3
INSERT IGNORE INTO t5 VALUES('1e+52','-1e+52','1e+52',5),('1e-52','-1e-52','1e-52',6);
Warnings:
Warning 1264 Out of range value for column 'c1' at row 1
Warning 1264 Out of range value for column 'c2' at row 1
Warning 1264 Out of range value for column 'c3' at row 1
INSERT IGNORE INTO t5 VALUES('1e+53','-1e+53','1e+53',7),('1e-53','-1e-53','1e-53',8);
Warnings:
Warning 1264 Out of range value for column 'c1' at row 1
Warning 1264 Out of range value for column 'c2' at row 1
Warning 1264 Out of range value for column 'c3' at row 1
SELECT * FROM t5;
c1 c2 c3 c4
@ -30,6 +30,7 @@ SELECT * FROM t;
a
1
UPDATE t SET a=3 WHERE a=1;
SET GLOBAL innodb_status_output= @@GLOBAL.innodb_status_output;
# Starting with MariaDB 10.2, innodb_read_only implies READ UNCOMMITTED.
# In earlier versions, this would return the last committed version
# (empty table)!
@ -52,6 +52,7 @@ SELECT * FROM t;
UPDATE t SET a=3 WHERE a=1;
--let $restart_parameters= --innodb-read-only
--source include/restart_mysqld.inc
SET GLOBAL innodb_status_output= @@GLOBAL.innodb_status_output;
--echo # Starting with MariaDB 10.2, innodb_read_only implies READ UNCOMMITTED.
--echo # In earlier versions, this would return the last committed version
--echo # (empty table)!
mysql-test/suite/maria/encrypt-no-key.result (new file, 15 lines)
@ -0,0 +1,15 @@
call mtr.add_suppression('Unknown key id 1. Can''t continue');
set global aria_encrypt_tables= 1;
create table t1 (pk int primary key, a int, key(a)) engine=aria transactional=1;
alter table t1 disable keys;
insert into t1 values (1,1);
alter table t1 enable keys;
ERROR HY000: Unknown key id 1. Can't continue!
repair table t1 use_frm;
Table Op Msg_type Msg_text
test.t1 repair warning Number of rows changed from 0 to 1
test.t1 repair Error Unknown key id 1. Can't continue!
test.t1 repair Error Unknown key id 1. Can't continue!
test.t1 repair status OK
drop table t1;
set global aria_encrypt_tables= default;
mysql-test/suite/maria/encrypt-no-key.test (new file, 14 lines)
@ -0,0 +1,14 @@
#
# MDEV-18496 Crash when Aria encryption is enabled but plugin not available
#
call mtr.add_suppression('Unknown key id 1. Can''t continue');

set global aria_encrypt_tables= 1;
create table t1 (pk int primary key, a int, key(a)) engine=aria transactional=1;
alter table t1 disable keys;
insert into t1 values (1,1);
error 192;
alter table t1 enable keys;
repair table t1 use_frm;
drop table t1;
set global aria_encrypt_tables= default;
mysql-test/suite/roles/drop_current_role.result (new file, 5 lines)
@ -0,0 +1,5 @@
create role r;
set role r;
drop role r;
revoke all on *.* from current_role;
ERROR OP000: Invalid role specification `r`
mysql-test/suite/roles/drop_current_role.test (new file, 9 lines)
@ -0,0 +1,9 @@
--source include/not_embedded.inc
#
# MDEV-22521 Server crashes in traverse_role_graph_up or Assertion `user' fails in traverse_role_graph_impl
#
create role r;
set role r;
drop role r;
error ER_INVALID_ROLE;
revoke all on *.* from current_role;
@ -51,9 +51,9 @@ INSERT into t1(name, salary, income_tax) values('Record_2', 501, 501*2.5/1000);
|
||||
INSERT into t1(name, salary, income_tax) values('Record_3', 210, 210*2.5/1000);
|
||||
SELECT * from t1;
|
||||
id name salary income_tax
|
||||
1 Record_1 100011 250.027
|
||||
2 Record_2 501 1.2525
|
||||
3 Record_3 210 0.525
|
||||
1 Record_1 100011 250.03
|
||||
2 Record_2 501 1.25
|
||||
3 Record_3 210 0.53
|
||||
connect test_con2, localhost, root,,;
|
||||
connection test_con2;
|
||||
## Verifying session & global value of variable ##
|
||||
@ -69,11 +69,11 @@ INSERT into t1(name, salary, income_tax) values('Record_5', 501, 501*2.5/1000);
|
||||
INSERT into t1(name, salary, income_tax) values('Record_6', 210, 210*2.5/1000);
|
||||
SELECT * from t1;
|
||||
id name salary income_tax
|
||||
1 Record_1 100011 250.027
|
||||
2 Record_2 501 1.2525
|
||||
3 Record_3 210 0.525
|
||||
4 Record_4 100011 250.027
|
||||
5 Record_5 501 1.2525
|
||||
1 Record_1 100011 250.03
|
||||
2 Record_2 501 1.25
|
||||
3 Record_3 210 0.53
|
||||
4 Record_4 100011 250.028
|
||||
5 Record_5 501 1.253
|
||||
6 Record_6 210 0.525
|
||||
## Dropping table t1 ##
|
||||
drop table t1;
|
||||
|
@ -1215,7 +1215,7 @@
|
||||
VARIABLE_NAME INNODB_VERSION
|
||||
SESSION_VALUE NULL
|
||||
-GLOBAL_VALUE 5.6.49
|
||||
+GLOBAL_VALUE 5.6.48-88.0
|
||||
+GLOBAL_VALUE 5.6.49-89.0
|
||||
GLOBAL_VALUE_ORIGIN COMPILE-TIME
|
||||
DEFAULT_VALUE NULL
|
||||
VARIABLE_SCOPE GLOBAL
|
||||
|
@ -685,7 +685,7 @@
|
||||
VARIABLE_NAME INNODB_VERSION
|
||||
SESSION_VALUE NULL
|
||||
-GLOBAL_VALUE 5.6.49
|
||||
+GLOBAL_VALUE 5.6.48-88.0
|
||||
+GLOBAL_VALUE 5.6.49-89.0
|
||||
GLOBAL_VALUE_ORIGIN COMPILE-TIME
|
||||
DEFAULT_VALUE NULL
|
||||
VARIABLE_SCOPE GLOBAL
|
||||
|
@ -29,7 +29,7 @@ set time_zone='+1:00';
|
||||
flush tables;
|
||||
select * from t1;
|
||||
a b v
|
||||
1 2 0.3333333330000000000
|
||||
1 2 0.3333000000000000000
|
||||
select * from t8;
|
||||
a b v
|
||||
1234567890 2 2009-02-14 00:31:30
|
||||
|
@ -20,6 +20,7 @@ show grants for foo; # role
|
||||
--error ER_DBACCESS_DENIED_ERROR
|
||||
show grants for foo@'%'; # user
|
||||
--connection default
|
||||
--disconnect conn_1
|
||||
drop user test, foo;
|
||||
drop role foo;
|
||||
|
||||
@ -33,3 +34,24 @@ REVOKE EXECUTE ON PROCEDURE sp FROM u;
|
||||
--error ER_TABLE_NOT_LOCKED
|
||||
REVOKE PROCESS ON *.* FROM u;
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# MDEV-23010 UPDATE privilege at Database and Table level fail to update with SELECT command denied to user
|
||||
#
|
||||
create database mysqltest1;
|
||||
use mysqltest1;
|
||||
create table t1(id int);
|
||||
insert t1 values(2);
|
||||
create user u1@localhost;
|
||||
grant select on mysqltest1.t1 to u1@localhost;
|
||||
grant update on mysqltest1.* to u1@localhost;
|
||||
connect u1, localhost, u1;
|
||||
update mysqltest1.t1 set id=1 where id=2;
|
||||
connection default;
|
||||
disconnect u1;
|
||||
drop user u1@localhost;
|
||||
drop database mysqltest1;
|
||||
|
||||
#
|
||||
# End of 10.1 tests
|
||||
#
|
||||
|
@ -2970,6 +2970,34 @@ CREATE TABLE t1(a BINARY(80)) PARTITION BY KEY(a) PARTITIONS 3;
|
||||
SELECT 1 FROM t1 WHERE a XOR 'a';
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # Bug #25207522: INCORRECT ORDER-BY BEHAVIOR ON A PARTITIONED TABLE
|
||||
--echo # WITH A COMPOSITE PREFIX INDEX
|
||||
--echo #
|
||||
create table t1(id int unsigned not null,
|
||||
data varchar(2) default null,
|
||||
key data_idx (data(1),id)
|
||||
) default charset=utf8
|
||||
partition by range (id) (
|
||||
partition p10 values less than (10),
|
||||
partition p20 values less than (20)
|
||||
);
|
||||
insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ;
|
||||
select id from t1 where data = 'ab' order by id;
|
||||
drop table t1;
|
||||
|
||||
create table t1(id int unsigned not null,
|
||||
data text default null,
|
||||
key data_idx (data(1),id)
|
||||
) default charset=utf8
|
||||
partition by range (id) (
|
||||
partition p10 values less than (10),
|
||||
partition p20 values less than (20)
|
||||
);
|
||||
insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ;
|
||||
select id from t1 where data = 'ab' order by id;
|
||||
drop table t1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.1 tests
|
||||
--echo #
|
||||
|
@ -11,7 +11,33 @@ CREATE TABLE t1 (pk int PRIMARY KEY, a bit(1), INDEX idx(a)
|
||||
INSERT INTO t1 VALUES (1,1),(2,0),(3,0),(4,1);
|
||||
|
||||
ANALYZE TABLE t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-21472: ALTER TABLE ... ANALYZE PARTITION ... with EITS reads and locks all rows
|
||||
--echo #
|
||||
CREATE TABLE t1 (
|
||||
id int(11) auto_increment primary key,
|
||||
c1 int(11) DEFAULT NULL
|
||||
) PARTITION BY RANGE (id) (
|
||||
PARTITION p0 VALUES LESS THAN (4),
|
||||
PARTITION p1 VALUES LESS THAN MAXVALUE
|
||||
);
|
||||
|
||||
insert into t1(c1) values (1),(1),(1),(1), (1),(1),(1),(1);
|
||||
insert into t1(c1) select c1 from t1;
|
||||
insert into t1(c1) select c1 from t1;
|
||||
|
||||
select count(*) from t1;
|
||||
select count(*) from t1 where id <4;
|
||||
flush status;
|
||||
set session use_stat_tables='preferably';
|
||||
|
||||
--echo # Must NOT show "Engine-independent statistics collected":
|
||||
alter table t1 analyze partition p0;
|
||||
|
||||
--echo # Should not have Handler_read_rnd_next=34
|
||||
show session status like 'Handler_read_rnd%';
|
||||
drop table t1;
|
||||
|
||||
SET use_stat_tables = DEFAULT;
|
||||
|
||||
DROP TABLE t1;
|
||||
|
@ -2115,6 +2115,32 @@ EXPLAIN EXTENDED SELECT * FROM t1 where ( t1.l1 < ANY (SELECT MAX(t2.v1) FROM t
|
||||
SELECT * FROM t1 where ( t1.l1 < ANY (SELECT MAX(t2.v1) FROM t2));
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-19232: Floating point precision / value comparison problem
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (region varchar(60), area decimal(10,0), population decimal(11,0));
|
||||
INSERT INTO t1 VALUES ('Central America and the Caribbean',91,11797);
|
||||
INSERT INTO t1 VALUES ('Central America and the Caribbean',442,66422);
|
||||
|
||||
SET @save_optimizer_switch=@@optimizer_switch;
|
||||
SET optimizer_switch='subquery_cache=on';
|
||||
|
||||
SELECT
|
||||
population, area, population/area,
|
||||
cast(population/area as DECIMAL(20,9)) FROM t1 LIMIT 1;
|
||||
|
||||
SELECT * FROM t1 A
|
||||
WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region);
|
||||
|
||||
SET optimizer_switch='subquery_cache=off';
|
||||
SELECT * FROM t1 A
|
||||
WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region);
|
||||
|
||||
SET @@optimizer_switch= @save_optimizer_switch;
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-22852: SIGSEGV in sortlength (optimized builds)
|
||||
--echo #
|
||||
|
@ -829,5 +829,117 @@ DROP TABLE t1;
|
||||
|
||||
--echo # End of 10.0 tests
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-23221: A subquery causes crash
|
||||
--echo #
|
||||
create table t1 (
|
||||
location_code varchar(10),
|
||||
country_id varchar(10)
|
||||
);
|
||||
insert into t1 values ('HKG', 'HK');
|
||||
insert into t1 values ('NYC', 'US');
|
||||
insert into t1 values ('LAX', 'US');
|
||||
|
||||
create table t2 (
|
||||
container_id varchar(10),
|
||||
cntr_activity_type varchar(10),
|
||||
cntr_dest varchar(10)
|
||||
);
|
||||
insert into t2 values ('AAAA1111', 'VSL', 'NYC');
|
||||
insert into t2 values ('AAAA1111', 'CUV', 'NYC');
|
||||
insert into t2 values ('BBBB2222', 'VSL', 'LAX');
|
||||
insert into t2 values ('BBBB2222', 'XYZ', 'LAX');
|
||||
|
||||
let $query=
|
||||
select
|
||||
(select country_id from t1 where location_code = cl1.cntr_dest) as dest_cntry,
|
||||
(select
|
||||
max(container_id)
|
||||
from t2 as cl2
|
||||
where
|
||||
cl2.container_id = cl1.container_id and
|
||||
cl2.cntr_activity_type = 'CUV' and
|
||||
exists (select location_code
|
||||
from t1
|
||||
where
|
||||
location_code = cl2.cntr_dest and
|
||||
country_id = dest_cntry)
|
||||
) as CUV
|
||||
from
|
||||
t2 cl1;
|
||||
|
||||
--echo # Must not crash or return an error:
|
||||
eval $query;
|
||||
|
||||
eval prepare s from "$query";
|
||||
execute s;
|
||||
execute s;
|
||||
|
||||
drop table t1,t2;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-20557: SQL query with duplicate table aliases consistently crashes server
|
||||
--echo # (Just a testcase)
|
||||
--echo #
|
||||
|
||||
create table t1 (id int, id2 int);
|
||||
create table t2 (id int, id2 int, a int);
|
||||
create table t3 (id int);
|
||||
create table t4 (id int);
|
||||
|
||||
--error ER_NOT_SUPPORTED_YET
|
||||
select (select 1 from t1 where (exists
|
||||
(select 1 from t2
|
||||
where t2.a = (select t4.id from t4 where t4.id = t3.id) and t2.id2 = t1.id2))) dt
|
||||
from t3;
|
||||
|
||||
drop table t1,t2,t3,t4;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-21649: Crash when using nested EXISTS
|
||||
--echo # (Just a testcase)
|
||||
--echo #
|
||||
CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id));
|
||||
CREATE TABLE t2 (id INT NOT NULL AUTO_INCREMENT, ip_id INT, PRIMARY KEY(id));
|
||||
CREATE TABLE t3 (id INT NOT NULL AUTO_INCREMENT, storage_method_id INT, storage_target_id INT, PRIMARY KEY(id));
|
||||
|
||||
SELECT
|
||||
W0.`id`
|
||||
FROM
|
||||
`t1` W0
|
||||
WHERE (
|
||||
EXISTS(
|
||||
SELECT
|
||||
V0.`id`
|
||||
FROM
|
||||
`t2` V0
|
||||
WHERE (
|
||||
EXISTS(
|
||||
SELECT
|
||||
U0.`id`
|
||||
FROM
|
||||
`t2` U0
|
||||
INNER JOIN `t3` U4 ON (U0.`id` = U4.`storage_target_id`)
|
||||
WHERE (
|
||||
U0.`ip_id` = V0.`ip_id`
|
||||
AND U4.`storage_method_id` = (
|
||||
SELECT
|
||||
U5.`storage_method_id`
|
||||
FROM
|
||||
`t3` U5
|
||||
WHERE
|
||||
U5.`storage_target_id` = V0.`id`
|
||||
LIMIT
|
||||
1
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
);
|
||||
|
||||
drop table t1,t2,t3;
|
||||
|
||||
#restore defaults
|
||||
set optimizer_switch=default;
|
||||
|
@ -485,6 +485,20 @@ EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2010e0 AND a>=2010e0;
|
||||
DROP TABLE t1;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-23282 FLOAT(53,0) badly handles out-of-range values
|
||||
--echo #
|
||||
|
||||
CREATE OR REPLACE TABLE t1 (c1 FLOAT NOT NULL, c2 FLOAT NOT NULL);
|
||||
INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40);
|
||||
SELECT c1, c2 FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
CREATE OR REPLACE TABLE t1 (c1 FLOAT(53,0) NOT NULL, c2 FLOAT(53,0) NOT NULL);
|
||||
INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40);
|
||||
SELECT c1, c2 FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.1 tests
|
||||
--echo #
|
||||
|
@ -34,23 +34,21 @@
|
||||
#include <execinfo.h>
|
||||
#endif
|
||||
|
||||
#ifdef __linux__
|
||||
#define PTR_SANE(p) ((p) && (char*)(p) >= heap_start && (char*)(p) <= heap_end)
|
||||
|
||||
static char *heap_start;
|
||||
|
||||
#if(defined HAVE_BSS_START) && !(defined __linux__)
|
||||
extern char *__bss_start;
|
||||
#endif
|
||||
#else
|
||||
#define PTR_SANE(p) (p)
|
||||
#endif /* __linux */
|
||||
|
||||
#ifdef __linux__
|
||||
|
||||
void my_init_stacktrace()
|
||||
{
|
||||
#if(defined HAVE_BSS_START) && !(defined __linux__)
|
||||
heap_start = (char*) &__bss_start;
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef __linux__
|
||||
|
||||
static void print_buffer(char *buffer, size_t count)
|
||||
{
|
||||
const char s[]= " ";
|
||||
@ -149,15 +147,15 @@ static int safe_print_str(const char *addr, int max_len)
|
||||
|
||||
int my_safe_print_str(const char* val, int max_len)
|
||||
{
|
||||
#ifdef __linux__
|
||||
char *heap_end;
|
||||
|
||||
#ifdef __linux__
|
||||
// Try and make use of /proc filesystem to safely print memory contents.
|
||||
if (!safe_print_str(val, max_len))
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
heap_end= (char*) sbrk(0);
|
||||
#endif
|
||||
|
||||
if (!PTR_SANE(val))
|
||||
{
|
||||
|
@ -68,6 +68,7 @@ Usage: $0 [OPTIONS]
|
||||
--cross-bootstrap For internal use. Used when building the MariaDB system
|
||||
tables on a different host than the target.
|
||||
--datadir=path The path to the MariaDB data directory.
|
||||
--no-defaults Don't read default options from any option file.
|
||||
--defaults-extra-file=name
|
||||
Read this file after the global files are read.
|
||||
--defaults-file=name Only read default options from the given file name.
|
||||
@ -80,8 +81,6 @@ Usage: $0 [OPTIONS]
|
||||
--help Display this help and exit.
|
||||
--ldata=path The path to the MariaDB data directory. Same as
|
||||
--datadir.
|
||||
--no-defaults Don't read default options from any option file.
|
||||
--defaults-file=path Read only this configuration file.
|
||||
--rpm For internal use. This option is used by RPM files
|
||||
during the MariaDB installation process.
|
||||
--skip-auth-anonymous-user
|
||||
|
@ -290,7 +290,7 @@ uint my_datetime_binary_length(uint dec)
|
||||
|
||||
/*
|
||||
On disk we store as unsigned number with DATETIMEF_INT_OFS offset,
|
||||
for HA_KETYPE_BINARY compatibilty purposes.
|
||||
for HA_KETYPE_BINARY compatibility purposes.
|
||||
*/
|
||||
#define DATETIMEF_INT_OFS 0x8000000000LL
|
||||
|
||||
|
@ -33,7 +33,7 @@
|
||||
/*
|
||||
Action to perform at a synchronization point.
|
||||
NOTE: This structure is moved around in memory by realloc(), qsort(),
|
||||
and memmove(). Do not add objects with non-trivial constuctors
|
||||
and memmove(). Do not add objects with non-trivial constructors
|
||||
or destructors, which might prevent moving of this structure
|
||||
with these functions.
|
||||
*/
|
||||
@ -542,7 +542,7 @@ static void debug_sync_reset(THD *thd)
|
||||
@description
|
||||
Removing an action mainly means to decrement the ds_active counter.
|
||||
But if the action is between other active action in the array, then
|
||||
the array needs to be shrinked. The active actions above the one to
|
||||
the array needs to be shrunk. The active actions above the one to
|
||||
be removed have to be moved down by one slot.
|
||||
*/
|
||||
|
||||
|
@ -236,7 +236,7 @@ static File open_error_msg_file(const char *file_name, const char *language,
|
||||
MYF(0))) < 0)
|
||||
{
|
||||
/*
|
||||
Trying pre-5.4 sematics of the --language parameter.
|
||||
Trying pre-5.4 semantics of the --language parameter.
|
||||
It included the language-specific part, e.g.:
|
||||
--language=/path/to/english/
|
||||
*/
|
||||
|
@ -75,8 +75,8 @@ int initialize_encryption_plugin(st_plugin_int *plugin)
|
||||
(struct st_mariadb_encryption*) plugin->plugin->info;
|
||||
|
||||
/*
|
||||
Copmiler on Spark doesn't like the '?' operator here as it
|
||||
belives the (uint (*)...) implies the C++ call model.
|
||||
Compiler on Spark doesn't like the '?' operator here as it
|
||||
believes the (uint (*)...) implies the C++ call model.
|
||||
*/
|
||||
if (handle->crypt_ctx_size)
|
||||
encryption_handler.encryption_ctx_size_func= handle->crypt_ctx_size;
|
||||
|
@ -159,7 +159,7 @@ Event_creation_ctx::load_from_db(THD *thd,
|
||||
/*************************************************************************/
|
||||
|
||||
/*
|
||||
Initiliazes dbname and name of an Event_queue_element_for_exec
|
||||
Initializes dbname and name of an Event_queue_element_for_exec
|
||||
object
|
||||
|
||||
SYNOPSIS
|
||||
|
@ -669,7 +669,7 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data,
|
||||
DBUG_PRINT("info", ("name: %.*s", (int) parse_data->name.length,
|
||||
parse_data->name.str));
|
||||
|
||||
DBUG_PRINT("info", ("check existance of an event with the same name"));
|
||||
DBUG_PRINT("info", ("check existence of an event with the same name"));
|
||||
if (!find_named_event(parse_data->dbname, parse_data->name, table))
|
||||
{
|
||||
if (thd->lex->create_info.or_replace())
|
||||
|
@ -100,7 +100,7 @@ Event_parse_data::init_name(THD *thd, sp_name *spn)
|
||||
ENDS or AT is in the past, we are trying to create an event that
|
||||
will never be executed. If it has ON COMPLETION NOT PRESERVE
|
||||
(default), then it would normally be dropped already, so on CREATE
|
||||
EVENT we give a warning, and do not create anyting. On ALTER EVENT
|
||||
EVENT we give a warning, and do not create anything. On ALTER EVENT
|
||||
we give a error, and do not change the event.
|
||||
|
||||
If the event has ON COMPLETION PRESERVE, then we see if the event is
|
||||
@ -359,7 +359,7 @@ wrong_value:
|
||||
EVERY 5 MINUTE STARTS "2004-12-12 10:00:00" means that
|
||||
the event will be executed every 5 minutes but this will
|
||||
start at the date shown above. Expressions are possible :
|
||||
DATE_ADD(NOW(), INTERVAL 1 DAY) -- start tommorow at
|
||||
DATE_ADD(NOW(), INTERVAL 1 DAY) -- start tomorrow at
|
||||
same time.
|
||||
|
||||
RETURN VALUE
|
||||
@ -413,7 +413,7 @@ wrong_value:
|
||||
EVERY 5 MINUTE ENDS "2004-12-12 10:00:00" means that
|
||||
the event will be executed every 5 minutes but this will
|
||||
end at the date shown above. Expressions are possible :
|
||||
DATE_ADD(NOW(), INTERVAL 1 DAY) -- end tommorow at
|
||||
DATE_ADD(NOW(), INTERVAL 1 DAY) -- end tomorrow at
|
||||
same time.
|
||||
|
||||
RETURN VALUE
|
||||
|
@ -357,7 +357,7 @@ Event_queue::drop_matching_events(THD *thd, LEX_STRING pattern,
|
||||
We don't call mysql_cond_broadcast(&COND_queue_state);
|
||||
If we remove the top event:
|
||||
1. The queue is empty. The scheduler will wake up at some time and
|
||||
realize that the queue is empty. If create_event() comes inbetween
|
||||
realize that the queue is empty. If create_event() comes in between
|
||||
it will signal the scheduler
|
||||
2. The queue is not empty, but the next event after the previous top,
|
||||
won't be executed any time sooner than the element we removed. Hence,
|
||||
|
@ -128,7 +128,7 @@ bool Events::check_if_system_tables_error()
|
||||
|
||||
/**
|
||||
Reconstructs interval expression from interval type and expression
|
||||
value that is in form of a value of the smalles entity:
|
||||
value that is in form of a value of the smallest entity:
|
||||
For
|
||||
YEAR_MONTH - expression is in months
|
||||
DAY_MINUTE - expression is in minutes
|
||||
|
sql/field.cc (99 changed lines)
@ -47,7 +47,7 @@
|
||||
#define MAX_EXPONENT 1024
|
||||
|
||||
/*****************************************************************************
|
||||
Instansiate templates and static variables
|
||||
Instantiate templates and static variables
|
||||
*****************************************************************************/
|
||||
|
||||
static const char *zero_timestamp="0000-00-00 00:00:00.000000";
|
||||
@ -91,7 +91,7 @@ const char field_separator=',';
|
||||
/*
|
||||
Rules for merging different types of fields in UNION
|
||||
|
||||
NOTE: to avoid 256*256 table, gap in table types numeration is skiped
|
||||
NOTE: to avoid 256*256 table, gap in table types numeration is skipped
|
||||
following #defines describe that gap and how to canculate number of fields
|
||||
and index of field in this array.
|
||||
*/
|
||||
@ -1526,7 +1526,7 @@ Item *Field_num::get_equal_zerofill_const_item(THD *thd, const Context &ctx,
|
||||
|
||||
|
||||
/**
|
||||
Contruct warning parameters using thd->no_errors
|
||||
Construct warning parameters using thd->no_errors
|
||||
to determine whether to generate or suppress warnings.
|
||||
We can get here in a query like this:
|
||||
SELECT COUNT(@@basedir);
|
||||
@ -1574,7 +1574,7 @@ Value_source::Converter_string_to_number::check_edom_and_truncation(THD *thd,
|
||||
if (filter.want_warning_edom())
|
||||
{
|
||||
/*
|
||||
We can use err.ptr() here as ErrConvString is guranteed to put an
|
||||
We can use err.ptr() here as ErrConvString is guaranteed to put an
|
||||
end \0 here.
|
||||
*/
|
||||
THD *wthd= thd ? thd : current_thd;
|
||||
@ -1606,7 +1606,7 @@ Value_source::Converter_string_to_number::check_edom_and_truncation(THD *thd,
|
||||
- found garbage at the end of the string.
|
||||
|
||||
@param type Data type name (e.g. "decimal", "integer", "double")
|
||||
@param edom Indicates that the string-to-number routine retuned
|
||||
@param edom Indicates that the string-to-number routine returned
|
||||
an error code equivalent to EDOM (value out of domain),
|
||||
i.e. the string fully consisted of garbage and the
|
||||
conversion routine could not get any digits from it.
|
||||
@ -1670,7 +1670,7 @@ int Field_num::check_edom_and_truncation(const char *type, bool edom,
|
||||
|
||||
|
||||
/*
|
||||
Conver a string to an integer then check bounds.
|
||||
Convert a string to an integer then check bounds.
|
||||
|
||||
SYNOPSIS
|
||||
Field_num::get_int
|
||||
@ -2671,7 +2671,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
|
||||
We only have to generate warnings if count_cuted_fields is set.
|
||||
This is to avoid extra checks of the number when they are not needed.
|
||||
Even if this flag is not set, it's OK to increment warnings, if
|
||||
it makes the code easer to read.
|
||||
it makes the code easier to read.
|
||||
*/
|
||||
|
||||
if (get_thd()->count_cuted_fields)
|
||||
@ -2754,7 +2754,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
|
||||
}
|
||||
|
||||
/*
|
||||
Now write the formated number
|
||||
Now write the formatted number
|
||||
|
||||
First the digits of the int_% parts.
|
||||
Do we have enough room to write these digits ?
|
||||
@ -3331,7 +3331,7 @@ int Field_new_decimal::store(const char *from, uint length,
|
||||
If check_decimal() failed because of EDOM-alike error,
|
||||
(e.g. E_DEC_BAD_NUM), we have to initialize decimal_value to zero.
|
||||
Note: if check_decimal() failed because of truncation,
|
||||
decimal_value is alreay properly initialized.
|
||||
decimal_value is already properly initialized.
|
||||
*/
|
||||
my_decimal_set_zero(&decimal_value);
|
||||
/*
|
||||
@ -4835,11 +4835,12 @@ int truncate_double(double *nr, uint field_length, uint dec,
|
||||
{
|
||||
uint order= field_length - dec;
|
||||
uint step= array_elements(log_10) - 1;
|
||||
max_value= 1.0;
|
||||
double max_value_by_dec= 1.0;
|
||||
for (; order > step; order-= step)
|
||||
max_value*= log_10[step];
|
||||
max_value*= log_10[order];
|
||||
max_value-= 1.0 / log_10[dec];
|
||||
max_value_by_dec*= log_10[step];
|
||||
max_value_by_dec*= log_10[order];
|
||||
max_value_by_dec-= 1.0 / log_10[dec];
|
||||
set_if_smaller(max_value, max_value_by_dec);
|
||||
|
||||
/* Check for infinity so we don't get NaN in calculations */
|
||||
if (!std::isinf(res))
|
||||
@ -5134,7 +5135,7 @@ Field_timestamp::Field_timestamp(uchar *ptr_arg, uint32 len_arg,
|
||||
{
|
||||
/*
|
||||
We mark the flag with TIMESTAMP_FLAG to indicate to the client that
|
||||
this field will be automaticly updated on insert.
|
||||
this field will be automatically updated on insert.
|
||||
*/
|
||||
flags|= TIMESTAMP_FLAG;
|
||||
if (unireg_check != TIMESTAMP_DN_FIELD)
|
||||
@ -7589,7 +7590,7 @@ Field_string::unpack(uchar *to, const uchar *from, const uchar *from_end,
|
||||
with the real type. Since all allowable types have 0xF as most
|
||||
significant bits of the metadata word, lengths <256 will not affect
|
||||
the real type at all, while all other values will result in a
|
||||
non-existant type in the range 17-244.
|
||||
non-existent type in the range 17-244.
|
||||
|
||||
@see Field_string::unpack
|
||||
|
||||
@ -7781,8 +7782,7 @@ void Field_varstring::mark_unused_memory_as_defined()
|
||||
#endif
|
||||
|
||||
|
||||
int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
|
||||
uint max_len)
|
||||
int Field_varstring::cmp(const uchar *a_ptr, const uchar *b_ptr)
|
||||
{
|
||||
uint a_length, b_length;
|
||||
int diff;
|
||||
@ -7797,8 +7797,8 @@ int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
|
||||
a_length= uint2korr(a_ptr);
|
||||
b_length= uint2korr(b_ptr);
|
||||
}
|
||||
set_if_smaller(a_length, max_len);
|
||||
set_if_smaller(b_length, max_len);
|
||||
set_if_smaller(a_length, field_length);
|
||||
set_if_smaller(b_length, field_length);
|
||||
diff= field_charset->coll->strnncollsp(field_charset,
|
||||
a_ptr+
|
||||
length_bytes,
|
||||
@ -7810,6 +7810,43 @@ int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
|
||||
}
|
||||
|
||||
|
||||
static int cmp_str_prefix(const uchar *ua, size_t alen, const uchar *ub,
|
||||
size_t blen, size_t prefix, CHARSET_INFO *cs)
|
||||
{
|
||||
const char *a= (char*)ua, *b= (char*)ub;
|
||||
MY_STRCOPY_STATUS status;
|
||||
prefix/= cs->mbmaxlen;
|
||||
alen= cs->cset->well_formed_char_length(cs, a, a + alen, prefix, &status);
|
||||
blen= cs->cset->well_formed_char_length(cs, b, b + blen, prefix, &status);
|
||||
return cs->coll->strnncollsp(cs, ua, alen, ub, blen);
|
||||
}
|
||||
|
||||
|
||||
|
||||
int Field_varstring::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr,
|
||||
size_t prefix_len)
|
||||
{
|
||||
/* avoid expensive well_formed_char_length if possible */
|
||||
if (prefix_len == table->field[field_index]->field_length)
|
||||
return Field_varstring::cmp(a_ptr, b_ptr);
|
||||
|
||||
size_t a_length, b_length;
|
||||
|
||||
if (length_bytes == 1)
|
||||
{
|
||||
a_length= *a_ptr;
|
||||
b_length= *b_ptr;
|
||||
}
|
||||
else
|
||||
{
|
||||
a_length= uint2korr(a_ptr);
|
||||
b_length= uint2korr(b_ptr);
|
||||
}
|
||||
return cmp_str_prefix(a_ptr+length_bytes, a_length, b_ptr+length_bytes,
|
||||
b_length, prefix_len, field_charset);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
@note
|
||||
varstring and blob keys are ALWAYS stored with a 2 byte length prefix
|
||||
@ -8374,16 +8411,24 @@ int Field_blob::cmp(const uchar *a,uint32 a_length, const uchar *b,
|
||||
}
|
||||
|
||||
|
||||
int Field_blob::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
|
||||
uint max_length)
|
||||
int Field_blob::cmp(const uchar *a_ptr, const uchar *b_ptr)
|
||||
{
|
||||
uchar *blob1,*blob2;
|
||||
memcpy(&blob1, a_ptr+packlength, sizeof(char*));
|
||||
memcpy(&blob2, b_ptr+packlength, sizeof(char*));
|
||||
uint a_len= get_length(a_ptr), b_len= get_length(b_ptr);
|
||||
set_if_smaller(a_len, max_length);
|
||||
set_if_smaller(b_len, max_length);
|
||||
return Field_blob::cmp(blob1,a_len,blob2,b_len);
|
||||
size_t a_len= get_length(a_ptr), b_len= get_length(b_ptr);
|
||||
return cmp(blob1, a_len, blob2, b_len);
|
||||
}
|
||||
|
||||
|
||||
int Field_blob::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr,
|
||||
size_t prefix_len)
|
||||
{
|
||||
uchar *blob1,*blob2;
|
||||
memcpy(&blob1, a_ptr+packlength, sizeof(char*));
|
||||
memcpy(&blob2, b_ptr+packlength, sizeof(char*));
|
||||
size_t a_len= get_length(a_ptr), b_len= get_length(b_ptr);
|
||||
return cmp_str_prefix(blob1, a_len, blob2, b_len, prefix_len, field_charset);
|
||||
}
|
||||
|
||||
|
||||
@ -9709,7 +9754,7 @@ my_decimal *Field_bit::val_decimal(my_decimal *deciaml_value)
|
||||
The a and b pointer must be pointers to the field in a record
|
||||
(not the table->record[0] necessarily)
|
||||
*/
|
||||
int Field_bit::cmp_max(const uchar *a, const uchar *b, uint max_len)
|
||||
int Field_bit::cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len)
|
||||
{
|
||||
my_ptrdiff_t a_diff= a - ptr;
|
||||
my_ptrdiff_t b_diff= b - ptr;
|
||||
@ -10278,7 +10323,7 @@ bool Column_definition::check(THD *thd)
|
||||
break;
|
||||
case MYSQL_TYPE_VARCHAR:
|
||||
/*
|
||||
Long VARCHAR's are automaticly converted to blobs in mysql_prepare_table
|
||||
Long VARCHAR's are automatically converted to blobs in mysql_prepare_table
|
||||
if they don't have a default value
|
||||
*/
|
||||
max_field_charlength= MAX_FIELD_VARCHARLENGTH;
|
||||
|
sql/field.h (30 changed lines)
@ -270,7 +270,7 @@ protected:
|
||||
};
|
||||
|
||||
|
||||
// String-to-number convertion methods for the old code compatibility
|
||||
// String-to-number conversion methods for the old code compatibility
|
||||
longlong longlong_from_string_with_check(CHARSET_INFO *cs, const char *cptr,
|
||||
const char *end) const
|
||||
{
|
||||
@ -351,7 +351,7 @@ public:
|
||||
/*
|
||||
Item context attributes.
|
||||
Comparison functions pass their attributes to propagate_equal_fields().
|
||||
For exmple, for string comparison, the collation of the comparison
|
||||
For example, for string comparison, the collation of the comparison
|
||||
operation is important inside propagate_equal_fields().
|
||||
*/
|
||||
class Context
|
||||
@ -484,7 +484,7 @@ inline bool is_timestamp_type(enum_field_types type)
|
||||
|
||||
|
||||
/**
|
||||
Convert temporal real types as retuned by field->real_type()
|
||||
Convert temporal real types as returned by field->real_type()
|
||||
to field type as returned by field->type().
|
||||
|
||||
@param real_type Real type.
|
||||
@ -1055,9 +1055,13 @@ public:
|
||||
return type();
|
||||
}
|
||||
inline int cmp(const uchar *str) { return cmp(ptr,str); }
|
||||
virtual int cmp_max(const uchar *a, const uchar *b, uint max_len)
|
||||
{ return cmp(a, b); }
|
||||
virtual int cmp(const uchar *,const uchar *)=0;
|
||||
/*
|
||||
The following method is used for comparing prefix keys.
|
||||
Currently it's only used in partitioning.
|
||||
*/
|
||||
virtual int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len)
|
||||
{ return cmp(a, b); }
|
||||
virtual int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U)
|
||||
{ return memcmp(a,b,pack_length()); }
|
||||
virtual int cmp_offset(uint row_offset)
|
||||
@ -3268,11 +3272,8 @@ public:
|
||||
longlong val_int(void);
|
||||
String *val_str(String*,String *);
|
||||
my_decimal *val_decimal(my_decimal *);
|
||||
int cmp_max(const uchar *, const uchar *, uint max_length);
|
||||
int cmp(const uchar *a,const uchar *b)
|
||||
{
|
||||
return cmp_max(a, b, ~0U);
|
||||
}
|
||||
int cmp(const uchar *a,const uchar *b);
|
||||
int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len);
|
||||
void sort_string(uchar *buff,uint length);
|
||||
uint get_key_image(uchar *buff,uint length, imagetype type);
|
||||
void set_key_image(const uchar *buff,uint length);
|
||||
@ -3389,9 +3390,8 @@ public:
|
||||
longlong val_int(void);
|
||||
String *val_str(String*,String *);
|
||||
my_decimal *val_decimal(my_decimal *);
|
||||
int cmp_max(const uchar *, const uchar *, uint max_length);
|
||||
int cmp(const uchar *a,const uchar *b)
|
||||
{ return cmp_max(a, b, ~0U); }
|
||||
int cmp(const uchar *a,const uchar *b);
|
||||
int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len);
|
||||
int cmp(const uchar *a, uint32 a_length, const uchar *b, uint32 b_length);
|
||||
int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U);
|
||||
int key_cmp(const uchar *,const uchar*);
|
||||
@ -3725,7 +3725,7 @@ private:
|
||||
This is the reason:
|
||||
- Field_bit::cmp_binary() is only implemented in the base class
|
||||
(Field::cmp_binary()).
|
||||
- Field::cmp_binary() currenly use pack_length() to calculate how
|
||||
- Field::cmp_binary() currently uses pack_length() to calculate how
|
||||
long the data is.
|
||||
- pack_length() includes size of the bits stored in the NULL bytes
|
||||
of the record.
|
||||
@ -3780,7 +3780,7 @@ public:
|
||||
}
|
||||
int cmp_binary_offset(uint row_offset)
|
||||
{ return cmp_offset(row_offset); }
|
||||
int cmp_max(const uchar *a, const uchar *b, uint max_length);
|
||||
int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len);
|
||||
int key_cmp(const uchar *a, const uchar *b)
|
||||
{ return cmp_binary((uchar *) a, (uchar *) b); }
|
||||
int key_cmp(const uchar *str, uint length);
|
||||
|
@ -229,7 +229,7 @@ static void do_skip(Copy_field *copy __attribute__((unused)))
|
||||
|
||||
note: if the record we're copying from is NULL-complemetned (i.e.
|
||||
from_field->table->null_row==1), it will also have all NULLable columns to be
|
||||
set to NULLs, so we dont need to check table->null_row here.
|
||||
set to NULLs, so we don't need to check table->null_row here.
|
||||
*/
|
||||
|
||||
static void do_copy_null(Copy_field *copy)
|
||||
|
@ -885,12 +885,12 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
|
||||
}
|
||||
if (!quick_select)
|
||||
{
|
||||
(void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */
|
||||
(void) file->extra(HA_EXTRA_NO_CACHE); /* End caching of records */
|
||||
if (!next_pos)
|
||||
file->ha_rnd_end();
|
||||
}
|
||||
|
||||
/* Signal we should use orignal column read and write maps */
|
||||
/* Signal we should use original column read and write maps */
|
||||
sort_form->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
|
||||
|
||||
if (thd->is_error())
|
||||
|
@ -1877,7 +1877,7 @@ int Gcalc_scan_iterator::add_eq_node(Gcalc_heap::Info *node, point *sp)
|
||||
if (!en)
|
||||
GCALC_DBUG_RETURN(1);
|
||||
|
||||
/* eq_node iserted after teh equal point. */
|
||||
/* eq_node inserted after the equal point. */
|
||||
en->next= node->get_next();
|
||||
node->next= en;
|
||||
|
||||
|
@ -362,9 +362,9 @@ enum Gcalc_scan_events
|
||||
|
||||
|
||||
/*
|
||||
Gcalc_scan_iterator incapsulates the slisescan algorithm.
|
||||
It takes filled Gcalc_heap as an datasource. Then can be
|
||||
iterated trought the vertexes and intersection points with
|
||||
Gcalc_scan_iterator incapsulates the slicescan algorithm.
|
||||
It takes filled Gcalc_heap as a datasource. Then can be
|
||||
iterated through the vertexes and intersection points with
|
||||
the step() method. After the 'step()' one usually observes
|
||||
the current 'slice' to do the necessary calculations, like
|
||||
looking for intersections, calculating the area, whatever.
|
||||
|
@ -1184,14 +1184,14 @@ int Gcalc_operation_reducer::connect_threads(
|
||||
{
|
||||
rp0->outer_poly= prev_range->thread_start;
|
||||
tb->thread_start= prev_range->thread_start;
|
||||
/* Chack if needed */
|
||||
/* Check if needed */
|
||||
ta->thread_start= prev_range->thread_start;
|
||||
}
|
||||
else
|
||||
{
|
||||
rp0->outer_poly= 0;
|
||||
ta->thread_start= rp0;
|
||||
/* Chack if needed */
|
||||
/* Check if needed */
|
||||
tb->thread_start= rp0;
|
||||
}
|
||||
GCALC_DBUG_RETURN(0);
|
||||
|
@ -546,7 +546,7 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root)
|
||||
point.
|
||||
|
||||
If you do not implement this, the default delete_table() is called from
|
||||
handler.cc and it will delete all files with the file extentions returned
|
||||
handler.cc and it will delete all files with the file extensions returned
|
||||
by bas_ext().
|
||||
|
||||
Called from handler.cc by delete_table and ha_create_table(). Only used
|
||||
@ -578,7 +578,7 @@ int ha_partition::delete_table(const char *name)
|
||||
Renames a table from one name to another from alter table call.
|
||||
|
||||
If you do not implement this, the default rename_table() is called from
|
||||
handler.cc and it will rename all files with the file extentions returned
|
||||
handler.cc and it will rename all files with the file extensions returned
|
||||
by bas_ext().
|
||||
|
||||
Called from sql_table.cc by mysql_rename_table().
|
||||
@ -1432,7 +1432,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
|
||||
|
||||
|
||||
/**
|
||||
@brief Check and repair the table if neccesary
|
||||
@brief Check and repair the table if necessary
|
||||
|
||||
@param thd Thread object
|
||||
|
||||
@ -2912,7 +2912,7 @@ error_end:
|
||||
/**
|
||||
Read the .par file to get the partitions engines and names
|
||||
|
||||
@param name Name of table file (without extention)
|
||||
@param name Name of table file (without extension)
|
||||
|
||||
@return Operation status
|
||||
@retval true Failure
|
||||
@ -3140,7 +3140,7 @@ static uchar *get_part_name(PART_NAME_DEF *part, size_t *length,
|
||||
|
||||
@return Operation status
|
||||
@retval true Failure
|
||||
@retval false Sucess
|
||||
@retval false Success
|
||||
*/
|
||||
|
||||
bool ha_partition::insert_partition_name_in_hash(const char *name, uint part_id,
|
||||
@ -3266,7 +3266,7 @@ err:
|
||||
|
||||
@return Operation status
|
||||
@retval true Failure
|
||||
@retval false Sucess
|
||||
@retval false Success
|
||||
*/
|
||||
|
||||
bool ha_partition::set_ha_share_ref(Handler_share **ha_share_arg)
|
||||
@ -4151,7 +4151,7 @@ int ha_partition::write_row(uchar * buf)
|
||||
/*
|
||||
If we have failed to set the auto-increment value for this row,
|
||||
it is highly likely that we will not be able to insert it into
|
||||
the correct partition. We must check and fail if neccessary.
|
||||
the correct partition. We must check and fail if necessary.
|
||||
*/
|
||||
if (error)
|
||||
goto exit;
|
||||
@ -4221,7 +4221,7 @@ exit:
|
||||
have the previous row record in it, while new_data will have the newest
|
||||
data in it.
|
||||
Keep in mind that the server can do updates based on ordering if an
|
||||
ORDER BY clause was used. Consecutive ordering is not guarenteed.
|
||||
ORDER BY clause was used. Consecutive ordering is not guaranteed.
|
||||
|
||||
Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
|
||||
new_data is always record[0]
|
||||
@ -4363,7 +4363,7 @@ exit:
|
||||
(from either a previous rnd_xxx() or index_xxx() call).
|
||||
If you keep a pointer to the last row or can access a primary key it will
|
||||
make doing the deletion quite a bit easier.
|
||||
Keep in mind that the server does no guarentee consecutive deletions.
|
||||
Keep in mind that the server does no guarantee consecutive deletions.
|
||||
ORDER BY clauses can be used.
|
||||
|
||||
Called in sql_acl.cc and sql_udf.cc to manage internal table information.
|
||||
@ -4747,7 +4747,7 @@ int ha_partition::end_bulk_insert()
|
||||
|
||||
When scan is used we will scan one handler partition at a time.
|
||||
When preparing for rnd_pos we will init all handler partitions.
|
||||
No extra cache handling is needed when scannning is not performed.
|
||||
No extra cache handling is needed when scanning is not performed.
|
||||
|
||||
Before initialising we will call rnd_end to ensure that we clean up from
|
||||
any previous incarnation of a table scan.
|
||||
@ -5843,7 +5843,7 @@ int ha_partition::read_range_next()
|
||||
SYNOPSIS
|
||||
ha_partition::partition_scan_set_up()
|
||||
buf Buffer to later return record in (this function
|
||||
needs it to calculcate partitioning function
|
||||
needs it to calculate partitioning function
|
||||
values)
|
||||
|
||||
idx_read_flag TRUE <=> m_start_key has range start endpoint which
|
||||
@ -6878,7 +6878,7 @@ static int end_keyread_cb(handler* h, void *unused)
|
||||
function after completing a query.
|
||||
3) It is called when deleting the QUICK_RANGE_SELECT object if the
|
||||
QUICK_RANGE_SELECT object had its own handler object. It is called
|
||||
immediatley before close of this local handler object.
|
||||
immediately before close of this local handler object.
|
||||
HA_EXTRA_KEYREAD:
|
||||
HA_EXTRA_NO_KEYREAD:
|
||||
These parameters are used to provide an optimisation hint to the handler.
|
||||
@ -6915,7 +6915,7 @@ static int end_keyread_cb(handler* h, void *unused)
|
||||
HA_EXTRA_IGNORE_DUP_KEY:
|
||||
HA_EXTRA_NO_IGNORE_DUP_KEY:
|
||||
Informs the handler to we will not stop the transaction if we get an
|
||||
duplicate key errors during insert/upate.
|
||||
duplicate key errors during insert/update.
|
||||
Always called in pair, triggered by INSERT IGNORE and other similar
|
||||
SQL constructs.
|
||||
Not used by MyISAM.
|
||||
@ -8334,7 +8334,7 @@ bool ha_partition::prepare_inplace_alter_table(TABLE *altered_table,
|
||||
|
||||
/*
|
||||
Changing to similar partitioning, only update metadata.
|
||||
Non allowed changes would be catched in prep_alter_part_table().
|
||||
Non allowed changes would be caught in prep_alter_part_table().
|
||||
*/
|
||||
if (ha_alter_info->alter_info->flags == Alter_info::ALTER_PARTITION)
|
||||
DBUG_RETURN(false);
|
||||
@ -8367,7 +8367,7 @@ bool ha_partition::inplace_alter_table(TABLE *altered_table,
|
||||
|
||||
/*
|
||||
Changing to similar partitioning, only update metadata.
|
||||
Non allowed changes would be catched in prep_alter_part_table().
|
||||
Non allowed changes would be caught in prep_alter_part_table().
|
||||
*/
|
||||
if (ha_alter_info->alter_info->flags == Alter_info::ALTER_PARTITION)
|
||||
DBUG_RETURN(false);
|
||||
@ -8412,7 +8412,7 @@ bool ha_partition::commit_inplace_alter_table(TABLE *altered_table,
|
||||
|
||||
/*
|
||||
Changing to similar partitioning, only update metadata.
|
||||
Non allowed changes would be catched in prep_alter_part_table().
|
||||
Non allowed changes would be caught in prep_alter_part_table().
|
||||
*/
|
||||
if (ha_alter_info->alter_info->flags == Alter_info::ALTER_PARTITION)
|
||||
DBUG_RETURN(false);
|
||||
|
@ -285,7 +285,7 @@ public:
|
||||
-------------------------------------------------------------------------
|
||||
MODULE create/delete handler object
|
||||
-------------------------------------------------------------------------
|
||||
Object create/delete methode. The normal called when a table object
|
||||
Object create/delete method. Normally called when a table object
|
||||
exists. There is also a method to create the handler object with only
|
||||
partition information. This is used from mysql_create_table when the
|
||||
table is to be created and the engine type is deduced to be the
|
||||
@ -583,7 +583,7 @@ public:
|
||||
|
||||
/**
|
||||
@breif
|
||||
Positions an index cursor to the index specified in the hanlde. Fetches the
|
||||
Positions an index cursor to the index specified in the handle. Fetches the
|
||||
row if available. If the key value is null, begin at first key of the
|
||||
index.
|
||||
*/
|
||||
@ -800,7 +800,7 @@ public:
|
||||
|
||||
HA_REC_NOT_IN_SEQ:
|
||||
This flag is set for handlers that cannot guarantee that the rows are
|
||||
returned accroding to incremental positions (0, 1, 2, 3...).
|
||||
returned according to incremental positions (0, 1, 2, 3...).
|
||||
This also means that rnd_next() should return HA_ERR_RECORD_DELETED
|
||||
if it finds a deleted row.
|
||||
(MyISAM (not fixed length row), HEAP, InnoDB)
|
||||
|
@ -724,7 +724,7 @@ int ha_end()
|
||||
|
||||
|
||||
/*
|
||||
This should be eventualy based on the graceful shutdown flag.
|
||||
This should be eventually based on the graceful shutdown flag.
|
||||
So if flag is equal to HA_PANIC_CLOSE, the deallocate
|
||||
the errors.
|
||||
*/
|
||||
@ -1333,8 +1333,8 @@ int ha_commit_trans(THD *thd, bool all)
THD_TRANS *trans= all ? &thd->transaction.all : &thd->transaction.stmt;
/*
"real" is a nick name for a transaction for which a commit will
make persistent changes. E.g. a 'stmt' transaction inside a 'all'
transation is not 'real': even though it's possible to commit it,
make persistent changes. E.g. a 'stmt' transaction inside an 'all'
transaction is not 'real': even though it's possible to commit it,
the changes are not durable as they might be rolled back if the
enclosing 'all' transaction is rolled back.
*/
@ -2494,7 +2494,7 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root)
|
||||
|
||||
/*
|
||||
TODO: Implement a more efficient way to have more than one index open for
|
||||
the same table instance. The ha_open call is not cachable for clone.
|
||||
the same table instance. The ha_open call is not cacheable for clone.
|
||||
|
||||
This is not critical as the engines already have the table open
|
||||
and should be able to use the original instance of the table.
|
||||
@ -3308,7 +3308,7 @@ int handler::update_auto_increment()
|
||||
index_init() or rnd_init() and in any column_bitmaps_signal() call after
|
||||
this.
|
||||
|
||||
The handler is allowd to do changes to the bitmap after a index_init or
|
||||
The handler is allowed to do changes to the bitmap after a index_init or
|
||||
rnd_init() call is made as after this, MySQL will not use the bitmap
|
||||
for any program logic checking.
|
||||
*/
|
||||
@ -3371,7 +3371,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
|
||||
{ // Autoincrement at key-start
|
||||
error= ha_index_last(table->record[1]);
|
||||
/*
|
||||
MySQL implicitely assumes such method does locking (as MySQL decides to
|
||||
MySQL implicitly assumes such method does locking (as MySQL decides to
|
||||
use nr+increment without checking again with the handler, in
|
||||
handler::update_auto_increment()), so reserves to infinite.
|
||||
*/
|
||||
|
@ -212,7 +212,7 @@ enum enum_alter_inplace_result {
|
||||
this flag must implement start_read_removal() and end_read_removal().
|
||||
The handler may return "fake" rows constructed from the key of the row
|
||||
asked for. This is used to optimize UPDATE and DELETE by reducing the
|
||||
numer of roundtrips between handler and storage engine.
|
||||
number of roundtrips between handler and storage engine.
|
||||
|
||||
Example:
|
||||
UPDATE a=1 WHERE pk IN (<keys>)
|
||||
@ -485,7 +485,7 @@ enum enum_binlog_command {
|
||||
|
||||
/* Bits in used_fields */
|
||||
#define HA_CREATE_USED_AUTO (1UL << 0)
|
||||
#define HA_CREATE_USED_RAID (1UL << 1) //RAID is no longer availble
|
||||
#define HA_CREATE_USED_RAID (1UL << 1) //RAID is no longer available
|
||||
#define HA_CREATE_USED_UNION (1UL << 2)
|
||||
#define HA_CREATE_USED_INSERT_METHOD (1UL << 3)
|
||||
#define HA_CREATE_USED_MIN_ROWS (1UL << 4)
|
||||
@ -953,7 +953,7 @@ struct handler_iterator {
|
||||
/*
|
||||
Pointer to buffer for the iterator to use.
|
||||
Should be allocated by function which created the iterator and
|
||||
destroied by freed by above "destroy" call
|
||||
destroyed by freed by above "destroy" call
|
||||
*/
|
||||
void *buffer;
|
||||
};
|
||||
@ -1169,7 +1169,7 @@ struct handlerton
|
||||
"cookie".
|
||||
|
||||
The flush and call of commit_checkpoint_notify_ha() need not happen
|
||||
immediately - it can be scheduled and performed asynchroneously (ie. as
|
||||
immediately - it can be scheduled and performed asynchronously (ie. as
|
||||
part of next prepare(), or sync every second, or whatever), but should
|
||||
not be postponed indefinitely. It is however also permissible to do it
|
||||
immediately, before returning from commit_checkpoint_request().
|
||||
@ -1254,13 +1254,13 @@ struct handlerton
|
||||
Used by open_table_error(), by the default rename_table and delete_table
|
||||
handler methods, and by the default discovery implementation.
|
||||
|
||||
For engines that have more than one file name extentions (separate
|
||||
For engines that have more than one file name extensions (separate
|
||||
metadata, index, and/or data files), the order of elements is relevant.
|
||||
First element of engine file name extentions array should be metadata
|
||||
file extention. This is implied by the open_table_error()
|
||||
First element of engine file name extensions array should be metadata
|
||||
file extension. This is implied by the open_table_error()
|
||||
and the default discovery implementation.
|
||||
|
||||
Second element - data file extention. This is implied
|
||||
Second element - data file extension. This is implied
|
||||
assumed by REPAIR TABLE ... USE_FRM implementation.
|
||||
*/
|
||||
const char **tablefile_extensions; // by default - empty list
|
||||
@ -1761,7 +1761,7 @@ struct HA_CREATE_INFO: public Table_scope_and_contents_source_st,
|
||||
CONVERT TO CHARACTER SET DEFAULT
|
||||
to
|
||||
CONVERT TO CHARACTER SET <character-set-of-the-current-database>
|
||||
TODO: Should't we postpone resolution of DEFAULT until the
|
||||
TODO: Shouldn't we postpone resolution of DEFAULT until the
|
||||
character set of the table owner database is loaded from its db.opt?
|
||||
*/
|
||||
DBUG_ASSERT(cs);
|
||||
@ -2653,7 +2653,7 @@ public:
|
||||
ha_statistics stats;
|
||||
|
||||
/** MultiRangeRead-related members: */
|
||||
range_seq_t mrr_iter; /* Interator to traverse the range sequence */
|
||||
range_seq_t mrr_iter; /* Iterator to traverse the range sequence */
|
||||
RANGE_SEQ_IF mrr_funcs; /* Range sequence traversal functions */
|
||||
HANDLER_BUFFER *multi_range_buffer; /* MRR buffer info */
|
||||
uint ranges_in_seq; /* Total number of ranges in the traversed sequence */
|
||||
@ -3490,7 +3490,7 @@ public:
|
||||
This method offers the storage engine, the possibility to store a reference
|
||||
to a table name which is going to be used with query cache.
|
||||
The method is called each time a statement is written to the cache and can
|
||||
be used to verify if a specific statement is cachable. It also offers
|
||||
be used to verify if a specific statement is cacheable. It also offers
|
||||
the possibility to register a generic (but static) call back function which
|
||||
is called each time a statement is matched against the query cache.
|
||||
|
||||
|
sql/item.cc
@ -1427,7 +1427,7 @@ err:
bool Item::make_zero_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{
/*
if the item was not null and convertion failed, we return a zero date
if the item was not null and conversion failed, we return a zero date
if allowed, otherwise - null.
*/
bzero((char*) ltime,sizeof(*ltime));
@ -4719,7 +4719,7 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
|
||||
|
||||
@note
|
||||
We have to mark all items between current_sel (including) and
|
||||
last_select (excluding) as dependend (select before last_select should
|
||||
last_select (excluding) as dependent (select before last_select should
|
||||
be marked with actual table mask used by resolved item, all other with
|
||||
OUTER_REF_TABLE_BIT) and also write dependence information to Item of
|
||||
resolved identifier.
|
||||
@ -5095,7 +5095,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
|
||||
bool upward_lookup= FALSE;
|
||||
TABLE_LIST *table_list;
|
||||
|
||||
/* Calulate the TABLE_LIST for the table */
|
||||
/* Calculate the TABLE_LIST for the table */
|
||||
table_list= (cached_table ? cached_table :
|
||||
field_found && (*from_field) != view_ref_found ?
|
||||
(*from_field)->table->pos_in_table_list : 0);
|
||||
@ -5824,7 +5824,7 @@ Item *Item_field::propagate_equal_fields(THD *thd,
|
||||
but failed to create a valid DATE literal from the given string literal.
|
||||
|
||||
Do not do constant propagation in such cases and unlink
|
||||
"this" from the found Item_equal (as this equality not usefull).
|
||||
"this" from the found Item_equal (as this equality not useful).
|
||||
*/
|
||||
item_equal= NULL;
|
||||
return this;
|
||||
@ -7614,7 +7614,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
|
||||
/*
|
||||
Due to cache, find_field_in_tables() can return field which
|
||||
doesn't belong to provided outer_context. In this case we have
|
||||
to find proper field context in order to fix field correcly.
|
||||
to find proper field context in order to fix field correctly.
|
||||
*/
|
||||
do
|
||||
{
|
||||
@ -7799,9 +7799,9 @@ Item* Item_ref::transform(THD *thd, Item_transformer transformer, uchar *arg)
|
||||
callback functions.
|
||||
|
||||
First the function applies the analyzer to the Item_ref object. Then
|
||||
if the analizer succeeeds we first applies the compile method to the
|
||||
if the analyzer succeeds we first apply the compile method to the
|
||||
object the Item_ref object is referencing. If this returns a new
|
||||
item the old item is substituted for a new one. After this the
|
||||
item the old item is substituted for a new one. After this the
|
||||
transformer is applied to the Item_ref object itself.
|
||||
The compile function is not called if the analyzer returns NULL
|
||||
in the parameter arg_p.
|
||||
@ -10529,7 +10529,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item)
|
||||
}
|
||||
|
||||
/**
|
||||
Calculate lenth for merging result for given Item type.
|
||||
Calculate length for merging result for given Item type.
|
||||
|
||||
@param item Item for length detection
|
||||
|
||||
|
sql/item.h
@ -244,7 +244,7 @@ void dummy_error_processor(THD *thd, void *data);
void view_error_processor(THD *thd, void *data);
/*
Instances of Name_resolution_context store the information necesary for
Instances of Name_resolution_context store the information necessary for
name resolution of Items and other context analysis of a query made in
fix_fields().
@ -402,7 +402,7 @@ public:
|
||||
Monotonicity is defined only for Item* trees that represent table
|
||||
partitioning expressions (i.e. have no subselects/user vars/PS parameters
|
||||
etc etc). An Item* tree is assumed to have the same monotonicity properties
|
||||
as its correspoinding function F:
|
||||
as its corresponding function F:
|
||||
|
||||
[signed] longlong F(field1, field2, ...) {
|
||||
put values of field_i into table record buffer;
|
||||
@ -746,7 +746,7 @@ protected:
|
||||
return rc;
|
||||
}
|
||||
/*
|
||||
This method is used if the item was not null but convertion to
|
||||
This method is used if the item was not null but conversion to
|
||||
TIME/DATE/DATETIME failed. We return a zero date if allowed,
|
||||
otherwise - null.
|
||||
*/
|
||||
@ -947,7 +947,7 @@ public:
|
||||
/*
|
||||
real_type() is the type of base item. This is same as type() for
|
||||
most items, except Item_ref() and Item_cache_wrapper() where it
|
||||
shows the type for the underlaying item.
|
||||
shows the type for the underlying item.
|
||||
*/
|
||||
virtual enum Type real_type() const { return type(); }
|
||||
|
||||
@ -1054,7 +1054,7 @@ public:
|
||||
The caller can modify the returned String, if it's not marked
|
||||
"const" (with the String::mark_as_const() method). That means that
|
||||
if the item returns its own internal buffer (e.g. tmp_value), it
|
||||
*must* be marked "const" [1]. So normally it's preferrable to
|
||||
*must* be marked "const" [1]. So normally it's preferable to
|
||||
return the result value in the String, that was passed as an
|
||||
argument. But, for example, SUBSTR() returns a String that simply
|
||||
points into the buffer of SUBSTR()'s args[0]->val_str(). Such a
|
||||
@ -1431,7 +1431,7 @@ public:
|
||||
@param cond_ptr[OUT] Store a replacement item here if the condition
|
||||
can be simplified, e.g.:
|
||||
WHERE part1 OR part2 OR part3
|
||||
with one of the partN evalutating to SEL_TREE::ALWAYS.
|
||||
with one of the partN evaluating to SEL_TREE::ALWAYS.
|
||||
*/
|
||||
virtual SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr);
|
||||
/*
|
||||
@ -2011,7 +2011,7 @@ public:
|
||||
virtual bool is_outer_field() const { DBUG_ASSERT(fixed); return FALSE; }
|
||||
|
||||
/**
|
||||
Checks if this item or any of its decendents contains a subquery.
|
||||
Checks if this item or any of its descendents contains a subquery.
|
||||
*/
|
||||
virtual bool has_subquery() const { return with_subselect; }
|
||||
|
||||
@ -5209,7 +5209,7 @@ public:
|
||||
|
||||
This is the method that updates the cached value.
|
||||
It must be explicitly called by the user of this class to store the value
|
||||
of the orginal item in the cache.
|
||||
of the original item in the cache.
|
||||
*/
|
||||
virtual void copy() = 0;
|
||||
|
||||
|
@ -192,7 +192,7 @@ bool Cached_item_field::cmp(void)
|
||||
|
||||
/*
|
||||
If value is not null and value changed (from null to not null or
|
||||
becasue of value change), then copy the new value to buffer.
|
||||
because of value change), then copy the new value to buffer.
|
||||
*/
|
||||
if (! null_value && (tmp || (tmp= (field->cmp(buff) != 0))))
|
||||
field->get_image(buff,length,field->charset());
|
||||
|
@ -1424,7 +1424,7 @@ bool Item_in_optimizer::fix_fields(THD *thd, Item **ref)
|
||||
@note
|
||||
Item_in_optimizer should work as pass-through for
|
||||
- subqueries that were processed by ALL/ANY->MIN/MAX rewrite
|
||||
- subqueries taht were originally EXISTS subqueries (and were coverted by
|
||||
- subqueries that were originally EXISTS subqueries (and were converted by
|
||||
the EXISTS->IN rewrite)
|
||||
|
||||
When Item_in_optimizer is not not working as a pass-through, it
|
||||
@ -2017,8 +2017,8 @@ longlong Item_func_interval::val_int()
|
||||
interval_range *range= intervals + mid;
|
||||
my_bool cmp_result;
|
||||
/*
|
||||
The values in the range intervall may have different types,
|
||||
Only do a decimal comparision of the first argument is a decimal
|
||||
The values in the range interval may have different types,
|
||||
Only do a decimal comparison if the first argument is a decimal
|
||||
and we are comparing against a decimal
|
||||
*/
|
||||
if (dec && range->type == DECIMAL_RESULT)
|
||||
@ -2684,7 +2684,7 @@ Item_func_nullif::fix_length_and_dec()
|
||||
Some examples of what NULLIF can end up with after argument
|
||||
substitution (we don't mention args[1] in some cases for simplicity):
|
||||
|
||||
1. l_expr is not an aggragate function:
|
||||
1. l_expr is not an aggregate function:
|
||||
|
||||
a. No conversion happened.
|
||||
args[0] and args[2] were not replaced to something else
|
||||
@ -2808,7 +2808,7 @@ Item_func_nullif::fix_length_and_dec()
|
||||
In this case we remember and reuse m_arg0 during EXECUTE time as args[2].
|
||||
|
||||
QQ: How to make sure that m_args0 does not point
|
||||
to something temporary which will be destoyed between PREPARE and EXECUTE.
|
||||
to something temporary which will be destroyed between PREPARE and EXECUTE.
|
||||
The condition below should probably be more strict and somehow check that:
|
||||
- change_item_tree() was called for the new args[0]
|
||||
- m_args0 is referenced from inside args[0], e.g. as a function argument,
|
||||
@ -3243,7 +3243,7 @@ bool Item_func_case::fix_length_and_dec()
|
||||
If we'll do string comparison, we also need to aggregate
|
||||
character set and collation for first/WHEN items and
|
||||
install converters for some of them to cmp_collation when necessary.
|
||||
This is done because cmp_item compatators cannot compare
|
||||
This is done because cmp_item comparators cannot compare
|
||||
strings in two different character sets.
|
||||
Some examples when we install converters:
|
||||
|
||||
@ -6959,7 +6959,7 @@ Item* Item_equal::get_first(JOIN_TAB *context, Item *field_item)
|
||||
and not ot2.col.
|
||||
|
||||
eliminate_item_equal() also has code that deals with equality substitution
|
||||
in presense of SJM nests.
|
||||
in presence of SJM nests.
|
||||
*/
|
||||
|
||||
TABLE_LIST *emb_nest;
|
||||
|
@ -155,7 +155,7 @@ protected:
|
||||
/*
|
||||
Return the full select tree for "field_item" and "value":
|
||||
- a single SEL_TREE if the field is not in a multiple equality, or
|
||||
- a conjuction of all SEL_TREEs for all fields from
|
||||
- a conjunction of all SEL_TREEs for all fields from
|
||||
the same multiple equality with "field_item".
|
||||
*/
|
||||
SEL_TREE *get_full_func_mm_tree(RANGE_OPT_PARAM *param,
|
||||
|
@ -366,7 +366,7 @@ Item *Item_func::transform(THD *thd, Item_transformer transformer, uchar *argume
|
||||
callback functions.
|
||||
|
||||
First the function applies the analyzer to the root node of
|
||||
the Item_func object. Then if the analizer succeeeds (returns TRUE)
|
||||
the Item_func object. Then if the analyzer succeeds (returns TRUE)
|
||||
the function recursively applies the compile method to each argument
|
||||
of the Item_func node.
|
||||
If the call of the method for an argument item returns a new item
|
||||
@ -1701,6 +1701,8 @@ my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value)
|
||||
null_value= 1;
|
||||
return 0;
|
||||
}
|
||||
my_decimal_round(E_DEC_FATAL_ERROR, decimal_value,
|
||||
decimals, FALSE, decimal_value);
|
||||
return decimal_value;
|
||||
}
|
||||
|
||||
@ -1769,7 +1771,7 @@ bool Item_func_div::fix_length_and_dec()
|
||||
case TIME_RESULT:
|
||||
DBUG_ASSERT(0);
|
||||
}
|
||||
maybe_null= 1; // devision by zero
|
||||
maybe_null= 1; // division by zero
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
||||
@ -1843,7 +1845,7 @@ longlong Item_func_int_div::val_int()
|
||||
bool Item_func_int_div::fix_length_and_dec()
|
||||
{
|
||||
Item_result argtype= args[0]->result_type();
|
||||
/* use precision ony for the data type it is applicable for and valid */
|
||||
/* use precision only for the data type it is applicable for and valid */
|
||||
uint32 char_length= args[0]->max_char_length() -
|
||||
(argtype == DECIMAL_RESULT || argtype == INT_RESULT ?
|
||||
args[0]->decimals : 0);
|
||||
@ -4822,7 +4824,7 @@ bool Item_func_set_user_var::register_field_in_bitmap(void *arg)
|
||||
@param type type of new value
|
||||
@param cs charset info for new value
|
||||
@param dv derivation for new value
|
||||
@param unsigned_arg indiates if a value of type INT_RESULT is unsigned
|
||||
@param unsigned_arg indicates if a value of type INT_RESULT is unsigned
|
||||
|
||||
@note Sets error and fatal error if allocation fails.
|
||||
|
||||
@ -6567,7 +6569,7 @@ void my_missing_function_error(const LEX_STRING &token, const char *func_name)
|
||||
@brief Initialize the result field by creating a temporary dummy table
|
||||
and assign it to a newly created field object. Meta data used to
|
||||
create the field is fetched from the sp_head belonging to the stored
|
||||
proceedure found in the stored procedure functon cache.
|
||||
procedure found in the stored procedure function cache.
|
||||
|
||||
@note This function should be called from fix_fields to init the result
|
||||
field. It is some what related to Item_field.
|
||||
@ -6871,7 +6873,7 @@ Item_func_sp::fix_fields(THD *thd, Item **ref)
|
||||
/*
|
||||
Here we check privileges of the stored routine only during view
|
||||
creation, in order to validate the view. A runtime check is
|
||||
perfomed in Item_func_sp::execute(), and this method is not
|
||||
performed in Item_func_sp::execute(), and this method is not
|
||||
called during context analysis. Notice, that during view
|
||||
creation we do not infer into stored routine bodies and do not
|
||||
check privileges of its statements, which would probably be a
|
||||
|
@ -206,7 +206,7 @@ String *Item_func_inet_str_base::val_str_ascii(String *buffer)
|
||||
|
||||
@return Completion status.
|
||||
@retval false Given string does not represent an IPv4-address.
|
||||
@retval true The string has been converted sucessfully.
|
||||
@retval true The string has been converted successfully.
|
||||
|
||||
@note The problem with inet_pton() is that it treats leading zeros in
|
||||
IPv4-part differently on different platforms.
|
||||
@ -331,7 +331,7 @@ static bool str_to_ipv4(const char *str, size_t str_length, in_addr *ipv4_addres
|
||||
|
||||
@return Completion status.
|
||||
@retval false Given string does not represent an IPv6-address.
|
||||
@retval true The string has been converted sucessfully.
|
||||
@retval true The string has been converted successfully.
|
||||
|
||||
@note The problem with inet_pton() is that it treats leading zeros in
|
||||
IPv4-part differently on different platforms.
|
||||
@ -677,7 +677,7 @@ static void ipv6_to_str(const in6_addr *ipv6, char *str)
|
||||
|
||||
@return Completion status.
|
||||
@retval false Given string does not represent an IP-address.
|
||||
@retval true The string has been converted sucessfully.
|
||||
@retval true The string has been converted successfully.
|
||||
*/
|
||||
|
||||
bool Item_func_inet6_aton::calc_value(const String *arg, String *buffer)
|
||||
@ -717,7 +717,7 @@ bool Item_func_inet6_aton::calc_value(const String *arg, String *buffer)
|
||||
|
||||
@return Completion status.
|
||||
@retval false The argument does not correspond to IP-address.
|
||||
@retval true The string has been converted sucessfully.
|
||||
@retval true The string has been converted successfully.
|
||||
*/
|
||||
|
||||
bool Item_func_inet6_ntoa::calc_value(const String *arg, String *buffer)
|
||||
|
@ -929,7 +929,7 @@ String *Item_func_concat_ws::val_str(String *str)
|
||||
goto null; // Must be a blob
|
||||
}
|
||||
else if (res2 == &tmp_value)
|
||||
{ // This can happend only 1 time
|
||||
{ // This can happen only 1 time
|
||||
if (tmp_value.replace(0,0,*sep_str) || tmp_value.replace(0,0,*res))
|
||||
goto null;
|
||||
res= &tmp_value;
|
||||
@ -1079,7 +1079,7 @@ bool Item_func_reverse::fix_length_and_dec()
|
||||
}
|
||||
|
||||
/**
|
||||
Replace all occurences of string2 in string1 with string3.
|
||||
Replace all occurrences of string2 in string1 with string3.
|
||||
|
||||
Don't reallocate val_str() if not needed.
|
||||
|
||||
@ -3944,7 +3944,7 @@ bool Item_func_export_set::fix_length_and_dec()
|
||||
using in a SQL statement.
|
||||
|
||||
Adds a \\ before all characters that needs to be escaped in a SQL string.
|
||||
We also escape '^Z' (END-OF-FILE in windows) to avoid probelms when
|
||||
We also escape '^Z' (END-OF-FILE in windows) to avoid problems when
|
||||
running commands from a file in windows.
|
||||
|
||||
This function is very useful when you want to generate SQL statements.
|
||||
|
@ -1111,12 +1111,12 @@ void Item_singlerow_subselect::reset()
|
||||
|
||||
/**
|
||||
@todo
|
||||
- We cant change name of Item_field or Item_ref, because it will
|
||||
prevent it's correct resolving, but we should save name of
|
||||
- We can't change name of Item_field or Item_ref, because it will
|
||||
prevent its correct resolving, but we should save name of
|
||||
removed item => we do not make optimization if top item of
|
||||
list is field or reference.
|
||||
- switch off this optimization for prepare statement,
|
||||
because we do not rollback this changes.
|
||||
because we do not rollback these changes.
|
||||
Make rollback for it, or special name resolving mode in 5.0.
|
||||
|
||||
@param join Join object of the subquery (i.e. 'child' join).
|
||||
@ -1139,8 +1139,8 @@ Item_singlerow_subselect::select_transformer(JOIN *join)
|
||||
select_lex->item_list.elements == 1 &&
|
||||
!select_lex->item_list.head()->with_sum_func &&
|
||||
/*
|
||||
We cant change name of Item_field or Item_ref, because it will
|
||||
prevent it's correct resolving, but we should save name of
|
||||
We can't change name of Item_field or Item_ref, because it will
|
||||
prevent its correct resolving, but we should save name of
|
||||
removed item => we do not make optimization if top item of
|
||||
list is field or reference.
|
||||
TODO: solve above problem
|
||||
@ -1631,7 +1631,7 @@ longlong Item_exists_subselect::val_int()
|
||||
Return the result of EXISTS as a string value
|
||||
|
||||
Converts the true/false result into a string value.
|
||||
Note that currently this cannot be NULL, so if the query exection fails
|
||||
Note that currently this cannot be NULL, so if the query execution fails
|
||||
it will return 0.
|
||||
|
||||
@param decimal_value[out] buffer to hold the resulting string value
|
||||
@ -1654,7 +1654,7 @@ String *Item_exists_subselect::val_str(String *str)
|
||||
Return the result of EXISTS as a decimal value
|
||||
|
||||
Converts the true/false result into a decimal value.
|
||||
Note that currently this cannot be NULL, so if the query exection fails
|
||||
Note that currently this cannot be NULL, so if the query execution fails
|
||||
it will return 0.
|
||||
|
||||
@param decimal_value[out] Buffer to hold the resulting decimal value
|
||||
@ -2350,7 +2350,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
|
||||
is_not_null_test(v3))
|
||||
where is_not_null_test registers NULLs values but reject rows.
|
||||
|
||||
in case when we do not need correct NULL, we have simplier construction:
|
||||
in case when we do not need correct NULL, we have simpler construction:
|
||||
EXISTS (SELECT ... WHERE where and
|
||||
(l1 = v1) and
|
||||
(l2 = v2) and
|
||||
@ -2753,6 +2753,8 @@ bool Item_exists_subselect::select_prepare_to_be_in()
|
||||
Check if 'func' is an equality in form "inner_table.column = outer_expr"
|
||||
|
||||
@param func Expression to check
|
||||
@param allow_subselect If true, the outer_expr part can have a subquery
|
||||
If false, it cannot.
|
||||
@param local_field OUT Return "inner_table.column" here
|
||||
@param outer_expr OUT Return outer_expr here
|
||||
|
||||
@ -2760,6 +2762,7 @@ bool Item_exists_subselect::select_prepare_to_be_in()
|
||||
*/
|
||||
|
||||
static bool check_equality_for_exist2in(Item_func *func,
|
||||
bool allow_subselect,
|
||||
Item_ident **local_field,
|
||||
Item **outer_exp)
|
||||
{
|
||||
@ -2770,7 +2773,8 @@ static bool check_equality_for_exist2in(Item_func *func,
|
||||
args= func->arguments();
|
||||
if (args[0]->real_type() == Item::FIELD_ITEM &&
|
||||
args[0]->all_used_tables() != OUTER_REF_TABLE_BIT &&
|
||||
args[1]->all_used_tables() == OUTER_REF_TABLE_BIT)
|
||||
args[1]->all_used_tables() == OUTER_REF_TABLE_BIT &&
|
||||
(allow_subselect || !args[1]->has_subquery()))
|
||||
{
|
||||
/* It is Item_field or Item_direct_view_ref) */
|
||||
DBUG_ASSERT(args[0]->type() == Item::FIELD_ITEM ||
|
||||
@ -2781,7 +2785,8 @@ static bool check_equality_for_exist2in(Item_func *func,
|
||||
}
|
||||
else if (args[1]->real_type() == Item::FIELD_ITEM &&
|
||||
args[1]->all_used_tables() != OUTER_REF_TABLE_BIT &&
|
||||
args[0]->all_used_tables() == OUTER_REF_TABLE_BIT)
|
||||
args[0]->all_used_tables() == OUTER_REF_TABLE_BIT &&
|
||||
(allow_subselect || !args[0]->has_subquery()))
|
||||
{
|
||||
/* It is Item_field or Item_direct_view_ref) */
|
||||
DBUG_ASSERT(args[1]->type() == Item::FIELD_ITEM ||
|
||||
@ -2810,6 +2815,13 @@ typedef struct st_eq_field_outer
|
||||
|
||||
outer1=inner_tbl1.col1 AND ... AND outer2=inner_tbl1.col2 AND remainder_cond
|
||||
|
||||
if there is just one outer_expr=inner_expr pair, then outer_expr can have a
|
||||
subselect in it. If there are many such pairs, then none of outer_expr can
|
||||
have a subselect in it. If we allow this, the query will fail with an error:
|
||||
|
||||
This version of MariaDB doesn't yet support 'SUBQUERY in ROW in left
|
||||
expression of IN/ALL/ANY'
|
||||
|
||||
@param conds Condition to be checked
|
||||
@parm result Array to collect EQ_FIELD_OUTER elements describing
|
||||
inner-vs-outer equalities the function has found.
|
||||
@ -2827,14 +2839,17 @@ static bool find_inner_outer_equalities(Item **conds,
|
||||
{
|
||||
List_iterator<Item> li(*((Item_cond*)*conds)->argument_list());
|
||||
Item *item;
|
||||
bool allow_subselect= true;
|
||||
while ((item= li++))
|
||||
{
|
||||
if (item->type() == Item::FUNC_ITEM &&
|
||||
check_equality_for_exist2in((Item_func *)item,
|
||||
allow_subselect,
|
||||
&element.local_field,
|
||||
&element.outer_exp))
|
||||
{
|
||||
found= TRUE;
|
||||
allow_subselect= false;
|
||||
element.eq_ref= li.ref();
|
||||
if (result.append(element))
|
||||
goto alloc_err;
|
||||
@ -2843,6 +2858,7 @@ static bool find_inner_outer_equalities(Item **conds,
|
||||
}
|
||||
else if ((*conds)->type() == Item::FUNC_ITEM &&
|
||||
check_equality_for_exist2in((Item_func *)*conds,
|
||||
true,
|
||||
&element.local_field,
|
||||
&element.outer_exp))
|
||||
{
|
||||
@ -3205,7 +3221,7 @@ Item_in_subselect::select_in_like_transformer(JOIN *join)
|
||||
/*
|
||||
In some optimisation cases we will not need this Item_in_optimizer
|
||||
object, but we can't know it here, but here we need address correct
|
||||
reference on left expresion.
|
||||
reference on left expression.
|
||||
|
||||
note: we won't need Item_in_optimizer when handling degenerate cases
|
||||
like "... IN (SELECT 1)"
|
||||
@ -3237,7 +3253,7 @@ Item_in_subselect::select_in_like_transformer(JOIN *join)
|
||||
and all that items do not make permanent changes in current item arena
|
||||
which allow to us call them with changed arena (if we do not know nature
|
||||
of Item, we have to call fix_fields() for it only with original arena to
|
||||
avoid memory leack)
|
||||
avoid memory leak)
|
||||
*/
|
||||
if (left_expr->cols() == 1)
|
||||
trans_res= single_value_transformer(join);
|
||||
@ -3400,7 +3416,7 @@ bool Item_in_subselect::setup_mat_engine()
|
||||
|
||||
/*
|
||||
The select_engine (that executes transformed IN=>EXISTS subselects) is
|
||||
pre-created at parse time, and is stored in statment memory (preserved
|
||||
pre-created at parse time, and is stored in statement memory (preserved
|
||||
across PS executions).
|
||||
*/
|
||||
DBUG_ASSERT(engine->engine_type() == subselect_engine::SINGLE_SELECT_ENGINE);
|
||||
@ -3871,7 +3887,7 @@ int subselect_single_select_engine::exec()
|
||||
For at least one of the pushed predicates the following is true:
|
||||
We should not apply optimizations based on the condition that was
|
||||
pushed down into the subquery. Those optimizations are ref[_or_null]
|
||||
acceses. Change them to be full table scans.
|
||||
accesses. Change them to be full table scans.
|
||||
*/
|
||||
JOIN_TAB *tab;
|
||||
for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
|
||||
@ -6108,7 +6124,7 @@ int subselect_partial_match_engine::exec()
|
||||
if (has_covering_null_row)
|
||||
{
|
||||
/*
|
||||
If there is a NULL-only row that coveres all columns the result of IN
|
||||
If there is a NULL-only row that covers all columns the result of IN
|
||||
is UNKNOWN.
|
||||
*/
|
||||
item_in->value= 0;
|
||||
@ -6308,7 +6324,7 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts,
|
||||
for (uint i= (non_null_key ? 1 : 0); i < merge_keys_count; i++)
|
||||
{
|
||||
/*
|
||||
Check if the first and only indexed column contains NULL in the curent
|
||||
Check if the first and only indexed column contains NULL in the current
|
||||
row, and add the row number to the corresponding key.
|
||||
*/
|
||||
if (merge_keys[i]->get_field(0)->is_null())
|
||||
@ -6520,7 +6536,7 @@ bool subselect_rowid_merge_engine::partial_match()
|
||||
}
|
||||
|
||||
/*
|
||||
If all nullable columns contain only NULLs, then there is a guranteed
|
||||
If all nullable columns contain only NULLs, then there is a guaranteed
|
||||
partial match, and we don't need to search for a matching row.
|
||||
*/
|
||||
if (has_covering_null_columns)
|
||||
|
@ -560,7 +560,7 @@ public:
|
||||
bool jtbm_const_row_found;
|
||||
|
||||
/*
|
||||
TRUE<=>this is a flattenable semi-join, false overwise.
|
||||
TRUE<=>this is a flattenable semi-join, false otherwise.
|
||||
*/
|
||||
bool is_flattenable_semijoin;
|
||||
|
||||
@ -987,7 +987,7 @@ class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine
|
||||
/* FALSE for 'ref', TRUE for 'ref-or-null'. */
|
||||
bool check_null;
|
||||
/*
|
||||
The "having" clause. This clause (further reffered to as "artificial
|
||||
The "having" clause. This clause (further referred to as "artificial
|
||||
having") was inserted by subquery transformation code. It contains
|
||||
Item(s) that have a side-effect: they record whether the subquery has
|
||||
produced a row with NULL certain components. We need to use it for cases
|
||||
@ -1008,7 +1008,7 @@ class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine
|
||||
However, subqueries like the above are currently not handled by index
|
||||
lookup-based subquery engines, the engine applicability check misses
|
||||
them: it doesn't switch the engine for case of artificial having and
|
||||
[eq_]ref access (only for artifical having + ref_or_null or no having).
|
||||
[eq_]ref access (only for artificial having + ref_or_null or no having).
|
||||
The above example subquery is handled as a full-blown SELECT with eq_ref
|
||||
access to one table.
|
||||
|
||||
@ -1079,7 +1079,7 @@ public:
|
||||
*/
|
||||
JOIN *materialize_join;
|
||||
/*
|
||||
A conjunction of all the equality condtions between all pairs of expressions
|
||||
A conjunction of all the equality conditions between all pairs of expressions
|
||||
that are arguments of an IN predicate. We need these to post-filter some
|
||||
IN results because index lookups sometimes match values that are actually
|
||||
not equal to the search key in SQL terms.
|
||||
|
@ -697,7 +697,7 @@ int Aggregator_distinct::composite_key_cmp(void* arg, uchar* key1, uchar* key2)
|
||||
|
||||
C_MODE_START
|
||||
|
||||
/* Declarations for auxilary C-callbacks */
|
||||
/* Declarations for auxiliary C-callbacks */
|
||||
|
||||
int simple_raw_key_cmp(void* arg, const void* key1, const void* key2)
|
||||
{
|
||||
@ -729,7 +729,7 @@ C_MODE_END
|
||||
@param thd Thread descriptor
|
||||
@return status
|
||||
@retval FALSE success
|
||||
@retval TRUE faliure
|
||||
@retval TRUE failure
|
||||
|
||||
Prepares Aggregator_distinct to process the incoming stream.
|
||||
Creates the temporary table and the Unique class if needed.
|
||||
@ -1184,7 +1184,7 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref)
|
||||
};
|
||||
if (!is_window_func_sum_expr())
|
||||
setup_hybrid(thd, args[0], NULL);
|
||||
/* MIN/MAX can return NULL for empty set indepedent of the used column */
|
||||
/* MIN/MAX can return NULL for empty set independent of the used column */
|
||||
maybe_null= 1;
|
||||
result_field=0;
|
||||
null_value=1;
|
||||
@ -1660,7 +1660,7 @@ void Item_sum_count::cleanup()
|
||||
|
||||
|
||||
/*
|
||||
Avgerage
|
||||
Average
|
||||
*/
|
||||
bool Item_sum_avg::fix_length_and_dec()
|
||||
{
|
||||
@ -1906,7 +1906,7 @@ bool Item_sum_variance::fix_length_and_dec()
|
||||
/*
|
||||
According to the SQL2003 standard (Part 2, Foundations; sec 10.9,
|
||||
aggregate function; paragraph 7h of Syntax Rules), "the declared
|
||||
type of the result is an implementation-defined aproximate numeric
|
||||
type of the result is an implementation-defined approximate numeric
|
||||
type.
|
||||
*/
|
||||
|
||||
@ -1997,7 +1997,7 @@ double Item_sum_variance::val_real()
|
||||
is one or zero. If it's zero, i.e. a population variance, then we only
|
||||
set nullness when the count is zero.
|
||||
|
||||
Another way to read it is that 'sample' is the numerical threshhold, at and
|
||||
Another way to read it is that 'sample' is the numerical threshold, at and
|
||||
below which a 'count' number of items is called NULL.
|
||||
*/
|
||||
DBUG_ASSERT((sample == 0) || (sample == 1));
|
||||
@ -3723,7 +3723,7 @@ bool Item_func_group_concat::setup(THD *thd)
|
||||
{
|
||||
/*
|
||||
Force the create_tmp_table() to convert BIT columns to INT
|
||||
as we cannot compare two table records containg BIT fields
|
||||
as we cannot compare two table records containing BIT fields
|
||||
stored in the the tree used for distinct/order by.
|
||||
Moreover we don't even save in the tree record null bits
|
||||
where BIT fields store parts of their data.
|
||||
|
@ -251,7 +251,7 @@ class Window_spec;
|
||||
The field 'aggr_level' is to contain the nest level of the subquery
|
||||
where the set function is aggregated.
|
||||
|
||||
The field 'max_arg_level' is for the maximun of the nest levels of the
|
||||
The field 'max_arg_level' is for the maximum of the nest levels of the
|
||||
unbound column references occurred in the set function. A column reference
|
||||
is unbound within a set function if it is not bound by any subquery
|
||||
used as a subexpression in this function. A column reference is bound by
|
||||
|
@ -452,7 +452,7 @@ err:
|
||||
|
||||
|
||||
/**
|
||||
Create a formated date/time value in a string.
|
||||
Create a formatted date/time value in a string.
|
||||
*/
|
||||
|
||||
static bool make_date_time(const LEX_CSTRING &format, MYSQL_TIME *l_time,
|
||||
@ -1041,7 +1041,7 @@ uint week_mode(uint mode)
|
||||
a date at start of january) In this case one can get 53 for the
|
||||
first week of next year. This flag ensures that the week is
|
||||
relevant for the given year. Note that this flag is only
|
||||
releveant if WEEK_JANUARY is not set.
|
||||
relevant if WEEK_JANUARY is not set.
|
||||
|
||||
If set Week is in range 1-53.
|
||||
|
||||
@ -1357,7 +1357,7 @@ bool get_interval_value(Item *args,interval_type int_type, INTERVAL *interval)
|
||||
if (!(res= args->val_str_ascii(&str_value)))
|
||||
return (1);
|
||||
|
||||
/* record negative intervalls in interval->neg */
|
||||
/* record negative intervals in interval->neg */
|
||||
str=res->ptr();
|
||||
cs= res->charset();
|
||||
const char *end=str+res->length();
|
||||
@ -1608,7 +1608,7 @@ bool Item_func_from_days::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
|
||||
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for local
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for local
|
||||
time zone. Defines time zone (local) used for whole CURDATE function.
|
||||
*/
|
||||
void Item_func_curdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1619,7 +1619,7 @@ void Item_func_curdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for UTC
|
||||
time zone. Defines time zone (UTC) used for whole UTC_DATE function.
|
||||
*/
|
||||
void Item_func_curdate_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1699,7 +1699,7 @@ static void set_sec_part(ulong sec_part, MYSQL_TIME *ltime, Item *item)
|
||||
}
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for local
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for local
|
||||
time zone. Defines time zone (local) used for whole CURTIME function.
|
||||
*/
|
||||
void Item_func_curtime_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1713,7 +1713,7 @@ void Item_func_curtime_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for UTC
|
||||
time zone. Defines time zone (UTC) used for whole UTC_TIME function.
|
||||
*/
|
||||
void Item_func_curtime_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1768,7 +1768,7 @@ int Item_func_now_local::save_in_field(Field *field, bool no_conversions)
|
||||
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for local
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for local
|
||||
time zone. Defines time zone (local) used for whole NOW function.
|
||||
*/
|
||||
void Item_func_now_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1780,7 +1780,7 @@ void Item_func_now_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for UTC
|
||||
time zone. Defines time zone (UTC) used for whole UTC_TIMESTAMP function.
|
||||
*/
|
||||
void Item_func_now_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -1811,7 +1811,7 @@ bool Item_func_now::get_date(MYSQL_TIME *res,
|
||||
|
||||
|
||||
/**
|
||||
Converts current time in my_time_t to MYSQL_TIME represenatation for local
|
||||
Converts current time in my_time_t to MYSQL_TIME representation for local
|
||||
time zone. Defines time zone (local) used for whole SYSDATE function.
|
||||
*/
|
||||
void Item_func_sysdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
|
||||
@ -2122,7 +2122,7 @@ bool Item_func_convert_tz::get_date(MYSQL_TIME *ltime,
|
||||
uint not_used;
|
||||
my_time_tmp= from_tz->TIME_to_gmt_sec(ltime, ¬_used);
|
||||
ulong sec_part= ltime->second_part;
|
||||
/* my_time_tmp is guranteed to be in the allowed range */
|
||||
/* my_time_tmp is guaranteed to be in the allowed range */
|
||||
if (my_time_tmp)
|
||||
to_tz->gmt_sec_to_TIME(ltime, my_time_tmp);
|
||||
/* we rely on the fact that no timezone conversion can change sec_part */
|
||||
@ -2559,7 +2559,7 @@ bool Item_char_typecast::fix_length_and_dec()
|
||||
uint32 char_length;
|
||||
/*
|
||||
We always force character set conversion if cast_cs
|
||||
is a multi-byte character set. It garantees that the
|
||||
is a multi-byte character set. It guarantees that the
|
||||
result of CAST is a well-formed string.
|
||||
For single-byte character sets we allow just to copy
|
||||
from the argument. A single-byte character sets string
|
||||
|
@ -64,7 +64,7 @@ typedef struct my_xml_node_st
|
||||
} MY_XML_NODE;
|
||||
|
||||
|
||||
/* Lexical analizer token */
|
||||
/* Lexical analyzer token */
|
||||
typedef struct my_xpath_lex_st
|
||||
{
|
||||
int term; /* token type, see MY_XPATH_LEX_XXXXX below */
|
||||
@ -1101,7 +1101,7 @@ static Item* nametestfunc(MY_XPATH *xpath,
|
||||
|
||||
|
||||
/*
|
||||
Tokens consisting of one character, for faster lexical analizer.
|
||||
Tokens consisting of one character, for faster lexical analyzer.
|
||||
*/
|
||||
static char simpletok[128]=
|
||||
{
|
||||
@ -1421,7 +1421,7 @@ my_xpath_function(const char *beg, const char *end)
|
||||
}
|
||||
|
||||
|
||||
/* Initialize a lex analizer token */
|
||||
/* Initialize a lex analyzer token */
|
||||
static void
|
||||
my_xpath_lex_init(MY_XPATH_LEX *lex,
|
||||
const char *str, const char *strend)
|
||||
@ -1452,7 +1452,7 @@ my_xdigit(int c)
|
||||
SYNOPSYS
|
||||
Scan the next token from the input.
|
||||
lex->term is set to the scanned token type.
|
||||
lex->beg and lex->end are set to the beginnig
|
||||
lex->beg and lex->end are set to the beginning
|
||||
and to the end of the token.
|
||||
RETURN
|
||||
N/A
|
||||
@ -1478,7 +1478,7 @@ my_xpath_lex_scan(MY_XPATH *xpath,
|
||||
(const uchar*) end)) > 0 &&
|
||||
((ctype & (_MY_L | _MY_U)) || *beg == '_'))
|
||||
{
|
||||
// scan untill the end of the idenfitier
|
||||
// scan until the end of the identifier
|
||||
for (beg+= length;
|
||||
(length= xpath->cs->cset->ctype(xpath->cs, &ctype,
|
||||
(const uchar*) beg,
|
||||
@ -1607,7 +1607,7 @@ static int my_xpath_parse_AxisName(MY_XPATH *xpath)
|
||||
** Grammar rules, according to http://www.w3.org/TR/xpath
|
||||
** Implemented using recursive descendant method.
|
||||
** All the following grammar processing functions accept
|
||||
** a signle "xpath" argument and return 1 on success and 0 on error.
|
||||
** a single "xpath" argument and return 1 on success and 0 on error.
|
||||
** They also modify "xpath" argument by creating new items.
|
||||
*/
|
||||
|
||||
@ -2487,7 +2487,7 @@ static int my_xpath_parse_UnaryExpr(MY_XPATH *xpath)
|
||||
as it is in conflict with abbreviated step.
|
||||
1 + .123 does not work,
|
||||
1 + 0.123 does.
|
||||
Perhaps it is better to move this code into lex analizer.
|
||||
Perhaps it is better to move this code into lex analyzer.
|
||||
|
||||
RETURN
|
||||
1 - success
|
||||
@ -2838,7 +2838,7 @@ append_node(String *str, MY_XML_NODE *node)
|
||||
SYNOPSYS
|
||||
|
||||
A call-back function executed when XML parser
|
||||
is entering a tag or an attribue.
|
||||
is entering a tag or an attribute.
|
||||
Appends the new node into data->pxml.
|
||||
Increments data->level.
|
||||
|
||||
@ -2874,7 +2874,7 @@ int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len)
|
||||
SYNOPSYS
|
||||
|
||||
A call-back function executed when XML parser
|
||||
is entering into a tag or an attribue textual value.
|
||||
is entering into a tag or an attribute textual value.
|
||||
The value is appended into data->pxml.
|
||||
|
||||
RETURN
|
||||
@ -2902,7 +2902,7 @@ int xml_value(MY_XML_PARSER *st,const char *attr, size_t len)
|
||||
SYNOPSYS
|
||||
|
||||
A call-back function executed when XML parser
|
||||
is leaving a tag or an attribue.
|
||||
is leaving a tag or an attribute.
|
||||
Decrements data->level.
|
||||
|
||||
RETURN
|
||||
|
@ -228,7 +228,7 @@ void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info,
|
||||
{
|
||||
/*
|
||||
This in fact never happens, as we have only partial BLOB
|
||||
keys yet anyway, so it's difficult to find any sence to
|
||||
keys yet anyway, so it's difficult to find any sense to
|
||||
restore the part of a record.
|
||||
Maybe this branch is to be removed, but now we
|
||||
have to ignore GCov compaining.
|
||||
@ -610,8 +610,8 @@ int key_rec_cmp(void *key_p, uchar *first_rec, uchar *second_rec)
|
||||
max length. The exceptions are the BLOB and VARCHAR field types
|
||||
that take the max length into account.
|
||||
*/
|
||||
if ((result= field->cmp_max(field->ptr+first_diff, field->ptr+sec_diff,
|
||||
key_part->length)))
|
||||
if ((result= field->cmp_prefix(field->ptr+first_diff, field->ptr+sec_diff,
|
||||
key_part->length)))
|
||||
DBUG_RETURN(result);
|
||||
next_loop:
|
||||
key_part++;
|
||||
|
@ -23,7 +23,7 @@
|
||||
#include "lex_symbol.h"
|
||||
|
||||
SYM_GROUP sym_group_common= {"", ""};
|
||||
SYM_GROUP sym_group_geom= {"Spatial extentions", "HAVE_SPATIAL"};
|
||||
SYM_GROUP sym_group_geom= {"Spatial extensions", "HAVE_SPATIAL"};
|
||||
SYM_GROUP sym_group_rtree= {"RTree keys", "HAVE_RTREE_KEYS"};
|
||||
|
||||
/* We don't want to include sql_yacc.h into gen_lex_hash */
|
||||
|
@ -1099,7 +1099,7 @@ bool Global_read_lock::make_global_read_lock_block_commit(THD *thd)
|
||||
MDL_request mdl_request;
|
||||
DBUG_ENTER("make_global_read_lock_block_commit");
|
||||
/*
|
||||
If we didn't succeed lock_global_read_lock(), or if we already suceeded
|
||||
If we didn't succeed lock_global_read_lock(), or if we already succeeded
|
||||
make_global_read_lock_block_commit(), do nothing.
|
||||
*/
|
||||
|
||||
|
sql/log.cc
@ -149,7 +149,7 @@ void setup_log_handling()
/**
purge logs, master and slave sides both, related error code
convertor.
converter.
Called from @c purge_error_message(), @c MYSQL_BIN_LOG::reset_logs()
@param res an internal to purging routines error code
@ -358,7 +358,7 @@ public:
|
||||
never zero.
|
||||
|
||||
This is done while calling the constructor binlog_cache_mngr.
|
||||
We cannot set informaton in the constructor binlog_cache_data
|
||||
We cannot set information in the constructor binlog_cache_data
|
||||
because the space for binlog_cache_mngr is allocated through
|
||||
a placement new.
|
||||
|
||||
@ -2941,7 +2941,7 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
|
||||
|
||||
mysql_mutex_lock(&LOCK_log);
|
||||
if (is_open())
|
||||
{ // Safety agains reopen
|
||||
{ // Safety against reopen
|
||||
int tmp_errno= 0;
|
||||
char buff[80], *end;
|
||||
char query_time_buff[22+7], lock_time_buff[22+7];
|
||||
@ -3222,7 +3222,7 @@ void MYSQL_BIN_LOG::cleanup()
|
||||
|
||||
/*
|
||||
Free data for global binlog state.
|
||||
We can't do that automaticly as we need to do this before
|
||||
We can't do that automatically as we need to do this before
|
||||
safemalloc is shut down
|
||||
*/
|
||||
if (!is_relay_log)
|
||||
@ -3993,7 +3993,7 @@ err:
|
||||
|
||||
|
||||
/**
|
||||
Delete all logs refered to in the index file.
|
||||
Delete all logs referred to in the index file.
|
||||
|
||||
The new index file will only contain this file.
|
||||
|
||||
@ -5556,7 +5556,7 @@ binlog_cache_mngr *THD::binlog_setup_trx_data()
|
||||
|
||||
- Start a statement transaction to allow us to truncate the cache.
|
||||
|
||||
- Save the currrent binlog position so that we can roll back the
|
||||
- Save the current binlog position so that we can roll back the
|
||||
statement by truncating the cache.
|
||||
|
||||
We only update the saved position if the old one was undefined,
|
||||
@ -6754,7 +6754,7 @@ static const char* get_first_binlog(char* buf_arg)
|
||||
}
|
||||
if (normalize_binlog_name(buf_arg, fname, false))
|
||||
{
|
||||
errmsg= "cound not normalize the first file name in the binlog index";
|
||||
errmsg= "could not normalize the first file name in the binlog index";
|
||||
goto end;
|
||||
}
|
||||
end:
|
||||
@ -9754,7 +9754,7 @@ TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint)
|
||||
than compare all found against each other to find the one pointing to the
|
||||
most recent binlog.
|
||||
|
||||
Note also that we need to first release LOCK_xid_list, then aquire
|
||||
Note also that we need to first release LOCK_xid_list, then acquire
|
||||
LOCK_log, then re-aquire LOCK_xid_list. If we were to take LOCK_log while
|
||||
holding LOCK_xid_list, we might deadlock with other threads that take the
|
||||
locks in the opposite order.
|
||||
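The comment above spells out the locking rule the binlog code follows: never take LOCK_log while holding LOCK_xid_list; instead release, take LOCK_log, then re-acquire LOCK_xid_list, so all threads take the two locks in the same order. A generic sketch of that discipline with std::mutex (names are illustrative, not the server's actual lock objects):

    #include <mutex>

    std::mutex lock_xid_list;   // stand-ins for LOCK_xid_list / LOCK_log
    std::mutex lock_log;

    void do_work_needing_both() {
        std::unique_lock<std::mutex> xids(lock_xid_list);
        // ... work that only needs the xid list ...

        // To touch the log we must not keep holding lock_xid_list:
        // release it, take lock_log, then re-acquire in the agreed order.
        xids.unlock();
        std::lock_guard<std::mutex> log(lock_log);
        xids.lock();
        // ... work that needs both, taken in the deadlock-free order ...
    }

    int main() { do_work_needing_both(); }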
@ -9839,7 +9839,7 @@ TC_LOG_BINLOG::commit_checkpoint_notify(void *cookie)
|
||||
necessary stuff.
|
||||
|
||||
In the future, this thread could also be used to do log rotation in the
|
||||
background, which could elimiate all stalls around binlog rotations.
|
||||
background, which could eliminate all stalls around binlog rotations.
|
||||
*/
|
||||
pthread_handler_t
|
||||
binlog_background_thread(void *arg __attribute__((unused)))
|
||||
|
@ -4205,7 +4205,7 @@ get_str_len_and_pointer(const Log_event::Byte **src,
|
||||
const Log_event::Byte *end)
|
||||
{
|
||||
if (*src >= end)
|
||||
return -1; // Will be UINT_MAX in two-complement arithmetics
|
||||
return -1; // Will be UINT_MAX in two-complement arithmetic
|
||||
uint length= **src;
|
||||
if (length > 0)
|
||||
{
|
||||
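The comment above notes that returning -1 from this function shows up as UINT_MAX once the caller stores the result in an unsigned variable; that is simply the standard modular conversion from signed to unsigned. A two-line demonstration:

    #include <cstdio>
    #include <climits>

    int get_len() { return -1; }   // error marker, as in the hunk above

    int main() {
        unsigned int len = get_len();                  // -1 converts to UINT_MAX
        std::printf("%u %d\n", len, len == UINT_MAX);  // prints UINT_MAX and 1
    }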
@ -4571,7 +4571,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
|
||||
|
||||
/* A 2nd variable part; this is common to all versions */
|
||||
memcpy((char*) start, end, data_len); // Copy db and query
|
||||
start[data_len]= '\0'; // End query with \0 (For safetly)
|
||||
start[data_len]= '\0'; // End query with \0 (For safety)
|
||||
db= (char *)start;
|
||||
query= (char *)(start + db_len + 1);
|
||||
q_len= data_len - db_len -1;
|
||||
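The lines above copy the event's variable part as one block laid out as the database name, a terminating '\0', then the query text, which is why q_len works out to data_len - db_len - 1. A small sketch of reading that layout back out of a raw buffer (the buffer contents here are made up for illustration):

    #include <cstdio>
    #include <cstring>

    int main() {
        // Layout used above: <db>\0<query>, with a trailing \0 added for safety.
        const char buf[] = "test\0SELECT 1";       // db = "test", query = "SELECT 1"
        size_t data_len = sizeof(buf) - 1;         // 13 bytes of payload
        size_t db_len   = std::strlen(buf);        // 4

        const char *db    = buf;
        const char *query = buf + db_len + 1;
        size_t q_len      = data_len - db_len - 1; // 8

        std::printf("db='%s' query='%.*s' q_len=%zu\n",
                    db, (int)q_len, query, q_len);
    }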
@ -6225,7 +6225,7 @@ int Format_description_log_event::do_update_pos(rpl_group_info *rgi)
|
||||
If we do not skip stepping the group log position (and the
|
||||
server id was changed when restarting the server), it might well
|
||||
be that we start executing at a position that is invalid, e.g.,
|
||||
at a Rows_log_event or a Query_log_event preceeded by a
|
||||
at a Rows_log_event or a Query_log_event preceded by a
|
||||
Intvar_log_event instead of starting at a Table_map_log_event or
|
||||
the Intvar_log_event respectively.
|
||||
*/
|
||||
@ -6337,7 +6337,7 @@ Format_description_log_event::is_version_before_checksum(const master_version_sp
|
||||
|
||||
@return the version-safe checksum alg descriptor where zero
|
||||
designates no checksum, 255 - the orginator is
|
||||
checksum-unaware (effectively no checksum) and the actuall
|
||||
checksum-unaware (effectively no checksum) and the actual
|
||||
[1-254] range alg descriptor.
|
||||
*/
|
||||
enum enum_binlog_checksum_alg get_checksum_alg(const char* buf, ulong len)
|
||||
@ -7043,7 +7043,7 @@ int Load_log_event::do_apply_event(NET* net, rpl_group_info *rgi,
|
||||
/*
|
||||
When replication is running fine, if it was DUP_ERROR on the
|
||||
master then we could choose IGNORE here, because if DUP_ERROR
|
||||
suceeded on master, and data is identical on the master and slave,
|
||||
succeeded on master, and data is identical on the master and slave,
|
||||
then there should be no uniqueness errors on slave, so IGNORE is
|
||||
the same as DUP_ERROR. But in the unlikely case of uniqueness errors
|
||||
(because the data on the master and slave happen to be different
|
||||
@ -7580,7 +7580,7 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg,
|
||||
|
||||
/*
|
||||
Used to record GTID while sending binlog to slave, without having to
|
||||
fully contruct every Gtid_log_event() needlessly.
|
||||
fully construct every Gtid_log_event() needlessly.
|
||||
*/
|
||||
bool
|
||||
Gtid_log_event::peek(const char *event_start, size_t event_len,
|
||||
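The comment explains why peek() exists: while feeding the binlog to a slave the server only needs a couple of GTID fields, so it reads them straight out of the serialized bytes instead of constructing a full Gtid_log_event. A generic sketch of that "peek a field out of a wire buffer" idea, using an invented three-field little-endian record rather than the real event format:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Hypothetical serialized record: u32 domain_id, u32 server_id, u64 seq_no.
    // peek_seq_no() pulls out just one field without building an object.
    bool peek_seq_no(const unsigned char *buf, size_t len, uint64_t *seq_no) {
        if (len < 16)
            return false;                 // record too short
        std::memcpy(seq_no, buf + 8, 8);  // assumes a little-endian host for brevity
        return true;
    }

    int main() {
        unsigned char rec[16] = {0};
        uint64_t stored = 42;
        std::memcpy(rec + 8, &stored, 8);

        uint64_t seq = 0;
        if (peek_seq_no(rec, sizeof(rec), &seq))
            std::printf("seq_no=%llu\n", (unsigned long long)seq);
    }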
@ -8103,7 +8103,7 @@ Gtid_list_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
|
||||
|
||||
/*
|
||||
Used to record gtid_list event while sending binlog to slave, without having to
|
||||
fully contruct the event object.
|
||||
fully construct the event object.
|
||||
*/
|
||||
bool
|
||||
Gtid_list_log_event::peek(const char *event_start, uint32 event_len,
|
||||
@ -8183,7 +8183,7 @@ Intvar_log_event::Intvar_log_event(const char* buf,
|
||||
const Format_description_log_event* description_event)
|
||||
:Log_event(buf, description_event)
|
||||
{
|
||||
/* The Post-Header is empty. The Varible Data part begins immediately. */
|
||||
/* The Post-Header is empty. The Variable Data part begins immediately. */
|
||||
buf+= description_event->common_header_len +
|
||||
description_event->post_header_len[INTVAR_EVENT-1];
|
||||
type= buf[I_TYPE_OFFSET];
|
||||
@ -9421,7 +9421,7 @@ void Create_file_log_event::pack_info(Protocol *protocol)
|
||||
|
||||
/**
|
||||
Create_file_log_event::do_apply_event()
|
||||
Constructor for Create_file_log_event to intantiate an event
|
||||
Constructor for Create_file_log_event to instantiate an event
|
||||
from the relay log on the slave.
|
||||
|
||||
@retval
|
||||
@ -10471,7 +10471,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
/* if my_bitmap_init fails, catched in is_valid() */
|
||||
/* if my_bitmap_init fails, caught in is_valid() */
|
||||
if (likely(!my_bitmap_init(&m_cols,
|
||||
m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
|
||||
m_width,
|
||||
@ -10888,7 +10888,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
|
||||
*/
|
||||
|
||||
{
|
||||
DBUG_PRINT("debug", ("Checking compability of tables to lock - tables_to_lock: %p",
|
||||
DBUG_PRINT("debug", ("Checking compatibility of tables to lock - tables_to_lock: %p",
|
||||
rgi->tables_to_lock));
|
||||
|
||||
/**
|
||||
@ -10943,7 +10943,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
|
||||
ptr->table->s->table_name.str));
|
||||
/*
|
||||
We should not honour --slave-skip-errors at this point as we are
|
||||
having severe errors which should not be skiped.
|
||||
having severe errors which should not be skipped.
|
||||
*/
|
||||
thd->is_slave_error= 1;
|
||||
/* remove trigger's tables */
|
||||
@ -11324,7 +11324,7 @@ static int rows_event_stmt_cleanup(rpl_group_info *rgi, THD * thd)
|
||||
/**
|
||||
The method either increments the relay log position or
|
||||
commits the current statement and increments the master group
|
||||
possition if the event is STMT_END_F flagged and
|
||||
position if the event is STMT_END_F flagged and
|
||||
the statement corresponds to the autocommit query (i.e replicated
|
||||
without wrapping in BEGIN/COMMIT)
|
||||
|
||||
@ -11470,7 +11470,7 @@ public:
|
||||
|
||||
/**
|
||||
Print an event "body" cache to @c file possibly in two fragments.
|
||||
Each fragement is optionally per @c do_wrap to produce an SQL statement.
|
||||
Each fragment is optionally per @c do_wrap to produce an SQL statement.
|
||||
|
||||
@param file a file to print to
|
||||
@param body the "body" IO_CACHE of event
|
||||
@ -13185,7 +13185,7 @@ record_compare_exit:
|
||||
Find the best key to use when locating the row in @c find_row().
|
||||
|
||||
A primary key is preferred if it exists; otherwise a unique index is
|
||||
preferred. Else we pick the index with the smalles rec_per_key value.
|
||||
preferred. Else we pick the index with the smallest rec_per_key value.
|
||||
|
||||
If a suitable key is found, set @c m_key, @c m_key_nr and @c m_key_info
|
||||
member fields appropriately.
|
||||
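The doc comment above describes the key selection order used when locating a row: a primary key if one exists, otherwise a unique index, otherwise the key with the smallest rec_per_key. A compact sketch of that preference order over a hypothetical key list (KeyInfo and its fields are invented for illustration, not the server's KEY structure):

    #include <cstdio>
    #include <vector>

    struct KeyInfo {
        bool   is_primary;
        bool   is_unique;
        double rec_per_key;   // lower = more selective
    };

    // Returns the index of the preferred key, or -1 if the list is empty.
    int pick_best_key(const std::vector<KeyInfo> &keys) {
        int best = -1;
        for (size_t i = 0; i < keys.size(); i++) {
            if (keys[i].is_primary)
                return (int)i;                        // primary key always wins
            if (best < 0 ||
                (keys[i].is_unique && !keys[best].is_unique) ||
                (keys[i].is_unique == keys[best].is_unique &&
                 keys[i].rec_per_key < keys[best].rec_per_key))
                best = (int)i;                        // prefer unique, then selectivity
        }
        return best;
    }

    int main() {
        std::vector<KeyInfo> keys = {{false, false, 10.0}, {false, true, 3.0}};
        std::printf("best key: %d\n", pick_best_key(keys));   // prints 1
    }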
@ -13318,7 +13318,7 @@ static int row_not_found_error(rpl_group_info *rgi)
|
||||
Locate the current row in event's table.
|
||||
|
||||
The current row is pointed by @c m_curr_row. Member @c m_width tells
|
||||
how many columns are there in the row (this can be differnet from
|
||||
how many columns are there in the row (this can be different from
|
||||
the number of columns in the table). It is assumed that event's
|
||||
table is already open and pointed by @c m_table.
|
||||
|
||||
@ -13359,7 +13359,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
|
||||
rpl_row_tabledefs.test specifies that
|
||||
if the extra field on the slave does not have a default value
|
||||
and this is okay with Delete or Update events.
|
||||
Todo: fix wl3228 hld that requires defauls for all types of events
|
||||
Todo: fix wl3228 hld that requires defaults for all types of events
|
||||
*/
|
||||
|
||||
prepare_record(table, m_width, FALSE);
|
||||
@ -13603,7 +13603,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
|
||||
while (record_compare(table));
|
||||
|
||||
/*
|
||||
Note: above record_compare will take into accout all record fields
|
||||
Note: above record_compare will take into account all record fields
|
||||
which might be incorrect in case a partial row was given in the event
|
||||
*/
|
||||
|
||||
|
@ -455,7 +455,7 @@ class String;
|
||||
/**
|
||||
@def LOG_EVENT_ARTIFICIAL_F
|
||||
|
||||
Artificial events are created arbitarily and not written to binary
|
||||
Artificial events are created arbitrarily and not written to binary
|
||||
log
|
||||
|
||||
These events should not update the master log position when slave
|
||||
@ -932,13 +932,13 @@ private:
|
||||
};
|
||||
|
||||
/**
|
||||
the struct aggregates two paramenters that identify an event
|
||||
the struct aggregates two parameters that identify an event
|
||||
uniquely in scope of communication of a particular master and slave couple.
|
||||
I.e there can not be 2 events from the same staying connected master which
|
||||
have the same coordinates.
|
||||
@note
|
||||
Such identifier is not yet unique generally as the event originating master
|
||||
is resetable. Also the crashed master can be replaced with some other.
|
||||
is resettable. Also the crashed master can be replaced with some other.
|
||||
*/
|
||||
typedef struct event_coordinates
|
||||
{
|
||||
@ -2730,7 +2730,7 @@ public:
|
||||
uint8 number_of_event_types;
|
||||
/*
|
||||
The list of post-headers' lengths followed
|
||||
by the checksum alg decription byte
|
||||
by the checksum alg description byte
|
||||
*/
|
||||
uint8 *post_header_len;
|
||||
struct master_version_split {
|
||||
@ -3070,7 +3070,7 @@ public:
|
||||
*/
|
||||
bool is_deferred() { return deferred; }
|
||||
/*
|
||||
In case of the deffered applying the variable instance is flagged
|
||||
In case of the deferred applying the variable instance is flagged
|
||||
and the parsing time query id is stored to be used at applying time.
|
||||
*/
|
||||
void set_deferred(query_id_t qid) { deferred= true; query_id= qid; }
|
||||
@ -3564,7 +3564,7 @@ public:
|
||||
bool write_data_header();
|
||||
bool write_data_body();
|
||||
/*
|
||||
Cut out Create_file extentions and
|
||||
Cut out Create_file extensions and
|
||||
write it as Load event - used on the slave
|
||||
*/
|
||||
bool write_base();
|
||||
@ -4938,7 +4938,7 @@ private:
|
||||
/**
|
||||
@class Incident_log_event
|
||||
|
||||
Class representing an incident, an occurance out of the ordinary,
|
||||
Class representing an incident, an occurence out of the ordinary,
|
||||
that happened on the master.
|
||||
|
||||
The event is used to inform the slave that something out of the
|
||||
@ -4982,7 +4982,7 @@ public:
|
||||
m_message.str= NULL; /* Just as a precaution */
|
||||
m_message.length= 0;
|
||||
set_direct_logging();
|
||||
/* Replicate the incident irregardless of @@skip_replication. */
|
||||
/* Replicate the incident regardless of @@skip_replication. */
|
||||
flags&= ~LOG_EVENT_SKIP_REPLICATION_F;
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
@ -5003,7 +5003,7 @@ public:
|
||||
strmake(m_message.str, msg.str, msg.length);
|
||||
m_message.length= msg.length;
|
||||
set_direct_logging();
|
||||
/* Replicate the incident irregardless of @@skip_replication. */
|
||||
/* Replicate the incident regardless of @@skip_replication. */
|
||||
flags&= ~LOG_EVENT_SKIP_REPLICATION_F;
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
@ -848,7 +848,7 @@ int Write_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
|
||||
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
|
||||
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
|
||||
/*
|
||||
reseting the extra with
|
||||
resetting the extra with
|
||||
table->file->extra(HA_EXTRA_NO_IGNORE_NO_KEY);
|
||||
fires bug#27077
|
||||
todo: explain or fix
|
||||
@ -1240,7 +1240,7 @@ Old_rows_log_event::Old_rows_log_event(const char *buf, uint event_len,
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
/* if my_bitmap_init fails, catched in is_valid() */
|
||||
/* if my_bitmap_init fails, caught in is_valid() */
|
||||
if (likely(!my_bitmap_init(&m_cols,
|
||||
m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
|
||||
m_width,
|
||||
|
@ -4102,13 +4102,15 @@ static int replace_user_table(THD *thd, const User_table &user_table,
|
||||
table->key_info->key_length);
|
||||
|
||||
if (table->file->ha_index_read_idx_map(table->record[0], 0, user_key,
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT))
|
||||
HA_WHOLE_KEY, HA_READ_KEY_EXACT))
|
||||
{
|
||||
/* what == 'N' means revoke */
|
||||
if (what == 'N')
|
||||
{
|
||||
my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str);
|
||||
if (combo.host.length)
|
||||
my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str);
|
||||
else
|
||||
my_error(ER_INVALID_ROLE, MYF(0), combo.user.str);
|
||||
goto end;
|
||||
}
|
||||
/*
|
||||
@ -5623,6 +5625,8 @@ static void propagate_role_grants(ACL_ROLE *role,
|
||||
enum PRIVS_TO_MERGE::what what,
|
||||
const char *db= 0, const char *name= 0)
|
||||
{
|
||||
if (!role)
|
||||
return;
|
||||
|
||||
mysql_mutex_assert_owner(&acl_cache->lock);
|
||||
PRIVS_TO_MERGE data= { what, db, name };
|
||||
@ -7796,6 +7800,21 @@ err:
|
||||
}
|
||||
|
||||
|
||||
static void check_grant_column_int(GRANT_TABLE *grant_table, const char *name,
|
||||
uint length, ulong *want_access)
|
||||
{
|
||||
if (grant_table)
|
||||
{
|
||||
*want_access&= ~grant_table->privs;
|
||||
if (*want_access & grant_table->cols)
|
||||
{
|
||||
GRANT_COLUMN *grant_column= column_hash_search(grant_table, name, length);
|
||||
if (grant_column)
|
||||
*want_access&= ~grant_column->rights;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Check column rights in given security context
|
||||
|
||||
@ -7818,9 +7837,6 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant,
|
||||
const char *db_name, const char *table_name,
|
||||
const char *name, uint length, Security_context *sctx)
|
||||
{
|
||||
GRANT_TABLE *grant_table;
|
||||
GRANT_TABLE *grant_table_role;
|
||||
GRANT_COLUMN *grant_column;
|
||||
ulong want_access= grant->want_privilege & ~grant->privilege;
|
||||
DBUG_ENTER("check_grant_column");
|
||||
DBUG_PRINT("enter", ("table: %s want_access: %lu", table_name, want_access));
|
||||
@ -7845,45 +7861,18 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant,
|
||||
grant->version= grant_version; /* purecov: inspected */
|
||||
}
|
||||
|
||||
grant_table= grant->grant_table_user;
|
||||
grant_table_role= grant->grant_table_role;
|
||||
check_grant_column_int(grant->grant_table_user, name, length, &want_access);
|
||||
check_grant_column_int(grant->grant_table_role, name, length, &want_access);
|
||||
|
||||
if (!grant_table && !grant_table_role)
|
||||
goto err;
|
||||
|
||||
if (grant_table)
|
||||
{
|
||||
grant_column= column_hash_search(grant_table, name, length);
|
||||
if (grant_column)
|
||||
{
|
||||
want_access&= ~grant_column->rights;
|
||||
}
|
||||
}
|
||||
if (grant_table_role)
|
||||
{
|
||||
grant_column= column_hash_search(grant_table_role, name, length);
|
||||
if (grant_column)
|
||||
{
|
||||
want_access&= ~grant_column->rights;
|
||||
}
|
||||
}
|
||||
if (!want_access)
|
||||
{
|
||||
mysql_rwlock_unlock(&LOCK_grant);
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
err:
|
||||
mysql_rwlock_unlock(&LOCK_grant);
|
||||
if (!want_access)
|
||||
DBUG_RETURN(0);
|
||||
|
||||
char command[128];
|
||||
get_privilege_desc(command, sizeof(command), want_access);
|
||||
/* TODO perhaps error should print current rolename aswell */
|
||||
my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0),
|
||||
command,
|
||||
sctx->priv_user,
|
||||
sctx->host_or_ip,
|
||||
name,
|
||||
table_name);
|
||||
my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), command, sctx->priv_user,
|
||||
sctx->host_or_ip, name, table_name);
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
|
||||
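The refactoring above folds two identical blocks (one for the user's grant table, one for the role's) into check_grant_column_int(), which clears the table-level privilege bits and then, if column-level bits are still wanted, clears the bits granted on the specific column. A simplified model of that masking with plain bit flags (the types and names here are invented, not the server's ACL structures):

    #include <cstdio>

    using privs_t = unsigned long;

    struct GrantEntry {
        privs_t table_privs;    // privileges granted on the whole table
        privs_t column_privs;   // privileges granted on this one column
    };

    // Clear the bits this grant source satisfies; a null source is a no-op,
    // mirroring how the helper is applied to both the user and the role grants.
    void apply_grant(const GrantEntry *g, privs_t *want_access) {
        if (!g)
            return;
        *want_access &= ~g->table_privs;
        if (*want_access)
            *want_access &= ~g->column_privs;
    }

    int main() {
        const privs_t SELECT_ACL = 1, UPDATE_ACL = 2;
        GrantEntry user_grant = {SELECT_ACL, 0};
        GrantEntry role_grant = {0, UPDATE_ACL};

        privs_t want = SELECT_ACL | UPDATE_ACL;
        apply_grant(&user_grant, &want);   // user grant covers SELECT
        apply_grant(&role_grant, &want);   // role's column grant covers UPDATE
        std::printf("still missing: %lu\n", want);   // 0 => access allowed
    }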
|
@ -765,8 +765,18 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
|
||||
{
|
||||
compl_result_code= result_code= HA_ADMIN_INVALID;
|
||||
}
|
||||
|
||||
/*
|
||||
The check for Alter_info::ALTER_ADMIN_PARTITION implements this logic:
|
||||
do not collect EITS STATS for this syntax:
|
||||
ALTER TABLE ... ANALYZE PARTITION p
|
||||
EITS statistics is global (not per-partition). Collecting global stats
|
||||
is much more expensive processing just one partition, so the most
|
||||
appropriate action is to just not collect EITS stats for this command.
|
||||
*/
|
||||
collect_eis=
|
||||
(table->table->s->table_category == TABLE_CATEGORY_USER &&
|
||||
!(lex->alter_info.flags &= Alter_info::ALTER_ADMIN_PARTITION) &&
|
||||
(get_use_stat_tables_mode(thd) > NEVER ||
|
||||
lex->with_persistent_for_clause));
|
||||
}
|
||||
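The comment block added above states the rule: engine-independent (EITS) statistics are table-global, so ALTER TABLE ... ANALYZE PARTITION should not trigger collecting them. Expressed as a plain flag test over a hypothetical command descriptor (the flag and struct names are invented for the sketch):

    #include <cstdio>

    enum : unsigned { ALTER_ADMIN_PARTITION = 1u << 0 };   // "... ANALYZE PARTITION p"

    struct Command {
        unsigned alter_flags;
        bool     is_user_table;
    };

    // Collect global (EITS-style) statistics only when the whole table is being
    // analyzed; a partition-only admin command skips the expensive global pass.
    bool should_collect_global_stats(const Command &cmd) {
        return cmd.is_user_table && !(cmd.alter_flags & ALTER_ADMIN_PARTITION);
    }

    int main() {
        Command whole_table   = {0, true};
        Command one_partition = {ALTER_ADMIN_PARTITION, true};
        std::printf("%d %d\n",
                    should_collect_global_stats(whole_table),     // 1
                    should_collect_global_stats(one_partition));  // 0
    }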
|
@ -1233,7 +1233,7 @@ bool wait_while_table_is_used(THD *thd, TABLE *table,
|
||||
FALSE);
|
||||
/* extra() call must come only after all instances above are closed */
|
||||
if (function != HA_EXTRA_NOT_USED)
|
||||
(void) table->file->extra(function);
|
||||
DBUG_RETURN(table->file->extra(function));
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
||||
@ -7506,15 +7506,11 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
|
||||
FALSE ok; In this case *map will include the chosen index
|
||||
TRUE error
|
||||
*/
|
||||
bool setup_tables_and_check_access(THD *thd,
|
||||
Name_resolution_context *context,
|
||||
bool setup_tables_and_check_access(THD *thd, Name_resolution_context *context,
|
||||
List<TABLE_LIST> *from_clause,
|
||||
TABLE_LIST *tables,
|
||||
List<TABLE_LIST> &leaves,
|
||||
bool select_insert,
|
||||
ulong want_access_first,
|
||||
ulong want_access,
|
||||
bool full_table_list)
|
||||
TABLE_LIST *tables, List<TABLE_LIST> &leaves,
|
||||
bool select_insert, ulong want_access_first,
|
||||
ulong want_access, bool full_table_list)
|
||||
{
|
||||
DBUG_ENTER("setup_tables_and_check_access");
|
||||
|
||||
|
@ -3868,7 +3868,8 @@ bool st_select_lex::optimize_unflattened_subqueries(bool const_only)
|
||||
sl->options|= SELECT_DESCRIBE;
|
||||
inner_join->select_options|= SELECT_DESCRIBE;
|
||||
}
|
||||
res= inner_join->optimize();
|
||||
if ((res= inner_join->optimize()))
|
||||
return TRUE;
|
||||
if (!inner_join->cleaned)
|
||||
sl->update_used_tables();
|
||||
sl->update_correlated_cache();
|
||||
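The change above stops discarding the result of inner_join->optimize(): the return code is captured and a failure is propagated to the caller before any later phase runs. The same pattern in miniature (step() and its error meaning are placeholders):

    #include <cstdio>

    // Placeholder for a fallible optimization step: non-zero means failure.
    int step(bool fail) { return fail ? 1 : 0; }

    bool optimize_all(bool make_it_fail) {
        int res;
        if ((res = step(make_it_fail)))   // capture and test in one place
            return true;                  // propagate the error, skip later phases
        // ... further phases that must not run after a failed step ...
        return false;
    }

    int main() {
        std::printf("%d %d\n", optimize_all(false), optimize_all(true));   // 0 1
    }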
|
@ -1109,11 +1109,11 @@ int JOIN::optimize()
|
||||
if (optimization_state != JOIN::NOT_OPTIMIZED)
|
||||
return FALSE;
|
||||
optimization_state= JOIN::OPTIMIZATION_IN_PROGRESS;
|
||||
create_explain_query_if_not_exists(thd->lex, thd->mem_root);
|
||||
|
||||
int res= optimize_inner();
|
||||
if (!res && have_query_plan != QEP_DELETED)
|
||||
{
|
||||
create_explain_query_if_not_exists(thd->lex, thd->mem_root);
|
||||
have_query_plan= QEP_AVAILABLE;
|
||||
|
||||
/*
|
||||
|
@ -1104,8 +1104,7 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
if (setup_tables_and_check_access(thd, &select_lex->context,
|
||||
&select_lex->top_join_list,
|
||||
table_list,
|
||||
&select_lex->top_join_list, table_list,
|
||||
select_lex->leaf_tables,
|
||||
FALSE, UPDATE_ACL, SELECT_ACL, TRUE) ||
|
||||
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
|
||||
|
@ -450,9 +450,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
|
||||
if (thd->open_temporary_tables(lex->query_tables) ||
|
||||
open_and_lock_tables(thd, lex->query_tables, TRUE, 0))
|
||||
{
|
||||
view= lex->unlink_first_table(&link_to_local);
|
||||
res= TRUE;
|
||||
goto err;
|
||||
goto err_no_relink;
|
||||
}
|
||||
|
||||
view= lex->unlink_first_table(&link_to_local);
|
||||
@ -703,10 +702,12 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
|
||||
|
||||
WSREP_ERROR_LABEL:
|
||||
res= TRUE;
|
||||
goto err_no_relink;
|
||||
|
||||
err:
|
||||
THD_STAGE_INFO(thd, stage_end);
|
||||
lex->link_first_table_back(view, link_to_local);
|
||||
err_no_relink:
|
||||
unit->cleanup();
|
||||
DBUG_RETURN(res || thd->is_error());
|
||||
}
|
||||
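The reshuffled error handling above distinguishes two exits: err, which re-links the previously unlinked view table before the shared cleanup, and err_no_relink, for paths where the table was never unlinked. A bare-bones sketch of that two-label cleanup shape (the state being restored is just an int here):

    #include <cstdio>

    bool do_create(bool fail_early, bool fail_late) {
        bool res = false;
        int unlinked = 0;            // stands in for the unlinked first table

        if (fail_early) {            // failed before anything was unlinked
            res = true;
            goto err_no_relink;
        }

        unlinked = 1;                // from here on, cleanup must re-link it
        if (fail_late) {
            res = true;
            goto err;
        }
        // ... success path also falls through the same cleanup ...

    err:
        if (unlinked)
            std::printf("re-linking table\n");
    err_no_relink:
        std::printf("common cleanup\n");
        return res;
    }

    int main() { do_create(false, true); do_create(true, false); }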
|
@ -864,7 +864,7 @@ buf_LRU_check_size_of_non_data_objects(
|
||||
+ UT_LIST_GET_LEN(buf_pool->LRU))
|
||||
< buf_pool->curr_size / 3) {
|
||||
|
||||
if (!buf_lru_switched_on_innodb_mon) {
|
||||
if (!buf_lru_switched_on_innodb_mon && srv_monitor_event) {
|
||||
|
||||
/* Over 67 % of the buffer pool is occupied by lock
|
||||
heaps or the adaptive hash index. This may be a memory
|
||||
|
@ -1106,9 +1106,6 @@ fts_cache_clear(
|
||||
index_cache->doc_stats = NULL;
|
||||
}
|
||||
|
||||
mem_heap_free(static_cast<mem_heap_t*>(cache->sync_heap->arg));
|
||||
cache->sync_heap->arg = NULL;
|
||||
|
||||
fts_need_sync = false;
|
||||
|
||||
cache->total_size = 0;
|
||||
@ -1116,6 +1113,9 @@ fts_cache_clear(
|
||||
mutex_enter((ib_mutex_t*) &cache->deleted_lock);
|
||||
cache->deleted_doc_ids = NULL;
|
||||
mutex_exit((ib_mutex_t*) &cache->deleted_lock);
|
||||
|
||||
mem_heap_free(static_cast<mem_heap_t*>(cache->sync_heap->arg));
|
||||
cache->sync_heap->arg = NULL;
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
|
@ -4417,16 +4417,7 @@ innobase_commit_low(
|
||||
const bool is_wsrep = trx->is_wsrep();
|
||||
THD* thd = trx->mysql_thd;
|
||||
if (is_wsrep) {
|
||||
#ifdef WSREP_PROC_INFO
|
||||
char info[64];
|
||||
info[sizeof(info) - 1] = '\0';
|
||||
snprintf(info, sizeof(info) - 1,
|
||||
"innobase_commit_low():trx_commit_for_mysql(%lld)",
|
||||
(long long) wsrep_thd_trx_seqno(thd));
|
||||
tmp = thd_proc_info(thd, info);
|
||||
#else
|
||||
tmp = thd_proc_info(thd, "innobase_commit_low()");
|
||||
#endif /* WSREP_PROC_INFO */
|
||||
}
|
||||
#endif /* WITH_WSREP */
|
||||
if (trx_is_started(trx)) {
|
||||
@ -19438,11 +19429,14 @@ static
|
||||
void
|
||||
innodb_status_output_update(THD*,st_mysql_sys_var*,void*var,const void*save)
|
||||
{
|
||||
*static_cast<my_bool*>(var) = *static_cast<const my_bool*>(save);
|
||||
mysql_mutex_unlock(&LOCK_global_system_variables);
|
||||
/* Wakeup server monitor thread. */
|
||||
os_event_set(srv_monitor_event);
|
||||
mysql_mutex_lock(&LOCK_global_system_variables);
|
||||
*static_cast<my_bool*>(var)= *static_cast<const my_bool*>(save);
|
||||
if (srv_monitor_event)
|
||||
{
|
||||
mysql_mutex_unlock(&LOCK_global_system_variables);
|
||||
/* Wakeup server monitor thread. */
|
||||
os_event_set(srv_monitor_event);
|
||||
mysql_mutex_lock(&LOCK_global_system_variables);
|
||||
}
|
||||
}
|
||||
|
||||
/** Update the system variable innodb_encryption_threads.
|
||||
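Both hunks in this file guard the wake-up with a check that srv_monitor_event actually exists, and drop LOCK_global_system_variables around the signal. A loose analogy using standard primitives instead of InnoDB's os_event API (the "event" here is an optional condition variable, and the function takes the settings lock itself for brevity):

    #include <condition_variable>
    #include <mutex>

    std::mutex settings_mutex;                          // analogue of LOCK_global_system_variables
    std::condition_variable *monitor_event = nullptr;   // may be absent, like srv_monitor_event
    bool status_output = false;

    // Update the setting, then wake the monitor thread only if it was ever
    // created, and never while holding the settings mutex.
    void status_output_update(bool new_value) {
        std::unique_lock<std::mutex> guard(settings_mutex);
        status_output = new_value;
        if (monitor_event) {
            guard.unlock();              // don't signal while holding the settings lock
            monitor_event->notify_one();
            guard.lock();                // take the lock back before returning
        }
    }

    int main() {
        status_output_update(true);      // safe even though monitor_event is null
        std::condition_variable cv;
        monitor_event = &cv;
        status_output_update(false);     // now actually signals
    }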
|
@ -1078,7 +1078,7 @@ sync_array_print_long_waits(
|
||||
sync_array_exit(arr);
|
||||
}
|
||||
|
||||
if (noticed) {
|
||||
if (noticed && srv_monitor_event) {
|
||||
ibool old_val;
|
||||
|
||||
fprintf(stderr,
|
||||
|
@ -3199,6 +3199,7 @@ static int write_page(MARIA_SHARE *share, File file,
|
||||
args.page= buff;
|
||||
args.pageno= (pgcache_page_no_t) (pos / share->block_size);
|
||||
args.data= (uchar*) share;
|
||||
args.crypt_buf= NULL;
|
||||
(* share->kfile.pre_write_hook)(&args);
|
||||
res= my_pwrite(file, args.page, block_size, pos, myf_rw);
|
||||
(* share->kfile.post_write_hook)(res, &args);
|
||||
|
@ -31,7 +31,7 @@ int myrg_extra(MYRG_INFO *info,enum ha_extra_function function,
|
||||
DBUG_PRINT("info",("function: %lu", (ulong) function));
|
||||
|
||||
if (!info->children_attached)
|
||||
DBUG_RETURN(1);
|
||||
DBUG_RETURN(0);
|
||||
if (function == HA_EXTRA_CACHE)
|
||||
{
|
||||
info->cache_in_use=1;
|
||||
|
@ -1533,11 +1533,8 @@ select (1.20396873 * 0.89550000 * 0.68000000 * 1.08721696 * 0.99500000 *
|
||||
1.01500000 * 1.01500000 * 0.99500000)
|
||||
0.81298807395367312459230693948000000000
|
||||
create table t1 as select 5.05 / 0.014;
|
||||
Warnings:
|
||||
Note 1265 Data truncated for column '5.05 / 0.014' at row 1
|
||||
show warnings;
|
||||
Level Code Message
|
||||
Note 1265 Data truncated for column '5.05 / 0.014' at row 1
|
||||
show create table t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
@ -1652,8 +1649,6 @@ my_col
|
||||
0.12345678912345678912345678912345678912
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 SELECT 1 / .123456789123456789123456789123456789123456789123456789123456789123456789123456789 AS my_col;
|
||||
Warnings:
|
||||
Note 1265 Data truncated for column 'my_col' at row 1
|
||||
DESCRIBE t1;
|
||||
Field Type Null Key Default Extra
|
||||
my_col decimal(65,4) YES NULL
|
||||
|
@ -92,8 +92,6 @@ DROP INDEX test ON t1;
|
||||
insert into t1 values (10, 1,1,1,1,1,1,1,1,1,1,1,1,1,NULL,0,0,0,1,1,1,1,'one','one');
|
||||
insert into t1 values (NULL,2,2,2,2,2,2,2,2,2,2,2,2,2,NULL,NULL,NULL,NULL,NULL,NULL,2,2,'two','two,one');
|
||||
insert into t1 values (0,1/3,3,3,3,3,3,3,3,3,3,3,3,3,NULL,'19970303','10:10:10','19970303101010','','','','3',3,3);
|
||||
Warnings:
|
||||
Warning 1265 Data truncated for column 'string' at row 1
|
||||
insert into t1 values (0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,NULL,19970807,080706,19970403090807,-1,-1,-1,'-1',-1,-1);
|
||||
Warnings:
|
||||
Warning 1264 Out of range value for column 'utiny' at row 1
|
||||
@ -131,7 +129,7 @@ select auto,string,tiny,short,medium,long_int,longlong,real_float,real_double,ut
|
||||
auto string tiny short medium long_int longlong real_float real_double utiny ushort umedium ulong ulonglong mod(floor(time_stamp/1000000),1000000)-mod(curdate(),1000000) date_field time_field date_time blob_col tinyblob_col mediumblob_col longblob_col
|
||||
10 1 1 1 1 1 1 1.0 1.0000 1 00001 1 1 1 0 0000-00-00 00:00:00 0000-00-00 00:00:00 1 1 1 1
|
||||
11 2 2 2 2 2 2 2.0 2.0000 2 00002 2 2 2 0 NULL NULL NULL NULL NULL 2 2
|
||||
12 0.33333333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3
|
||||
12 0.3333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3
|
||||
13 -1 -1 -1 -1 -1 -1 -1.0 -1.0000 0 00000 0 0 0 0 1997-08-07 08:07:06 1997-04-03 09:08:07 -1 -1 -1 -1
|
||||
14 -429496729 -128 -32768 -8388608 -2147483648 -4294967295 -4294967296.0 -4294967295.0000 0 00000 0 0 0 0 0000-00-00 00:00:00 0000-00-00 00:00:00 -4294967295 -4294967295 -4294967295 -4294967295
|
||||
15 4294967295 127 32767 8388607 2147483647 4294967295 4294967296.0 4294967295.0000 255 65535 16777215 4294967295 4294967295 0 0000-00-00 00:00:00 0000-00-00 00:00:00 4294967295 4294967295 4294967295 4294967295
|
||||
@ -183,7 +181,7 @@ Warning 1265 Data truncated for column 'new_field' at row 7
|
||||
select * from t2;
|
||||
auto string mediumblob_col new_field
|
||||
1 2 2 ne
|
||||
2 0.33333333 ne
|
||||
2 0.3333 ne
|
||||
3 -1 -1 ne
|
||||
4 -429496729 -4294967295 ne
|
||||
5 4294967295 4294967295 ne
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2017, 2019, MariaDB Corporation.
|
||||
Copyright (c) 2017, 2020, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@ -1049,7 +1049,7 @@ buf_LRU_check_size_of_non_data_objects(
|
||||
+ UT_LIST_GET_LEN(buf_pool->LRU))
|
||||
< buf_pool->curr_size / 3) {
|
||||
|
||||
if (!buf_lru_switched_on_innodb_mon) {
|
||||
if (!buf_lru_switched_on_innodb_mon && srv_monitor_event) {
|
||||
|
||||
/* Over 67 % of the buffer pool is occupied by lock
|
||||
heaps or the adaptive hash index. This may be a memory
|
||||
|
@ -1127,14 +1127,14 @@ fts_cache_clear(
|
||||
index_cache->doc_stats = NULL;
|
||||
}
|
||||
|
||||
mem_heap_free(static_cast<mem_heap_t*>(cache->sync_heap->arg));
|
||||
cache->sync_heap->arg = NULL;
|
||||
|
||||
cache->total_size = 0;
|
||||
|
||||
mutex_enter((ib_mutex_t*) &cache->deleted_lock);
|
||||
cache->deleted_doc_ids = NULL;
|
||||
mutex_exit((ib_mutex_t*) &cache->deleted_lock);
|
||||
|
||||
mem_heap_free(static_cast<mem_heap_t*>(cache->sync_heap->arg));
|
||||
cache->sync_heap->arg = NULL;
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
|
@ -4647,16 +4647,7 @@ innobase_commit_low(
|
||||
#ifdef WITH_WSREP
|
||||
const char* tmp = 0;
|
||||
if (trx->is_wsrep()) {
|
||||
#ifdef WSREP_PROC_INFO
|
||||
char info[64];
|
||||
info[sizeof(info) - 1] = '\0';
|
||||
snprintf(info, sizeof(info) - 1,
|
||||
"innobase_commit_low():trx_commit_for_mysql(%lld)",
|
||||
(long long) wsrep_thd_trx_seqno(trx->mysql_thd));
|
||||
tmp = thd_proc_info(trx->mysql_thd, info);
|
||||
#else
|
||||
tmp = thd_proc_info(trx->mysql_thd, "innobase_commit_low()");
|
||||
#endif /* WSREP_PROC_INFO */
|
||||
}
|
||||
#endif /* WITH_WSREP */
|
||||
if (trx_is_started(trx)) {
|
||||
@ -19384,11 +19375,14 @@ static
|
||||
void
|
||||
innodb_status_output_update(THD*,st_mysql_sys_var*,void*var,const void*save)
|
||||
{
|
||||
*static_cast<my_bool*>(var) = *static_cast<const my_bool*>(save);
|
||||
mysql_mutex_unlock(&LOCK_global_system_variables);
|
||||
/* Wakeup server monitor thread. */
|
||||
os_event_set(srv_monitor_event);
|
||||
mysql_mutex_lock(&LOCK_global_system_variables);
|
||||
*static_cast<my_bool*>(var)= *static_cast<const my_bool*>(save);
|
||||
if (srv_monitor_event)
|
||||
{
|
||||
mysql_mutex_unlock(&LOCK_global_system_variables);
|
||||
/* Wakeup server monitor thread. */
|
||||
os_event_set(srv_monitor_event);
|
||||
mysql_mutex_lock(&LOCK_global_system_variables);
|
||||
}
|
||||
}
|
||||
|
||||
/** Update the system variable innodb_encryption_threads.
|
||||
|
@ -45,10 +45,10 @@ Created 1/20/1994 Heikki Tuuri
|
||||
|
||||
#define INNODB_VERSION_MAJOR 5
|
||||
#define INNODB_VERSION_MINOR 6
|
||||
#define INNODB_VERSION_BUGFIX 48
|
||||
#define INNODB_VERSION_BUGFIX 49
|
||||
|
||||
#ifndef PERCONA_INNODB_VERSION
|
||||
#define PERCONA_INNODB_VERSION 88.0
|
||||
#define PERCONA_INNODB_VERSION 89.0
|
||||
#endif
|
||||
|
||||
/* Enable UNIV_LOG_ARCHIVE in XtraDB */
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2008, Google Inc.
|
||||
Copyright (c) 2013, 2019, MariaDB Corporation.
|
||||
Copyright (c) 2013, 2020, MariaDB Corporation.
|
||||
|
||||
Portions of this file contain modifications contributed and copyrighted by
|
||||
Google, Inc. Those modifications are gratefully acknowledged and are described
|
||||
@ -1164,7 +1164,7 @@ sync_array_print_long_waits(
|
||||
sync_array_exit(arr);
|
||||
}
|
||||
|
||||
if (noticed) {
|
||||
if (noticed && srv_monitor_event) {
|
||||
ibool old_val;
|
||||
|
||||
fprintf(stderr,
|
||||
|
@ -142,7 +142,7 @@ TimeoutStopSec=900
|
||||
##
|
||||
|
||||
# Number of files limit. previously [mysqld_safe] open-files-limit
|
||||
LimitNOFILE=16364
|
||||
LimitNOFILE=16384
|
||||
|
||||
# Maximium core size. previously [mysqld_safe] core-file-size
|
||||
# LimitCore=
|
||||
|
@ -168,7 +168,7 @@ TimeoutStopSec=900
|
||||
##
|
||||
|
||||
# Number of files limit. previously [mysqld_safe] open-files-limit
|
||||
LimitNOFILE=16364
|
||||
LimitNOFILE=16384
|
||||
|
||||
# Maximium core size. previously [mysqld_safe] core-file-size
|
||||
# LimitCore=
|
||||
|