Merge branch '10.3' into 10.4
commit fdcfc25127
@@ -2032,6 +2032,17 @@ Warnings:
Warning 1280 Name 'foo' ignored for PRIMARY key.
DROP TABLE t1;
#
# MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error
#
create table t1(c1 varchar(1));
insert into t1(c1) values('#');
select @@sql_mode like '%strict_all_tables%';
@@sql_mode like '%strict_all_tables%'
0
create table t2 as select if(c1 = '#', c1 = 0, c1) as c1 from t1;
ERROR 22007: Truncated incorrect DECIMAL value: '#'
drop table t1;
#
# End of 10.3 tests
#
#
@@ -1892,6 +1892,19 @@ create table t1 (c int(10) unsigned) engine=memory transactional=0;
CREATE TABLE t1 ( id1 INT, id2 INT, CONSTRAINT `foo` PRIMARY KEY (id1), CONSTRAINT `bar` UNIQUE KEY(id2));
DROP TABLE t1;

--echo #
--echo # MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error
--echo #

create table t1(c1 varchar(1));
insert into t1(c1) values('#');

select @@sql_mode like '%strict_all_tables%';
--error ER_TRUNCATED_WRONG_VALUE
create table t2 as select if(c1 = '#', c1 = 0, c1) as c1 from t1;

drop table t1;

--echo #
--echo # End of 10.3 tests
--echo #
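The MDEV-30342 test above hinges on the numeric conversion inside if(c1 = '#', c1 = 0, c1): evaluating c1 = 0 forces the one-character string '#' to be parsed as a number, which is where the "Truncated incorrect ... value" diagnostic (ER_TRUNCATED_WRONG_VALUE) comes from. A minimal, hedged sketch of just that conversion, outside the patch; t_demo is a hypothetical table name:

-- Hedged sketch, not part of the patch: t_demo is a hypothetical table.
create table t_demo (c1 varchar(1));
insert into t_demo values ('#');
-- '#' cannot be parsed as a number, so the comparison below raises a
-- "Truncated incorrect ... value" warning during the implicit conversion.
select c1 = 0 from t_demo;
drop table t_demo;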
@@ -1,4 +1,3 @@
drop table if exists t1,t2,t3,t4,t5,t6;
set @sav_dpi= @@div_precision_increment;
set div_precision_increment= 5;
show variables like 'div_precision_increment';
@@ -2567,5 +2566,15 @@ stddev_samp(i) stddev_pop(i) stddev(i) std(i)
drop view v1;
drop table t1;
#
# MDEV-29988: Major performance regression with 10.6.11
#
create table t1 (a varchar(10) charset utf8mb4, b int, c int);
insert t1 values (1,2,3),(4,5,6),(1,7,8);
select concat(a,":",group_concat(b)) from t1 group by a;
concat(a,":",group_concat(b))
1:2,7
4:5
drop table t1;
#
# End of 10.3 tests
#
@@ -2,10 +2,6 @@
# simple test of all group functions
#

--disable_warnings
drop table if exists t1,t2,t3,t4,t5,t6;
--enable_warnings

set @sav_dpi= @@div_precision_increment;
set div_precision_increment= 5;
show variables like 'div_precision_increment';
@@ -1801,6 +1797,14 @@ select * from v1;
drop view v1;
drop table t1;

--echo #
--echo # MDEV-29988: Major performance regression with 10.6.11
--echo #
create table t1 (a varchar(10) charset utf8mb4, b int, c int);
insert t1 values (1,2,3),(4,5,6),(1,7,8);
select concat(a,":",group_concat(b)) from t1 group by a;
drop table t1;

--echo #
--echo # End of 10.3 tests
--echo #
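The MDEV-29988 test above mixes a utf8mb4 column with the string produced by group_concat(), so character-set aggregation of concat()'s arguments is involved. A hedged, standalone sketch (the demo table is hypothetical, not part of the patch) for inspecting the collations that this aggregation yields:

-- Hedged sketch, not from the patch: demo is a hypothetical table.
create table demo (a varchar(10) charset utf8mb4, b int);
insert demo values ('1',2),('1',7);
select collation(a),
       collation(group_concat(b)),
       collation(concat(a, ':', group_concat(b)))
from demo group by a;
drop table demo;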
@@ -1042,4 +1042,17 @@ select * from t1;
a
deallocate prepare stmt;
drop table t1,t2,t3;
#
# MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error
#
create table t1(c1 varchar(1));
create table t2(c1 varchar(1));
insert into t1(c1) values('#');
select @@sql_mode like '%strict_all_tables%';
@@sql_mode like '%strict_all_tables%'
0
insert into t2(c1) select if(c1 = '#', c1 = 0, c1) as c1 from t1;
drop table t1, t2;
#
# End of 10.3 test
#
@@ -595,4 +595,21 @@ deallocate prepare stmt;

drop table t1,t2,t3;


--echo #
--echo # MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error
--echo #

create table t1(c1 varchar(1));
create table t2(c1 varchar(1));

insert into t1(c1) values('#');

select @@sql_mode like '%strict_all_tables%';
insert into t2(c1) select if(c1 = '#', c1 = 0, c1) as c1 from t1;

drop table t1, t2;

--echo #
--echo # End of 10.3 test
--echo #
@@ -1999,3 +1999,55 @@ Note 1003 select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`c1` AS `c1`,`test`.`t3`.`
DROP TABLE t1,t2,t3;
set join_cache_level= @save_join_cache_level;
set optimizer_switch=@save_optimizer_switch;
#
# MDEV-27624: Nested left joins with not_exists optimization
# for most inner left join
#
set @save_join_cache_level= @@join_cache_level;
CREATE TABLE t1 (a INT NOT NULL, b INT, c INT);
INSERT INTO t1 VALUES (1,1,1), (1,2,1), (1,3,1);
CREATE TABLE t2(a INT NOT NULL);
INSERT INTO t2 VALUES (1), (2);
CREATE TABLE t3(a INT not null, b INT);
INSERT INTO t3 VALUES (1, 1), (2, 1), (3, 1);
set join_cache_level = 0;
EXPLAIN SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where
1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Not exists
SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
a b c a a b
1 3 1 NULL NULL NULL
set join_cache_level = 2;
EXPLAIN SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Not exists; Using join buffer (incremental, BNL join)
SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
a b c a a b
1 3 1 NULL NULL NULL
DROP TABLE t1, t2, t3;
set join_cache_level= @save_join_cache_level;
# end of 10.3 tests
@@ -1419,3 +1419,41 @@ DROP TABLE t1,t2,t3;
set join_cache_level= @save_join_cache_level;

set optimizer_switch=@save_optimizer_switch;

--echo #
--echo # MDEV-27624: Nested left joins with not_exists optimization
--echo # for most inner left join
--echo #

set @save_join_cache_level= @@join_cache_level;

CREATE TABLE t1 (a INT NOT NULL, b INT, c INT);
INSERT INTO t1 VALUES (1,1,1), (1,2,1), (1,3,1);

CREATE TABLE t2(a INT NOT NULL);
INSERT INTO t2 VALUES (1), (2);

CREATE TABLE t3(a INT not null, b INT);
INSERT INTO t3 VALUES (1, 1), (2, 1), (3, 1);

let $q=
SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;

set join_cache_level = 0;
eval EXPLAIN $q;
eval $q;

set join_cache_level = 2;
eval EXPLAIN $q;
eval $q;

DROP TABLE t1, t2, t3;

set join_cache_level= @save_join_cache_level;

--echo # end of 10.3 tests
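For readers unfamiliar with the "Not exists" optimization that MDEV-27624 touches: it applies to anti-join queries written as LEFT JOIN ... WHERE inner_column IS NULL, letting the executor stop scanning inner rows for an outer row as soon as one match is found. A hedged sketch with hypothetical parent/child tables (not part of the patch); both statements are logically equivalent, and the first is the form EXPLAIN can flag as "Not exists":

-- Hedged sketch, not from the patch: parent/child are hypothetical tables.
create table parent (id int primary key);
create table child (parent_id int);
insert into parent values (1),(2);
insert into child values (1);
-- Anti-join via LEFT JOIN ... IS NULL; only parent row 2 qualifies.
select p.id from parent p
left join child c on c.parent_id = p.id
where c.parent_id is null;
-- Equivalent formulation with NOT EXISTS.
select p.id from parent p
where not exists (select 1 from child c where c.parent_id = p.id);
drop table parent, child;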
@@ -2008,6 +2008,58 @@ Note 1003 select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`c1` AS `c1`,`test`.`t3`.`
DROP TABLE t1,t2,t3;
set join_cache_level= @save_join_cache_level;
set optimizer_switch=@save_optimizer_switch;
#
# MDEV-27624: Nested left joins with not_exists optimization
# for most inner left join
#
set @save_join_cache_level= @@join_cache_level;
CREATE TABLE t1 (a INT NOT NULL, b INT, c INT);
INSERT INTO t1 VALUES (1,1,1), (1,2,1), (1,3,1);
CREATE TABLE t2(a INT NOT NULL);
INSERT INTO t2 VALUES (1), (2);
CREATE TABLE t3(a INT not null, b INT);
INSERT INTO t3 VALUES (1, 1), (2, 1), (3, 1);
set join_cache_level = 0;
EXPLAIN SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where
1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Not exists
SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
a b c a a b
1 3 1 NULL NULL NULL
set join_cache_level = 2;
EXPLAIN SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Not exists; Using join buffer (incremental, BNL join)
SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
a b c a a b
1 3 1 NULL NULL NULL
DROP TABLE t1, t2, t3;
set join_cache_level= @save_join_cache_level;
# end of 10.3 tests
CREATE TABLE t5 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b));
CREATE TABLE t6 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b));
CREATE TABLE t7 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b));
@@ -5722,6 +5722,24 @@ EXECUTE stmt USING 'd';
EXECUTE stmt USING 'd';
300
DROP TABLE t1, t2, t3;
set @@max_session_mem_used=default;
create table t (a varchar(10)) character set utf8;
insert into t values ('');
prepare stmt from "select 1 from t where a = ?";
set @@max_session_mem_used=(select memory_used*2 from information_schema.processlist where id=connection_id());
deallocate prepare stmt;
drop table t;
set @@max_session_mem_used=default;
create table t (a varchar(10)) character set utf8;
insert into t values ('');
prepare stmt from "select 1 from t where a = 'a'";
set @@max_session_mem_used=(select memory_used*2 from information_schema.processlist where id=connection_id());
deallocate prepare stmt;
drop table t;
set @@max_session_mem_used=default;
#
# End of 10.3 tests
#
#
# MDEV-19263: Server crashes in mysql_handle_single_derived
# upon 2nd execution of PS
@@ -5109,6 +5109,45 @@ EXECUTE stmt USING 'd';
EXECUTE stmt USING 'd';
DROP TABLE t1, t2, t3;

set @@max_session_mem_used=default;
create table t (a varchar(10)) character set utf8;
insert into t values ('');
prepare stmt from "select 1 from t where a = ?";
set @@max_session_mem_used=(select memory_used*2 from information_schema.processlist where id=connection_id());
let $run= 1000;
disable_result_log;
disable_query_log;
while ($run) {
execute stmt using repeat('x',10000);
dec $run;
}
enable_result_log;
enable_query_log;
deallocate prepare stmt;
drop table t;
set @@max_session_mem_used=default;

create table t (a varchar(10)) character set utf8;
insert into t values ('');
prepare stmt from "select 1 from t where a = 'a'";
set @@max_session_mem_used=(select memory_used*2 from information_schema.processlist where id=connection_id());
let $run= 1000;
disable_result_log;
disable_query_log;
while ($run) {
execute stmt;
dec $run;
}
enable_result_log;
enable_query_log;
deallocate prepare stmt;
drop table t;
set @@max_session_mem_used=default;

--echo #
--echo # End of 10.3 tests
--echo #

--echo #
--echo # MDEV-19263: Server crashes in mysql_handle_single_derived
--echo # upon 2nd execution of PS
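The two test blocks above rely on MariaDB's max_session_mem_used limit: the cap is set to roughly twice the session's current MEMORY_USED, so a per-execution memory leak in the repeatedly executed prepared statement would eventually abort with an error, while a leak-free server stays under the limit. A hedged, interactive sketch of the same pattern (values are illustrative, not from the patch):

-- Hedged sketch, not part of the patch.
-- Inspect the session's current memory accounting.
select memory_used
from information_schema.processlist
where id = connection_id();
-- Cap the session at twice its current usage, as the test does.
set @@max_session_mem_used =
  (select memory_used*2 from information_schema.processlist
   where id = connection_id());
-- ... run the workload being checked here ...
set @@max_session_mem_used = default;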
@@ -36,7 +36,7 @@ echo '[Service]'
echo

if [[ ( "$user" != "root" && "$user" != "mysql" ) || "${SET_USER}" == 1 ]]; then
if [[ ( ! -z "$user" && "$user" != "root" && "$user" != "mysql" ) || "${SET_USER}" == 1 ]]; then
echo User=$user
fi
@@ -11152,7 +11152,7 @@ Create_field *Create_field::clone(MEM_ROOT *mem_root) const
}

/**
Return true if default is an expression that must be saved explicitely
Return true if default is an expression that must be saved explicitly

This is:
- Not basic constants
@@ -4933,7 +4933,7 @@ public:
Record_addr addr(true);
return make_field(share, mem_root, &addr, field_name_arg);
}
/* Return true if default is an expression that must be saved explicitely */
/* Return true if default is an expression that must be saved explicitly */
bool has_default_expression();

bool has_default_now_unireg_check() const
sql/item.cc
@@ -41,6 +41,7 @@
// find_item_in_list,
// RESOLVED_AGAINST_ALIAS, ...
#include "sql_expression_cache.h"
#include "sql_lex.h" // empty_clex_str

const String my_null_string("NULL", 4, default_charset_info);
const String my_default_string("DEFAULT", 7, default_charset_info);
@@ -1280,12 +1281,11 @@ Item *Item_cache::safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
Item *conv= example->safe_charset_converter(thd, tocs);
if (conv == example)
return this;
Item_cache *cache;
if (!conv || conv->fix_fields(thd, (Item **) NULL) ||
unlikely(!(cache= new (thd->mem_root) Item_cache_str(thd, conv))))
return NULL; // Safe conversion is not possible, or OEM
cache->setup(thd, conv);
return cache;
if (!conv || conv->fix_fields(thd, (Item **) NULL))
return NULL; // Safe conversion is not possible, or OOM
setup(thd, conv);
thd->change_item_tree(&example, conv);
return this;
}


@@ -2245,7 +2245,8 @@ void Item::split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array,

if (unlikely((!(used_tables() & ~PARAM_TABLE_BIT) ||
(type() == REF_ITEM &&
((Item_ref*)this)->ref_type() != Item_ref::VIEW_REF))))
((Item_ref*)this)->ref_type() != Item_ref::VIEW_REF &&
((Item_ref*)this)->ref_type() != Item_ref::DIRECT_REF))))
return;
}

@@ -2558,7 +2559,6 @@ bool Type_std_attributes::agg_item_set_converter(const DTCollation &coll,
safe_args[1]= args[item_sep];
}

bool res= FALSE;
uint i;

DBUG_ASSERT(!thd->stmt_arena->is_stmt_prepare());
@@ -2578,19 +2578,31 @@ bool Type_std_attributes::agg_item_set_converter(const DTCollation &coll,
args[item_sep]= safe_args[1];
}
my_coll_agg_error(args, nargs, fname, item_sep);
res= TRUE;
break; // we cannot return here, we need to restore "arena".
return TRUE;
}

thd->change_item_tree(arg, conv);

if (conv->fix_fields_if_needed(thd, arg))
return TRUE;

Query_arena *arena, backup;
arena= thd->activate_stmt_arena_if_needed(&backup);
if (arena)
{
res= TRUE;
break; // we cannot return here, we need to restore "arena".
Item_direct_ref_to_item *ref=
new (thd->mem_root) Item_direct_ref_to_item(thd, *arg);
if ((ref == NULL) || ref->fix_fields(thd, (Item **)&ref))
{
thd->restore_active_arena(arena, &backup);
return TRUE;
}
*arg= ref;
thd->restore_active_arena(arena, &backup);
ref->change_item(thd, conv);
}
else
thd->change_item_tree(arg, conv);
}
return res;
return FALSE;
}


@@ -10786,8 +10798,6 @@ const char *dbug_print(SELECT_LEX_UNIT *x) { return dbug_print_unit(x); }

#endif /*DBUG_OFF*/



void Item::register_in(THD *thd)
{
next= thd->free_list;
@@ -10795,6 +10805,48 @@ void Item::register_in(THD *thd)
}


Item_direct_ref_to_item::Item_direct_ref_to_item(THD *thd, Item *item)
: Item_direct_ref(thd, NULL, NULL, "", &empty_clex_str, FALSE)
{
m_item= item;
ref= (Item**)&m_item;
}

bool Item_direct_ref_to_item::fix_fields(THD *thd, Item **)
{
DBUG_ASSERT(m_item != NULL);
if (m_item->fix_fields_if_needed_for_scalar(thd, ref))
return TRUE;
set_properties();
return FALSE;
}

void Item_direct_ref_to_item::print(String *str, enum_query_type query_type)
{
m_item->print(str, query_type);
}

Item *Item_direct_ref_to_item::safe_charset_converter(THD *thd,
CHARSET_INFO *tocs)
{
Item *conv= m_item->safe_charset_converter(thd, tocs);
if (conv != m_item)
{
if (conv== NULL || conv->fix_fields(thd, &conv))
return NULL;
change_item(thd, conv);
}
return this;
}

void Item_direct_ref_to_item::change_item(THD *thd, Item *i)
{
DBUG_ASSERT(i->is_fixed());
thd->change_item_tree(ref, i);
set_properties();
}


bool Item::cleanup_excluding_immutables_processor (void *arg)
{
if (!(get_extraction_flag() == IMMUTABLE_FL))
sql/item.h
@@ -7610,6 +7610,104 @@ inline void Virtual_column_info::print(String* str)
expr->print_for_table_def(str);
}

class Item_direct_ref_to_item : public Item_direct_ref
{
Item *m_item;
public:
Item_direct_ref_to_item(THD *thd, Item *item);

void change_item(THD *thd, Item *);

bool fix_fields(THD *thd, Item **it);

void print(String *str, enum_query_type query_type);

Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs);
Item *get_tmp_table_item(THD *thd)
{ return m_item->get_tmp_table_item(thd); }
Item *get_copy(THD *thd)
{ return m_item->get_copy(thd); }
COND *build_equal_items(THD *thd, COND_EQUAL *inherited,
bool link_item_fields,
COND_EQUAL **cond_equal_ref)
{
return m_item->build_equal_items(thd, inherited, link_item_fields,
cond_equal_ref);
}
const char *full_name() const { return m_item->full_name(); }
void make_send_field(THD *thd, Send_field *field)
{ m_item->make_send_field(thd, field); }
bool eq(const Item *item, bool binary_cmp) const
{
Item *it= ((Item *) item)->real_item();
return m_item->eq(it, binary_cmp);
}
void fix_after_pullout(st_select_lex *new_parent, Item **refptr, bool merge)
{ m_item->fix_after_pullout(new_parent, &m_item, merge); }
void save_val(Field *to)
{ return m_item->save_val(to); }
void save_result(Field *to)
{ return m_item->save_result(to); }
int save_in_field(Field *to, bool no_conversions)
{ return m_item->save_in_field(to, no_conversions); }
const Type_handler *type_handler() const { return m_item->type_handler(); }
table_map used_tables() const { return m_item->used_tables(); }
void update_used_tables()
{ m_item->update_used_tables(); }
bool const_item() const { return m_item->const_item(); }
table_map not_null_tables() const { return m_item->not_null_tables(); }
bool walk(Item_processor processor, bool walk_subquery, void *arg)
{
return m_item->walk(processor, walk_subquery, arg) ||
(this->*processor)(arg);
}
bool enumerate_field_refs_processor(void *arg)
{ return m_item->enumerate_field_refs_processor(arg); }
Item_field *field_for_view_update()
{ return m_item->field_for_view_update(); }

/* Row emulation: forwarding of ROW-related calls to orig_item */
uint cols() const
{ return m_item->cols(); }
Item* element_index(uint i)
{ return this; }
Item** addr(uint i)
{ return &m_item; }
bool check_cols(uint c)
{ return Item::check_cols(c); }
bool null_inside()
{ return m_item->null_inside(); }
void bring_value()
{}

Item_equal *get_item_equal() { return m_item->get_item_equal(); }
void set_item_equal(Item_equal *item_eq) { m_item->set_item_equal(item_eq); }
Item_equal *find_item_equal(COND_EQUAL *cond_equal)
{ return m_item->find_item_equal(cond_equal); }
Item *propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond)
{ return m_item->propagate_equal_fields(thd, ctx, cond); }
Item *replace_equal_field(THD *thd, uchar *arg)
{ return m_item->replace_equal_field(thd, arg); }

bool excl_dep_on_table(table_map tab_map)
{ return m_item->excl_dep_on_table(tab_map); }
bool excl_dep_on_grouping_fields(st_select_lex *sel)
{ return m_item->excl_dep_on_grouping_fields(sel); }
bool is_expensive() { return m_item->is_expensive(); }
Item* build_clone(THD *thd) { return get_copy(thd); }

void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, uint flags)
{
m_item->split_sum_func(thd, ref_pointer_array, fields, flags);
}
/*
This processor states that this is safe for virtual columns
(because this Item transparency)
*/
bool check_vcol_func_processor(void *arg) { return FALSE;}
};

inline bool TABLE::mark_column_with_deps(Field *field)
{
bool res;
@@ -418,18 +418,9 @@ bool Item_func::setup_args_and_comparator(THD *thd, Arg_comparator *cmp)
if (args[0]->cmp_type() == STRING_RESULT &&
args[1]->cmp_type() == STRING_RESULT)
{
Query_arena *arena, backup;
arena= thd->activate_stmt_arena_if_needed(&backup);

DTCollation tmp;
bool ret= agg_arg_charsets_for_comparison(tmp, args, 2);

if (arena)
thd->restore_active_arena(arena, &backup);

if (ret)
return ret;

if (agg_arg_charsets_for_comparison(tmp, args, 2))
return true;
cmp->m_compare_collation= tmp.collation;
}
// Convert constants when compared to int/year field
@@ -127,6 +127,8 @@

#include <my_service_manager.h>

#include <source_revision.h>

#define mysqld_charset &my_charset_latin1

/* We have HAVE_valgrind below as this speeds up the shutdown of MySQL */
@@ -4117,21 +4119,6 @@ static int init_common_variables()

mysql_real_data_home_len= uint(strlen(mysql_real_data_home));

if (!opt_abort)
{
if (IS_SYSVAR_AUTOSIZE(&server_version_ptr))
sql_print_information("%s (mysqld %s) starting as process %lu ...",
my_progname, server_version, (ulong) getpid());
else
{
char real_server_version[SERVER_VERSION_LENGTH];
set_server_version(real_server_version, sizeof(real_server_version));
sql_print_information("%s (mysqld %s as %s) starting as process %lu ...",
my_progname, real_server_version, server_version,
(ulong) getpid());
}
}

sf_leaking_memory= 0; // no memory leaks from now on

#ifndef EMBEDDED_LIBRARY
@@ -5014,6 +5001,14 @@ static int init_server_components()
error_handler_hook= my_message_sql;
proc_info_hook= set_thd_stage_info;

/*
Print source revision hash, as one of the first lines, if not the
first in error log, for troubleshooting and debugging purposes
*/
if (!opt_help)
sql_print_information("Starting MariaDB %s source revision %s as process %lu",
server_version, SOURCE_REVISION, (ulong) getpid());

#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
/*
Parsing the performance schema command line option may have reported
@@ -23,6 +23,7 @@
#include "mysqld.h"
#include "sql_class.h"
#include "my_stacktrace.h"
#include <source_revision.h>

#ifdef __WIN__
#include <crtdbg.h>
@@ -178,7 +179,8 @@ extern "C" sig_handler handle_fatal_signal(int sig)
"something is definitely wrong and this may fail.\n\n");

set_server_version(server_version, sizeof(server_version));
my_safe_printf_stderr("Server version: %s\n", server_version);
my_safe_printf_stderr("Server version: %s source revision: %s\n",
server_version, SOURCE_REVISION);

if (dflt_key_cache)
my_safe_printf_stderr("key_buffer_size=%lu\n",
@@ -2332,7 +2332,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
Disable slow log if:
- Slow logging is enabled (no change needed)
- This is a normal SP (not event log)
- If we have not explicitely disabled logging of SP
- If we have not explicitly disabled logging of SP
*/
if (save_enable_slow_log &&
((!(m_flags & LOG_SLOW_STATEMENTS) &&
@@ -2346,7 +2346,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
Disable general log if:
- If general log is enabled (no change needed)
- This is a normal SP (not event log)
- If we have not explicitely disabled logging of SP
- If we have not explicitly disabled logging of SP
*/
if (!(thd->variables.option_bits & OPTION_LOG_OFF) &&
(!(m_flags & LOG_GENERAL_LOG) &&
@@ -5626,7 +5626,7 @@ class select_insert :public select_result_interceptor {
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
virtual int prepare2(JOIN *join);
virtual int send_data(List<Item> &items);
virtual bool store_values(List<Item> &values, bool ignore_errors);
virtual bool store_values(List<Item> &values);
virtual bool can_rollback_data() { return 0; }
bool prepare_eof();
bool send_ok_packet();
@@ -5668,7 +5668,7 @@ public:
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);

int binlog_show_create_table(TABLE **tables, uint count);
bool store_values(List<Item> &values, bool ignore_errors);
bool store_values(List<Item> &values);
bool send_eof();
virtual void abort_result_set();
virtual bool can_rollback_data() { return 1; }
@@ -3962,7 +3962,7 @@ int select_insert::send_data(List<Item> &values)
DBUG_RETURN(0);

thd->count_cuted_fields= CHECK_FIELD_WARN; // Calculate cuted fields
if (store_values(values, info.ignore))
if (store_values(values))
DBUG_RETURN(1);
thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL;
if (unlikely(thd->is_error()))
@@ -4020,17 +4020,17 @@ int select_insert::send_data(List<Item> &values)
}


bool select_insert::store_values(List<Item> &values, bool ignore_errors)
bool select_insert::store_values(List<Item> &values)
{
DBUG_ENTER("select_insert::store_values");
bool error;

if (fields->elements)
error= fill_record_n_invoke_before_triggers(thd, table, *fields, values,
ignore_errors, TRG_EVENT_INSERT);
true, TRG_EVENT_INSERT);
else
error= fill_record_n_invoke_before_triggers(thd, table, table->field_to_fill(),
values, ignore_errors, TRG_EVENT_INSERT);
values, true, TRG_EVENT_INSERT);

DBUG_RETURN(error);
}
@@ -4719,10 +4719,10 @@ select_create::binlog_show_create_table(TABLE **tables, uint count)
return result;
}

bool select_create::store_values(List<Item> &values, bool ignore_errors)
bool select_create::store_values(List<Item> &values)
{
return fill_record_n_invoke_before_triggers(thd, table, field, values,
ignore_errors, TRG_EVENT_INSERT);
true, TRG_EVENT_INSERT);
}
@@ -2287,11 +2287,7 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last)
int error;
enum_nested_loop_state rc= NESTED_LOOP_OK;
join_tab->table->null_row= 0;
bool check_only_first_match=
join_tab->check_only_first_match() &&
(!join_tab->first_inner || // semi-join case
join_tab->first_inner == join_tab->first_unmatched); // outer join case
bool outer_join_first_inner= join_tab->is_first_inner_for_outer_join();
bool check_only_first_match= join_tab->check_only_first_match();
DBUG_ENTER("JOIN_CACHE::join_matching_records");

/* Return at once if there are no records in the join buffer */
@@ -2357,7 +2353,34 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last)
Also those records that must be null complemented are not considered
as candidates for matches.
*/
if ((!check_only_first_match && !outer_join_first_inner) ||

bool not_exists_opt_is_applicable= true;
if (check_only_first_match && join_tab->first_inner)
{
/*
This is the case with not_exists optimization for nested outer join
when join_tab is the last inner table for one or more embedding outer
joins. To safely use 'not_exists' optimization in this case we have
to check that the match flags for all these embedding outer joins are
in the 'on' state.
(See also a similar check in evaluate_join_record() for the case when
join buffer are not used.)
*/
for (JOIN_TAB *tab= join_tab->first_inner;
tab && tab->first_inner && tab->last_inner == join_tab;
tab= tab->first_inner->first_upper)
{
if (get_match_flag_by_pos_from_join_buffer(rec_ptr, tab) !=
MATCH_FOUND)
{
not_exists_opt_is_applicable= false;
break;
}
}
}

if (!check_only_first_match ||
(join_tab->first_inner && !not_exists_opt_is_applicable) ||
!skip_next_candidate_for_match(rec_ptr))
{
read_next_candidate_for_match(rec_ptr);
@@ -540,7 +540,7 @@ bool Item_func_in::create_value_list_for_tvc(THD *thd,

if (is_list_of_rows)
{
Item_row *row_list= (Item_row *)(args[i]->build_clone(thd));
Item_row *row_list= (Item_row *)(args[i]);

if (!row_list)
return true;
@@ -565,8 +565,7 @@ bool Item_func_in::create_value_list_for_tvc(THD *thd,
sprintf(col_name, "_col_%i", 1);
args[i]->set_name(thd, col_name, strlen(col_name), thd->charset());
}
Item *arg_clone= args[i]->build_clone(thd);
if (!arg_clone || tvc_value->push_back(arg_clone))
if (tvc_value->push_back(args[i]))
return true;
}
@@ -1695,7 +1695,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
TRANSACTION can be a non-empty history unit, or can be an identifier
in bit_expr.

In the grammar below we use %prec to explicitely tell Bison to go
In the grammar below we use %prec to explicitly tell Bison to go
through the empty branch in the optional rule only when the lookahead
token does not belong to a small set of selected tokens.
@@ -1171,7 +1171,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
TRANSACTION can be a non-empty history unit, or can be an identifier
in bit_expr.

In the grammar below we use %prec to explicitely tell Bison to go
In the grammar below we use %prec to explicitly tell Bison to go
through the empty branch in the optional rule only when the lookahead
token does not belong to a small set of selected tokens.
@@ -3347,7 +3347,7 @@ class Vcol_expr_context
bool inited;
THD *thd;
TABLE *table;
Query_arena backup_arena;
Query_arena backup_arena, *stmt_arena;
table_map old_map;
Security_context *save_security_ctx;
sql_mode_t save_sql_mode;
@@ -3357,6 +3357,7 @@ public:
inited(false),
thd(_thd),
table(_table),
stmt_arena(thd->stmt_arena),
old_map(table->map),
save_security_ctx(thd->security_ctx),
save_sql_mode(thd->variables.sql_mode) {}
@@ -3377,6 +3378,7 @@ bool Vcol_expr_context::init()
thd->security_ctx= tl->security_ctx;

thd->set_n_backup_active_arena(table->expr_arena, &backup_arena);
thd->stmt_arena= thd;

inited= true;
return false;
@@ -3390,6 +3392,7 @@ Vcol_expr_context::~Vcol_expr_context()
thd->security_ctx= save_security_ctx;
thd->restore_active_arena(table->expr_arena, &backup_arena);
thd->variables.sql_mode= save_sql_mode;
thd->stmt_arena= stmt_arena;
}
@@ -249,8 +249,7 @@ int azdopen(azio_stream *s, File fd, int Flags)
for end of file.
IN assertion: the stream s has been sucessfully opened for reading.
*/
int get_byte(s)
azio_stream *s;
int get_byte(azio_stream *s)
{
if (s->z_eof) return EOF;
if (s->stream.avail_in == 0)
@@ -427,8 +426,7 @@ void read_header(azio_stream *s, unsigned char *buffer)
* Cleanup then free the given azio_stream. Return a zlib error code.
Try freeing in the reverse order of allocations.
*/
int destroy (s)
azio_stream *s;
int destroy (azio_stream *s)
{
int err = Z_OK;

@@ -679,9 +677,7 @@ int do_flush (azio_stream *s, int flush)
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
}

int ZEXPORT azflush (s, flush)
azio_stream *s;
int flush;
int ZEXPORT azflush (azio_stream *s, int flush)
{
int err;

@@ -708,8 +704,7 @@ int ZEXPORT azflush (s, flush)
/* ===========================================================================
Rewinds input file.
*/
int azrewind (s)
azio_stream *s;
int azrewind (azio_stream *s)
{
if (s == NULL || s->mode != 'r') return -1;

@@ -733,10 +728,7 @@ int azrewind (s)
SEEK_END is not implemented, returns error.
In this version of the library, azseek can be extremely slow.
*/
my_off_t azseek (s, offset, whence)
azio_stream *s;
my_off_t offset;
int whence;
my_off_t azseek (azio_stream *s, my_off_t offset, int whence)
{

if (s == NULL || whence == SEEK_END ||
@@ -812,8 +804,7 @@ my_off_t azseek (s, offset, whence)
given compressed file. This position represents a number of bytes in the
uncompressed data stream.
*/
my_off_t ZEXPORT aztell (file)
azio_stream *file;
my_off_t ZEXPORT aztell (azio_stream *file)
{
return azseek(file, 0L, SEEK_CUR);
}
@@ -237,7 +237,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
bool del, PHC)
{
char *p;
int i, n;
int n;
bool rcop= true;
PCOL colp;
//PCOLUMN cp;
@@ -276,7 +276,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
n = strlen(p) + 1;
} // endfor p

for (i = 0, colp = tdbp->GetColumns(); colp; i++, colp = colp->GetNext()) {
for (colp = tdbp->GetColumns(); colp; colp = colp->GetNext()) {
if (colp->InitValue(g))
throw 2;

@@ -317,7 +317,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
n = strlen(p) + 1;
} // endfor p

for (i = 0, colp = utp->GetColumns(); colp; i++, colp = colp->GetNext()) {
for (colp = utp->GetColumns(); colp; colp = colp->GetNext()) {
if (colp->InitValue(g))
throw 5;
@@ -221,8 +221,7 @@ static int ZCALLBACK ferror_file_func (voidpf opaque __attribute__((unused)), vo
return ret;
}

void fill_fopen_filefunc (pzlib_filefunc_def)
zlib_filefunc_def* pzlib_filefunc_def;
void fill_fopen_filefunc (zlib_filefunc_def* pzlib_filefunc_def)
{
pzlib_filefunc_def->zopen_file = fopen_file_func;
pzlib_filefunc_def->zread_file = fread_file_func;
@@ -1055,7 +1055,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g)
if (!strlen(Field[i])) {
// Generally null fields are not quoted
if (Quoted > 2)
// Except if explicitely required
// Except if explicitly required
strcat(strcat(To_Line, qot), qot);

} else if (Qot && (strchr(Field[i], Sep) || *Field[i] == Qot
@@ -230,7 +230,6 @@ PCOL TDBTBL::InsertSpecialColumn(PCOL scp)
/***********************************************************************/
bool TDBTBL::InitTableList(PGLOBAL g)
{
int n;
uint sln;
const char *scs;
PTABLE tp, tabp;
@@ -243,7 +242,7 @@ bool TDBTBL::InitTableList(PGLOBAL g)
sln = hc->get_table()->s->connect_string.length;
// PlugSetPath(filename, Tdbp->GetFile(g), Tdbp->GetPath());

for (n = 0, tp = tdp->Tablep; tp; tp = tp->GetNext()) {
for (tp = tdp->Tablep; tp; tp = tp->GetNext()) {
if (TestFil(g, To_CondFil, tp)) {
tabp = new(g) XTAB(tp);

@@ -276,7 +275,6 @@ bool TDBTBL::InitTableList(PGLOBAL g)
else
Tablist = tabp;

n++;
} // endif filp

} // endfor tp
@@ -1471,11 +1471,6 @@ extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned in
{
uLong uTotalOutBefore = zi->ci.stream.total_out;
err=deflate(&zi->ci.stream, Z_NO_FLUSH);
if(uTotalOutBefore > zi->ci.stream.total_out)
{
int bBreak = 0;
bBreak++;
}

zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ;
}
@@ -8468,8 +8468,7 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key,
const key_range *end_key) {
DBUG_ENTER_FUNC();

DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", int debug_i = 0;
while (1) { debug_i++; });
DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", my_sleep(50000););

int rc = 0;

@@ -12129,7 +12128,6 @@ static int calculate_stats(
}
}

int num_sst = 0;
for (const auto &it : props) {
std::vector<Rdb_index_stats> sst_stats;
Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats);
@@ -12158,7 +12156,6 @@ static int calculate_stats(
stats[it1.m_gl_index_id].merge(
it1, true, it_index->second->max_storage_fmt_length());
}
num_sst++;
}

if (include_memtables) {
@@ -401,7 +401,7 @@ class ha_rocksdb : public my_core::handler {
void free_key_buffers();

// the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE
rocksdb::Range get_range(const int i, uchar buf[]) const;
rocksdb::Range get_range(const int i, uchar buf[2 * 4]) const;

/*
Perf timers for data reads
@@ -9108,6 +9108,12 @@ int spider_db_print_item_type(
DBUG_ENTER("spider_db_print_item_type");
DBUG_PRINT("info",("spider COND type=%d", item->type()));

if (item->type() == Item::REF_ITEM &&
((Item_ref*)item)->ref_type() == Item_ref::DIRECT_REF)
{
item= item->real_item();
DBUG_PRINT("info",("spider new COND type=%d", item->type()));
}
switch (item->type())
{
case Item::FUNC_ITEM:
@@ -2423,7 +2423,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
// The pivots file will contain all the pivot strings (in the form <size(32bits)> <data>)
// The pivots_fname is the name of the pivots file.
// Note that the pivots file will have one extra pivot in it (the last key in the dictionary) which will not appear in the tree.
int64_t n_pivots=0; // number of pivots in pivots_file
FIDX pivots_file; // the file

r = ft_loader_open_temp_file (bl, &pivots_file);
@@ -2539,8 +2538,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,

allocate_node(&sts, lblock);

n_pivots++;

invariant(maxkey.data != NULL);
if ((r = bl_write_dbt(&maxkey, pivots_stream, NULL, nullptr, bl))) {
ft_loader_set_panic(bl, r, true, which_db, nullptr, nullptr);
@@ -2616,8 +2613,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
// We haven't paniced, so the sum should add up.
invariant(used_estimate == total_disksize_estimate);

n_pivots++;

{
DBT key = make_dbt(0,0); // must write an extra DBT into the pivots file.
r = bl_write_dbt(&key, pivots_stream, NULL, nullptr, bl);
@@ -3302,7 +3297,7 @@ static int write_nonleaves (FTLOADER bl, FIDX pivots_fidx, struct dbout *out, st
int height = 1;

// Watch out for the case where we saved the last pivot but didn't write any more nodes out.
// The trick is not to look at n_pivots, but to look at blocks.n_blocks
// The trick is to look at blocks.n_blocks
while (sts->n_subtrees > 1) {
// If there is more than one block in blocks, then we must build another level of the tree.
@@ -208,12 +208,10 @@ verify_snapshot_system(TXN_MANAGER txn_manager UU()) {
{
//verify neither pair->begin_id nor end_id is in snapshot_xids
TOKUTXN curr_txn = txn_manager->snapshot_head;
uint32_t curr_index = 0;
while (curr_txn != NULL) {
invariant(tuple->begin_id != curr_txn->txnid.parent_id64);
invariant(tuple->end_id != curr_txn->txnid.parent_id64);
curr_txn = curr_txn->snapshot_next;
curr_index++;
}
}
{