MDEV-36483: store ddls in the optimizer trace

This feature stores the DDLs of the tables/views that are used in
a query in the optimizer trace. It is currently controlled by the
session system variable optimizer_record_context, and is not enabled
by default. All the DDLs are stored in a single JSON array, with each
element holding the table/view name and the associated CREATE
definition of the table/view.

The approach taken is to read the global query_tables list from
thd->lex and traverse it in reverse. For each table, a record with
the table name and its DDL is created, dumped to the trace, and the
name is added to a hash. The hash key has the form
dbName_plus_tableName, so duplicate entries are detected and are
not added to the hash (or written to the trace) a second time.

The main suite tests are also run with the feature enabled, and they
all succeed.
This commit is contained in:
bsrikanth-mariadb 2025-05-05 09:46:34 -04:00 committed by Sergei Golubchik
parent e6d652d2dd
commit d5a8d3e90f
15 changed files with 1159 additions and 5 deletions

View File

@ -164,6 +164,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
../sql/opt_hints_parser.cc ../sql/opt_hints_parser.h
../sql/scan_char.h
../sql/opt_hints.cc ../sql/opt_hints.h
../sql/opt_trace_ddl_info.cc ../sql/opt_trace_ddl_info.h
${GEN_SOURCES}
${MYSYS_LIBWRAP_SOURCE}
)

View File

@ -849,6 +849,9 @@ The following specify which files/extra groups are read (specified before remain
heuristic, thus perform exhaustive search: 1 - prune
plans based on cost and number of retrieved rows eq_ref:
2 - prune also if we find an eq_ref chain
--optimizer-record-context
Controls storing of optimizer context of all the tables
that are referenced in a query
--optimizer-row-copy-cost=#
Cost of copying a row from the engine or the join cache
to the SQL layer
@ -1886,6 +1889,7 @@ optimizer-key-next-find-cost 0.082347
optimizer-max-sel-arg-weight 32000
optimizer-max-sel-args 16000
optimizer-prune-level 2
optimizer-record-context FALSE
optimizer-row-copy-cost 0.060866
optimizer-row-lookup-cost 0.130839
optimizer-row-next-find-cost 0.045916

View File

@ -0,0 +1,480 @@
set optimizer_record_context=ON;
#
show variables like 'optimizer_record_context';
Variable_name Value
optimizer_record_context ON
#
set optimizer_record_context=OFF;
#
show variables like 'optimizer_record_context';
Variable_name Value
optimizer_record_context OFF
#
create database db1;
use db1;
create table t1 (a int, b int);
insert into t1 values (1,2),(2,3);
#
create table t2 (a int);
insert into t2 values (1),(2);
#
create view view1 as (select t1.a as a, t1.b as b, t2.a as c from (t1 join t2) where t1.a = t2.a);
#
# disable both optimizer_trace and optimizer_record_context
# there should be no trace
#
set optimizer_trace=0;
set optimizer_record_context=OFF;
#
select * from t1 where t1.a = 3;
a b
#
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
#
# disable optimizer_trace, but enable optimizer_record_context
# there should be no trace here as well
#
set optimizer_record_context=ON;
#
select * from t1 where t1.a = 3;
a b
#
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
#
# enable optimizer_trace, but disable optimizer_record_context
# trace result should be empty
#
set optimizer_trace=1;
set optimizer_record_context=OFF;
#
select * from t1 where t1.a = 3;
a b
#
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
#
# enable both optimizer_trace and optimizer_record_context
# trace result should have 1 ddl statement for table t1
#
set optimizer_trace=1;
set optimizer_record_context=ON;
#
select * from t1 where t1.a = 3;
a b
#
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
db_used
db1
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
name
db1.t1
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
#
# enable both optimizer_trace and optimizer_record_context
# test for view
# trace result should have 3 ddl statements
#
set optimizer_record_context=ON;
select * from view1 where view1.a = 3;
a b c
#
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `t2` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW db1.view1 AS (select `db1`.`t1`.`a` AS `a`,`db1`.`t1`.`b` AS `b`,`db1`.`t2`.`a` AS `c` from (`db1`.`t1` join `db1`.`t2`) where `db1`.`t1`.`a` = `db1`.`t2`.`a`)
#
# enable both optimizer_trace and optimizer_record_context
# test for temp table
# trace result should have 1 ddl statement for table t1
#
create temporary table temp1(col1 int);
insert into temp1 select * from t2;
#
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `t2` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
CREATE TEMPORARY TABLE `temp1` (
`col1` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
#
# there should be no duplicate ddls
# there should be only 1 ddl for table t2
#
select * from t2 union select * from t2 union select * from t2;
a
1
2
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `t2` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
#
# there should be no duplicate ddls
# there should be only 3 ddls for tables t1, t2, and view1
#
select * from view1 where view1.a = 3 union select * from view1 where view1.a = 3;
a b c
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `t2` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW db1.view1 AS (select `db1`.`t1`.`a` AS `a`,`db1`.`t1`.`b` AS `b`,`db1`.`t2`.`a` AS `c` from (`db1`.`t1` join `db1`.`t2`) where `db1`.`t1`.`a` = `db1`.`t2`.`a`)
#
# test for insert
# there should be no trace for insert with values
#
insert into t1 values ((select max(t2.a) from t2), (select min(t2.a) from t2));
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
#
# test for delete
# trace result should have 1 ddl statement for table t1
#
delete from t1 where t1.a=3;
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
#
# test for update
# trace result should have 1 ddl statement for table t1
#
update t1 set t1.b = t1.a;
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
#
# test for insert as select
# trace result should have 2 ddl statements for tables t1, t2
#
insert into t1 (select t2.a as a, t2.a as b from t2);
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `t2` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
create database db2;
use db2;
create table t1(a int);
insert into t1 values (1),(2),(3);
#
# use database db1
# test to select 2 tables with same name from 2 databases
# trace result should have 2 ddl statements for tables db1.t1, db2.t1
#
use db1;
select db1_t1.b
FROM t1 AS db1_t1, db2.t1 AS db2_t1
WHERE db1_t1.a = db2_t1.a AND db1_t1.a >= 3;
b
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
db_used
db1
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
name
db2.t1
db1.t1
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `db2`.`t1` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
#
# use database db2
# test to select 2 tables with same name but from 2 databases
# trace result should have 2 ddl statements for tables db1.t1, db2.t1
#
use db2;
select db1_t1.b
FROM db1.t1 AS db1_t1, db2.t1 AS db2_t1
WHERE db1_t1.a = db2_t1.a AND db1_t1.a >= 3;
b
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
db_used
db2
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
name
db2.t1
db1.t1
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
CREATE TABLE `db1`.`t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
#
# use database db2
# test to select from 2 tables from 2 different databases,
# of which one is a mysql table, and other is a db1 table
# trace result should have only 1 ddl
#
select t1.b
FROM db1.t1 AS t1, mysql.db AS t2
WHERE t1.a >= 3;
b
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
db_used
db2
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
name
db1.t1
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `db1`.`t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
use db1;
drop table db2.t1;
drop database db2;
drop table temp1;
drop view view1;
drop table t2;
#
# const table test with explain
#
insert into t1 select seq, seq from seq_1_to_10;
create table t2 (a int primary key, b int);
insert into t2 select seq, seq from seq_1_to_10;
explain select * from t1, t2 where t2.a=1 and t1.b=t2.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t1 ALL NULL NULL NULL NULL 15 Using where
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
db_used
db1
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
name
db1.t2
db1.t1
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `t2` (
`a` int(11) NOT NULL,
`b` int(11) DEFAULT NULL,
PRIMARY KEY (`a`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
drop table t1;
drop table t2;
#
# query failure test
# trace should not contain the failed query result
# no table definitions for t10, and t11 should be present
#
create table t10 (a int, b int);
insert into t10 select seq, seq from seq_1_to_10;
create table t11 (a int primary key, b varchar(10));
insert into t11 values (1, 'one'),(2, 'two');
select t10.b, t11.a from t10, t11 where t10.a = t11.c + 10;
ERROR 42S22: Unknown column 't11.c' in 'WHERE'
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
db_used
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
name
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
drop table t10;
drop table t11;
#
# partitioned table test
# trace result should have 1 ddl
#
create table t1 (
pk int primary key,
a int,
key (a)
)
engine=myisam
partition by range(pk) (
partition p0 values less than (10),
partition p1 values less than MAXVALUE
);
insert into t1 select seq, MOD(seq, 100) from seq_1_to_5000;
flush tables;
explain
select * from t1 partition (p1) where a=10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref a a 5 const 49
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
db_used
db1
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
name
db1.t1
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `t1` (
`pk` int(11) NOT NULL,
`a` int(11) DEFAULT NULL,
PRIMARY KEY (`pk`),
KEY `a` (`a`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
PARTITION BY RANGE (`pk`)
(PARTITION `p0` VALUES LESS THAN (10) ENGINE = MyISAM,
PARTITION `p1` VALUES LESS THAN MAXVALUE ENGINE = MyISAM)
drop table t1;
#
# test with insert delayed
# test shouldn't fail
# Also, trace result shouldn't have any ddls
#
CREATE TABLE t1 (
a int(11) DEFAULT 1,
b int(11) DEFAULT (a + 1),
c int(11) DEFAULT (a + b)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci;
insert into t1 values ();
insert into t1 (a) values (2);
insert into t1 (a,b) values (10,20);
insert into t1 (a,b,c) values (100,200,400);
truncate table t1;
insert delayed into t1 values ();
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
db_used
db1
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
name
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
drop table t1;
#
# test primary, and foreign key tables
# trace result should have the ddls in correct order
#
CREATE TABLE t1 (
id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
name VARCHAR(10)
);
CREATE TABLE t2 (
id INT,
address VARCHAR(10),
CONSTRAINT `fk_id` FOREIGN KEY (id) REFERENCES t1 (id)
);
insert into t1 values (1, 'abc'), (2, 'xyz');
insert into t2 values (1, 'address1'), (2, 'address2');
select t1.name, t2.address
from t1,t2 where t1.id = t2.id;
name address
abc address1
xyz address2
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
db_used
db1
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
name
db1.t2
db1.t1
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
ddl
CREATE TABLE `t2` (
`id` int(11) DEFAULT NULL,
`address` varchar(10) DEFAULT NULL,
KEY `fk_id` (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
CREATE TABLE `t1` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(10) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=MyISAM AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
drop table t1;
drop table t2;
drop database db1;

View File

@ -0,0 +1,364 @@
--source include/not_embedded.inc
--source include/have_sequence.inc
--source include/have_partition.inc
set optimizer_record_context=ON;
--echo #
show variables like 'optimizer_record_context';
--echo #
set optimizer_record_context=OFF;
--echo #
show variables like 'optimizer_record_context';
--echo #
create database db1;
use db1;
create table t1 (a int, b int);
insert into t1 values (1,2),(2,3);
--echo #
create table t2 (a int);
insert into t2 values (1),(2);
--echo #
create view view1 as (select t1.a as a, t1.b as b, t2.a as c from (t1 join t2) where t1.a = t2.a);
--echo #
--echo # disable both optimizer_trace and optimizer_record_context
--echo # there should be no trace
--echo #
set optimizer_trace=0;
set optimizer_record_context=OFF;
--echo #
select * from t1 where t1.a = 3;
--echo #
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # disable optimizer_trace, but enable optimizer_record_context
--echo # there should be no trace here as well
--echo #
set optimizer_record_context=ON;
--echo #
select * from t1 where t1.a = 3;
--echo #
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # enable optimizer_trace, but disable optimizer_record_context
--echo # trace result should be empty
--echo #
set optimizer_trace=1;
set optimizer_record_context=OFF;
--echo #
select * from t1 where t1.a = 3;
--echo #
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # enable both optimizer_trace and optimizer_record_context
--echo # trace result should have 1 ddl statement for table t1
--echo #
set optimizer_trace=1;
set optimizer_record_context=ON;
--echo #
select * from t1 where t1.a = 3;
--echo #
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # enable both optimizer_trace and optimizer_record_context
--echo # test for view
--echo # trace result should have 3 ddl statements
--echo #
set optimizer_record_context=ON;
select * from view1 where view1.a = 3;
--echo #
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # enable both optimizer_trace and optimizer_record_context
--echo # test for temp table
--echo # trace result should have 1 ddl statement for table t1
--echo #
create temporary table temp1(col1 int);
insert into temp1 select * from t2;
--echo #
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # there should be no duplicate ddls
--echo # there should be only 1 ddl for table t2
--echo #
select * from t2 union select * from t2 union select * from t2;
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # there should be no duplicate ddls
--echo # there should be only 3 ddls for tables t1, t2, and view1
--echo #
select * from view1 where view1.a = 3 union select * from view1 where view1.a = 3;
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # test for insert
--echo # there should be no trace for insert with values
--echo #
insert into t1 values ((select max(t2.a) from t2), (select min(t2.a) from t2));
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # test for delete
--echo # trace result should have 1 ddl statement for table t1
--echo #
delete from t1 where t1.a=3;
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # test for update
--echo # trace result should have 1 ddl statement for table t1
--echo #
update t1 set t1.b = t1.a;
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # test for insert as select
--echo # trace result should have 2 ddl statements for tables t1, t2
--echo #
insert into t1 (select t2.a as a, t2.a as b from t2);
set @ddls= (select json_detailed(json_extract(trace, '$**.ddl')) from information_schema.optimizer_trace);
select ddl
from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
create database db2;
use db2;
create table t1(a int);
insert into t1 values (1),(2),(3);
--echo #
--echo # use database db1
--echo # test to select 2 tables with same name from 2 databases
--echo # trace result should have 2 ddl statements for tables db1.t1, db2.t1
--echo #
use db1;
select db1_t1.b
FROM t1 AS db1_t1, db2.t1 AS db2_t1
WHERE db1_t1.a = db2_t1.a AND db1_t1.a >= 3;
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # use database db2
--echo # test to select 2 tables with same name but from 2 databases
--echo # trace result should have 2 ddl statements for tables db1.t1, db2.t1
--echo #
use db2;
select db1_t1.b
FROM db1.t1 AS db1_t1, db2.t1 AS db2_t1
WHERE db1_t1.a = db2_t1.a AND db1_t1.a >= 3;
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
--echo #
--echo # use database db2
--echo # test to select from 2 tables from 2 different databases,
--echo # of which one is a mysql table, and other is a db1 table
--echo # trace result should have only 1 ddl
--echo #
select t1.b
FROM db1.t1 AS t1, mysql.db AS t2
WHERE t1.a >= 3;
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
use db1;
drop table db2.t1;
drop database db2;
drop table temp1;
drop view view1;
drop table t2;
--echo #
--echo # const table test with explain
--echo #
insert into t1 select seq, seq from seq_1_to_10;
create table t2 (a int primary key, b int);
insert into t2 select seq, seq from seq_1_to_10;
explain select * from t1, t2 where t2.a=1 and t1.b=t2.b;
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
drop table t1;
drop table t2;
--echo #
--echo # query failure test
--echo # trace should not contain the failed query result
--echo # no table definitions for t10, and t11 should be present
--echo #
create table t10 (a int, b int);
insert into t10 select seq, seq from seq_1_to_10;
create table t11 (a int primary key, b varchar(10));
insert into t11 values (1, 'one'),(2, 'two');
--error ER_BAD_FIELD_ERROR
select t10.b, t11.a from t10, t11 where t10.a = t11.c + 10;
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
drop table t10;
drop table t11;
--echo #
--echo # partitioned table test
--echo # trace result should have 1 ddl
--echo #
create table t1 (
pk int primary key,
a int,
key (a)
)
engine=myisam
partition by range(pk) (
partition p0 values less than (10),
partition p1 values less than MAXVALUE
);
insert into t1 select seq, MOD(seq, 100) from seq_1_to_5000;
flush tables;
explain
select * from t1 partition (p1) where a=10;
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
drop table t1;
--echo #
--echo # test with insert delayed
--echo # test shouldn't fail
--echo # Also, trace result shouldn't have any ddls
--echo #
CREATE TABLE t1 (
a int(11) DEFAULT 1,
b int(11) DEFAULT (a + 1),
c int(11) DEFAULT (a + b)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci;
insert into t1 values ();
insert into t1 (a) values (2);
insert into t1 (a,b) values (10,20);
insert into t1 (a,b,c) values (100,200,400);
truncate table t1;
insert delayed into t1 values ();
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
drop table t1;
--echo #
--echo # test primary, and foreign key tables
--echo # trace result should have the ddls in correct order
--echo #
CREATE TABLE t1 (
id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
name VARCHAR(10)
);
CREATE TABLE t2 (
id INT,
address VARCHAR(10),
CONSTRAINT `fk_id` FOREIGN KEY (id) REFERENCES t1 (id)
);
insert into t1 values (1, 'abc'), (2, 'xyz');
insert into t2 values (1, 'address1'), (2, 'address2');
select t1.name, t2.address
from t1,t2 where t1.id = t2.id;
set @trace= (select trace from information_schema.optimizer_trace);
set @db=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.current_database')));
select db_used from json_table(@db, '$[*]' columns(db_used text path '$')) as jt;
set @fn=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.name')));
select name from json_table(@fn, '$[*]' columns(name text path '$')) as jt;
set @ddls=(select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.ddl')));
select ddl from json_table(@ddls, '$[*]' columns(ddl text path '$')) as jt;
drop table t1;
drop table t2;
drop database db1;

View File

@ -2602,6 +2602,16 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_RECORD_CONTEXT
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BOOLEAN
VARIABLE_COMMENT Controls storing of optimizer context of all the tables that are referenced in a query
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME OPTIMIZER_ROWID_COMPARE_COST
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE DOUBLE

View File

@ -2792,6 +2792,16 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_RECORD_CONTEXT
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BOOLEAN
VARIABLE_COMMENT Controls storing of optimizer context of all the tables that are referenced in a query
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME OPTIMIZER_ROWID_COMPARE_COST
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE DOUBLE

View File

@ -188,6 +188,7 @@ SET (SQL_SOURCE
rowid_filter.cc rowid_filter.h
optimizer_costs.h optimizer_defaults.h
opt_trace.cc
opt_trace_ddl_info.cc
table_cache.cc encryption.cc temporary_tables.cc
json_table.cc
proxy_protocol.cc backup.cc xa.cc

View File

@ -2378,7 +2378,17 @@ void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
DBUG_ASSERT(part < m_file_tot_parts);
DBUG_ASSERT(m_file[part]);
dummy_info.data_file_name= dummy_info.index_file_name = NULL;
m_file[part]->update_create_info(&dummy_info);
/*
store_table_definitions_in_trace()/show_create_table() may attempt
to produce DDL for a table which has only some partitions open.
We can't get options for unopened partitions. They are not relevant
for our purpose, so it's ok to skip printing their options.
*/
if (m_file[part]->is_open())
m_file[part]->update_create_info(&dummy_info);
else
dummy_info.init();
sub_elem->data_file_name = (char*) dummy_info.data_file_name;
sub_elem->index_file_name = (char*) dummy_info.index_file_name;
}
@ -2387,7 +2397,14 @@ void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
{
DBUG_ASSERT(m_file[i]);
dummy_info.data_file_name= dummy_info.index_file_name= NULL;
m_file[i]->update_create_info(&dummy_info);
/*
A partition might not be open, see above note about
store_table_definitions_in_trace()
*/
if (m_file[i]->is_open())
m_file[i]->update_create_info(&dummy_info);
else
dummy_info.init();
part_elem->data_file_name = (char*) dummy_info.data_file_name;
part_elem->index_file_name = (char*) dummy_info.index_file_name;
}

View File

@ -9708,6 +9708,7 @@ PSI_memory_key key_memory_user_var_entry;
PSI_memory_key key_memory_user_var_entry_value;
PSI_memory_key key_memory_String_value;
PSI_memory_key key_memory_WSREP;
PSI_memory_key key_memory_trace_ddl_info;
#ifdef HAVE_PSI_INTERFACE
@ -9954,7 +9955,8 @@ static PSI_memory_info all_server_memory[]=
{ &key_memory_Query_cache, "Query_cache", PSI_FLAG_GLOBAL},
{ &key_memory_Table_trigger_dispatcher, "Table_trigger_dispatcher::m_mem_root", 0},
{ &key_memory_native_functions, "native_functions", PSI_FLAG_GLOBAL},
{ &key_memory_WSREP, "wsrep", 0 }
{ &key_memory_WSREP, "wsrep", 0 },
{ &key_memory_trace_ddl_info, "TRACE_DDL_INFO", 0}
};
/**

View File

@ -498,6 +498,7 @@ extern PSI_memory_key key_memory_Query_cache;
extern PSI_memory_key key_memory_Table_trigger_dispatcher;
extern PSI_memory_key key_memory_native_functions;
extern PSI_memory_key key_memory_WSREP;
extern PSI_memory_key key_memory_trace_ddl_info;
/*
MAINTAINER: Please keep this list in order, to limit merge collisions.

229
sql/opt_trace_ddl_info.cc Normal file
View File

@ -0,0 +1,229 @@
/*
Copyright (c) 2025, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335
USA */
#include "opt_trace_ddl_info.h"
#include "sql_show.h"
#include "my_json_writer.h"
#include "sql_list.h"
#include "sql_table.h"
#include "mysql.h"
#include "hash.h"
/**
@file
@brief
Stores the ddls of the tables, and views that are used
in either SELECT, INSERT, DELETE, and UPDATE queries,
into the optimizer trace. All the ddls are stored together
at one place as a JSON array object with name "list_ddls"
*/
/*
  One record in the de-duplication hash: identifies a table or view
  whose DDL has already been written to the trace.
*/
struct DDL_Key
{
  char *name; // full name of the table or view, formed as "db_name.table_name"
  size_t name_len; // length of 'name' in bytes; used as the hash key length
};
/*
  Hash callback: for a DDL_Key record stored in the hash, return a
  pointer to its key (the full "db.table" name) and set *length to the
  key's byte length. The 'flags' argument is unused but required by
  the HASH interface.
*/
static const uchar *get_rec_key(const void *entry_, size_t *length,
                                my_bool flags)
{
  const DDL_Key *rec= (const DDL_Key *) entry_;
  *length= rec->name_len;
  return (const uchar *) rec->name;
}
/*
  @brief
    Check whether a table is a regular base table (for which we should
    dump the ddl) or not.

  @detail
    Besides base tables, the query may have:
    - Table functions (Currently it's only JSON_TABLE)
    - INFORMATION_SCHEMA tables
    - Tables in PERFORMANCE_SCHEMA and mysql database
    - Internal temporary ("work") tables
*/
static bool is_base_table(TABLE_LIST *tbl)
{
  if (!tbl->table || !tbl->table->s)
    return false;                    /* not an opened, materialized table */
  if (tbl->table_function || tbl->schema_table)
    return false;                    /* JSON_TABLE or I_S table */
  if (get_table_category(tbl->get_db_name(), tbl->get_table_name()) !=
      TABLE_CATEGORY_USER)
    return false;                    /* P_S, mysql.* and other system tables */
  /* Finally, exclude internal temporary ("work") tables */
  return tbl->table->s->tmp_table != INTERNAL_TMP_TABLE &&
         tbl->table->s->tmp_table != SYSTEM_TMP_TABLE;
}
/*
  @brief
    Write one (name, ddl) pair into the optimizer trace, JSON-escaping
    the DDL statement text.

  @param thd      current thread; provides the trace writer and mem_root
  @param ddl_key  record holding the full "db_name.table_name" name
  @param stmt     CREATE statement text to escape and store

  @return
    false on success, true on OOM or when escaping fails
*/
static bool dump_record_to_trace(THD *thd, DDL_Key *ddl_key, String *stmt)
{
  Json_writer_object ddl_wrapper(thd);
  ddl_wrapper.add("name", ddl_key->name);
  size_t non_esc_stmt_len= stmt->length();
  /*
    Worst-case growth of the escaped text: each input byte may expand
    into an escape sequence. 4 is chosen as a worst case although 3
    should suffice ("'" would be escaped to \"\'\").
  */
  static const size_t ESCAPE_LEN_MULTIPLIER= 4;
  size_t escape_stmt_len= ESCAPE_LEN_MULTIPLIER * non_esc_stmt_len;
  char *escaped_stmt= (char *) thd->alloc(escape_stmt_len + 1);
  if (!escaped_stmt)
    return true;                                 /* OOM */
  const char *src= stmt->c_ptr();
  int act_escape_stmt_len=
    json_escape_string(src, src + non_esc_stmt_len,
                       escaped_stmt, escaped_stmt + escape_stmt_len);
  if (act_escape_stmt_len < 0)
    return true;                                 /* escaping failed */
  escaped_stmt[act_escape_stmt_len]= 0;
  ddl_wrapper.add("ddl", escaped_stmt);
  return false;
}
/*
  Build the "CREATE ... VIEW <name> AS <select>" statement for a view
  into *buf.

  @param thd    current thread (needed to print the view options)
  @param table  TABLE_LIST element representing the view
  @param name   full "db_name.view_name" text, already formed by caller
  @param buf    output: receives the generated CREATE VIEW statement
*/
static void create_view_def(THD *thd, TABLE_LIST *table, String *name,
                            String *buf)
{
  buf->append(STRING_WITH_LEN("CREATE "));
  /* ALGORITHM / DEFINER / SQL SECURITY clauses, if any */
  view_store_options(thd, table, buf);
  buf->append(STRING_WITH_LEN("VIEW "));
  buf->append(*name);
  buf->append(STRING_WITH_LEN(" AS "));
  /* the view's stored SELECT, exactly as recorded */
  buf->append(table->select_stmt.str, table->select_stmt.length);
}
/*
  @brief
    Dump definitions of all tables and views used by the statement into
    the optimizer trace. The goal is to eventually save everything that
    is needed to reproduce the query execution.

  @detail
    Stores the ddls of the tables and views that are used
    in SELECT, INSERT ... SELECT, DELETE, and UPDATE queries,
    into the optimizer trace.
    Global query_tables are read in reverse order from the thd->lex,
    and a record with table_name and the ddl of the table is created.
    A hash is used to store the records, wherein no duplicates
    are stored. db_name.table_name is used as the key to discard
    duplicates. A newly created record that is not already in the hash
    is dumped into the trace.

  @return
    false when no error occurred during the computation
*/
bool store_table_definitions_in_trace(THD *thd)
{
  LEX *lex= thd->lex;
  /* Only act when tracing plus context recording are enabled for a DML. */
  if (!(thd->variables.optimizer_trace &&
        thd->variables.optimizer_record_context &&
        (lex->sql_command == SQLCOM_SELECT ||
         lex->sql_command == SQLCOM_INSERT_SELECT ||
         lex->sql_command == SQLCOM_DELETE ||
         lex->sql_command == SQLCOM_UPDATE ||
         lex->sql_command == SQLCOM_DELETE_MULTI ||
         lex->sql_command == SQLCOM_UPDATE_MULTI)))
    return false;
  /* Empty global table list: nothing to dump. */
  if (lex->query_tables == *(lex->query_tables_last))
    return false;
  Json_writer_object ddls_wrapper(thd);
  ddls_wrapper.add("current_database", thd->get_db());
  Json_writer_array ddl_list(thd, "list_ddls");
  HASH hash;
  List<TABLE_LIST> tables_list;
  /*
    lex->query_tables lists the VIEWs before their underlying tables.
    Create a list in the reverse order.
  */
  for (TABLE_LIST *tbl= lex->query_tables; tbl; tbl= tbl->next_global)
  {
    if (!tbl->is_view() && !is_base_table(tbl))
      continue;
    if (tables_list.push_front(tbl))
      return true;                               /* OOM */
  }
  if (tables_list.is_empty())
    return false;
  List_iterator li(tables_list);
  my_hash_init(key_memory_trace_ddl_info, &hash, system_charset_info, 16, 0, 0,
               get_rec_key, NULL, HASH_UNIQUE);
  bool res= false;
  for (TABLE_LIST *tbl= li++; tbl; li.remove(), tbl= li++)
  {
    String ddl;
    String name;
    DDL_Key *ddl_key;
    char *name_copy;
    /*
      A query can use the same table multiple times. Do not dump the DDL
      multiple times.
    */
    name.append(tbl->get_db_name().str, tbl->get_db_name().length);
    name.append(STRING_WITH_LEN("."));
    name.append(tbl->get_table_name().str, tbl->get_table_name().length);
    if (my_hash_search(&hash, (uchar *) name.c_ptr(), name.length()))
      continue;
    if (!(ddl_key= (DDL_Key *) thd->alloc(sizeof(DDL_Key))))
    {
      res= true;                                 /* OOM */
      break;
    }
    if (tbl->is_view())
      create_view_def(thd, tbl, &name, &ddl);
    else
    {
      if (show_create_table(thd, tbl, &ddl, NULL, WITH_DB_NAME))
      {
        res= true;
        break;
      }
    }
    /* The key must outlive the local String: copy it to the mem_root. */
    if (!(name_copy= (char *) thd->alloc(name.length() + 1)))
    {
      res= true;                                 /* OOM */
      break;
    }
    strcpy(name_copy, name.c_ptr());
    ddl_key->name= name_copy;
    ddl_key->name_len= name.length();
    if (my_hash_insert(&hash, (uchar *) ddl_key))
    {
      res= true;                                 /* OOM */
      break;
    }
    if (dump_record_to_trace(thd, ddl_key, &ddl))
    {
      res= true;
      break;
    }
  }
  my_hash_free(&hash);
  return res;
}

25
sql/opt_trace_ddl_info.h Normal file
View File

@ -0,0 +1,25 @@
/*
Copyright (c) 2025, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335
USA */
#ifndef OPT_TRACE_DDL_INFO
#define OPT_TRACE_DDL_INFO

#include "my_global.h"

class THD; /* forward declaration: keeps this header self-contained */

/*
  Dump the CREATE statements of all tables/views used by the current
  statement into the optimizer trace (implemented in
  opt_trace_ddl_info.cc). Returns false on success.
*/
bool store_table_definitions_in_trace(THD *thd);

#endif

View File

@ -892,6 +892,7 @@ typedef struct system_variables
my_bool session_track_user_variables;
#endif // USER_VAR_TRACKING
my_bool tcp_nodelay;
my_bool optimizer_record_context;
plugin_ref table_plugin;
plugin_ref tmp_table_plugin;
plugin_ref enforced_table_plugin;

View File

@ -92,8 +92,8 @@
#include "opt_trace.h"
#include "mysql/psi/mysql_sp.h"
#include "my_json_writer.h"
#include "my_json_writer.h"
#include "opt_trace_ddl_info.h"
#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
#ifdef WITH_ARIA_STORAGE_ENGINE
@ -5882,6 +5882,8 @@ wsrep_error_label:
res= true;
finish:
if (!thd->is_error() && !res)
res= store_table_definitions_in_trace(thd);
thd->reset_query_timer();
DBUG_ASSERT(!thd->in_active_multi_stmt_transaction() ||

View File

@ -3108,6 +3108,13 @@ static Sys_var_ulong Sys_optimizer_trace_max_mem_size(
SESSION_VAR(optimizer_trace_max_mem_size), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, ULONG_MAX), DEFAULT(1024 * 1024), BLOCK_SIZE(1));
/*
  Session-scoped switch (default OFF) that makes the optimizer trace
  also record the DDL context of every table/view referenced by a query.
  Fix: "optmizer" -> "optimizer" in the user-visible help text.
*/
static Sys_var_mybool Sys_optimizer_record_context(
       "optimizer_record_context",
       "Controls storing of optimizer context of all the tables "
       "that are referenced in a query",
       SESSION_VAR(optimizer_record_context), CMD_LINE(OPT_ARG),
       DEFAULT(FALSE));
static Sys_var_ulong Sys_optimizer_adjust_secondary_key_costs(
"optimizer_adjust_secondary_key_costs",
UNUSED_HELP,